Dataset schema (⌀ marks columns that contain nulls):

| column | dtype | range |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4–1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |
hexsha: 9dbded985b27cf9bd1bae65fff1f6bc608d3c595 | size: 98 | ext: py | lang: Python
max_stars: 1 - Beginner/1155.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | ["MIT"] | count: 1 | 2020-09-09T12:48:09.000Z to 2020-09-09T12:48:09.000Z
max_issues: 1 - Beginner/1155.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | ["MIT"] | count: null | null to null
max_forks: 1 - Beginner/1155.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | ["MIT"] | count: null | null to null
content:
# URI Online Judge 1155
S = 0
for i in range(1, 101):
    S += 1/i
print('{:.2f}'.format(S))
avg_line_length: 12.25 | max_line_length: 25 | alphanum_fraction: 0.530612
hexsha: 41b5e124be3a3fba9be2cc187c010c3a3a87d5a9 | size: 2,672 | ext: py | lang: Python
max_stars: Hackerrank/Dijkstra/Dijkstra.py | rahil-1407/Data-Structure-and-Algorithms | ea3eb9849aeb2716ef5812a0b5621a28120b1880 | ["MIT"] | count: 51 | 2021-01-14T04:05:55.000Z to 2022-01-25T11:25:37.000Z
max_issues: Hackerrank/Dijkstra/Dijkstra.py | rahil-1407/Data-Structure-and-Algorithms | ea3eb9849aeb2716ef5812a0b5621a28120b1880 | ["MIT"] | count: 638 | 2020-12-27T18:49:53.000Z to 2021-11-21T05:22:52.000Z
max_forks: Hackerrank/Dijkstra/Dijkstra.py | rahil-1407/Data-Structure-and-Algorithms | ea3eb9849aeb2716ef5812a0b5621a28120b1880 | ["MIT"] | count: 124 | 2021-01-30T06:40:20.000Z to 2021-11-21T15:14:40.000Z
content:
# Program to find the shortest path using Dijkstra's algorithm
# implemented using a priority queue
import sys
import queue


class Vertex:
    def __init__(self):
        self.edges = {}

    def get_edges(self):
        return self.edges

    def add_edge(self, value, distance):
        if value not in self.edges or distance < self.edges[value]:
            self.edges[value] = distance


class Graph:
    # Adding all Edges
    def __init__(self, N):
        self.vertices = {}
        while (N > 0):
            self.vertices[N] = Vertex()
            N -= 1

    def get_vertices(self):
        return self.vertices

    def get_vertex(self, value):
        return self.vertices[value]

    def add_vertex(self, value, vertex):
        self.vertices[value] = vertex


class Dijkstra:
    def __init__(self, graph):
        self.graph = graph

    def calculate(self, start):
        # for not-yet-visited neighbours, update their distance if they can be reached with a shorter distance
        solved = {start: 0}
        adjacents = queue.PriorityQueue()
        self.update_adjacents(start, solved, adjacents)
        while not adjacents.empty():
            (distance, value) = adjacents.get()
            if value in solved:
                continue
            solved[value] = distance
            self.update_adjacents(value, solved, adjacents)
        return solved

    def update_adjacents(self, parent, solved, adjacents):
        # neighbours are pushed into the priority queue with their candidate (minimum) distance
        edges = self.graph.get_vertex(parent).get_edges()
        for value, distance in edges.items():
            adjacents.put((solved[parent] + distance, value))


def read_integers():
    return [int(x) for x in sys.stdin.readline().split(" ")]


def build_graph(N, M):  # Initializing Graph
    graph = Graph(N)
    while (M > 0):
        (x, y, R) = read_integers()
        graph.get_vertex(x).add_edge(y, R)
        graph.get_vertex(y).add_edge(x, R)
        M -= 1
    return graph


def print_distances(distances, N, S):
    # Printing The Required Output
    for i in range(1, N + 1):
        if (i == S):
            continue
        distance = -1 if i not in distances else distances[i]
        print(distance, end=" ")
    print()


def execute_test_case():
    (N, M) = read_integers()
    graph = build_graph(N, M)
    dijkstra = Dijkstra(graph)
    S = int(sys.stdin.readline())
    distances = dijkstra.calculate(S)
    print_distances(distances, N, S)


def main():
    T = int(sys.stdin.readline())  # Taking Input
    while (T > 0):
        execute_test_case()
        T -= 1


if __name__ == "__main__":
    main()
avg_line_length: 25.941748 | max_line_length: 107 | alphanum_fraction: 0.609281
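A minimal usage sketch for the Graph/Dijkstra classes in the record above, bypassing the stdin-driven main(). The graph below is a made-up example, not part of the dataset record; it assumes the classes are defined in (or imported into) the same module.

# Hypothetical usage sketch (not part of the dataset record): assumes the Graph and
# Dijkstra classes from Dijkstra.py above are available in the same module.
graph = Graph(4)                          # vertices are numbered 1..4
for x, y, w in [(1, 2, 24), (1, 4, 20), (3, 1, 3), (4, 3, 12)]:
    graph.get_vertex(x).add_edge(y, w)    # undirected graph: add the edge in both directions
    graph.get_vertex(y).add_edge(x, w)

distances = Dijkstra(graph).calculate(1)  # {node: shortest distance from node 1}
for node in range(2, 5):
    print(node, distances.get(node, -1))  # prints "2 24", "3 3", "4 15"; -1 would mean unreachable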
hexsha: 6a6b0e402a5efe8f4d0555c8ea4d2c3d48342465 | size: 44 | ext: py | lang: Python
max_stars: tests/__init__.py | other-juju/track-viz | 5276324b5ee231b54cf575216c9cabacd590f30f | ["MIT"] | count: 2 | 2021-12-10T11:58:22.000Z to 2021-12-15T09:06:08.000Z
max_issues: tests/__init__.py | other-juju/track-viz | 5276324b5ee231b54cf575216c9cabacd590f30f | ["MIT"] | count: 32 | 2021-12-08T13:39:54.000Z to 2022-03-08T03:22:41.000Z
max_forks: tests/__init__.py | other-juju/track-viz | 5276324b5ee231b54cf575216c9cabacd590f30f | ["MIT"] | count: 7 | 2021-12-14T21:08:41.000Z to 2022-01-23T12:19:33.000Z
content:
"""Test suite for the track_viz package."""
avg_line_length: 22 | max_line_length: 43 | alphanum_fraction: 0.704545
hexsha: 93442eba39e0430e0bbd67a783e4b1f9410732f8 | size: 172 | ext: py | lang: Python
max_stars: mmseg/utils/__init__.py | albert-yue/TMANet | e19b25a318d1569ba58ccf47e03ae80385aef40b | ["Apache-2.0"] | count: null | null to null
max_issues: mmseg/utils/__init__.py | albert-yue/TMANet | e19b25a318d1569ba58ccf47e03ae80385aef40b | ["Apache-2.0"] | count: null | null to null
max_forks: mmseg/utils/__init__.py | albert-yue/TMANet | e19b25a318d1569ba58ccf47e03ae80385aef40b | ["Apache-2.0"] | count: null | null to null
content:
from .collect_env import collect_env
from .logger import get_root_logger
from .dist_utils import get_dist_env
__all__ = ['get_root_logger', 'collect_env', 'get_dist_env']
avg_line_length: 28.666667 | max_line_length: 60 | alphanum_fraction: 0.813953
hexsha: f7da5eed14d93faeb0a1a396b348f553343883e2 | size: 4,508 | ext: py | lang: Python
max_stars: week5-machine_learning/scripts/run_tune_example.py | Kaminyou/110-1-NTU-DBME5028 | 5aaef62cb5a3be4cbba28c2d252964a614183132 | ["MIT"] | count: 6 | 2021-10-06T03:13:59.000Z to 2021-11-07T12:59:37.000Z
max_issues: week5-machine_learning/scripts/run_tune_example.py | Kaminyou/110-1-NTU-DBME5028 | 5aaef62cb5a3be4cbba28c2d252964a614183132 | ["MIT"] | count: null | null to null
max_forks: week5-machine_learning/scripts/run_tune_example.py | Kaminyou/110-1-NTU-DBME5028 | 5aaef62cb5a3be4cbba28c2d252964a614183132 | ["MIT"] | count: null | null to null
content:
"""
python ./scripts/run_tune_example.py
# To see results
from ray.tune import Analysis
analysis = Analysis(PATH_TO_EXP_DIR)
df = analysis.trial_dataframes
"""
import sys
import os
import numpy as np
from random import shuffle
from collections import deque
from dataclasses import dataclass, asdict
import torch
import torch.nn as nn
from torch import optim
from ray import tune
sys.path.append(".")
from src.utils import load_and_process_digits
from src.models import LogisticRegressionTorch
def simple_loader(inputs, targets, batch_size=128, shuffle_per_iteration=20):
index = 0
while True:
indexes_get = np.arange(index * batch_size, (index + 1) * batch_size) % len(inputs)
x_ = np.take(inputs, indexes_get, axis=0)
y_ = np.take(targets, indexes_get, axis=0)
index += 1
if index % shuffle_per_iteration == 0:
full_index = np.arange(len(x_))
shuffle(full_index)
inputs = np.take(inputs, full_index, axis=0)
targets = np.take(targets, full_index, axis=0)
yield x_, y_
def train_digits(config: dict):
x_train, y_train, x_valid, y_valid, x_test, y_test = load_and_process_digits()
train_loader = simple_loader(x_train, y_train, batch_size=config["batch_size"])
model = LogisticRegressionTorch(input_dim=x_train.shape[-1], output_dim=10)
optimizer = optim.SGD(model.parameters(), lr=config["learning_rate"])
loss_fn = nn.CrossEntropyLoss()
train_losses, valid_losses = [], []
bst_loss = 1e+4
patient_counter = 0
for i_epoch in range(config["num_epochs"]):
loss_record = deque(maxlen=100)
for _ in range(len(x_train) // config["batch_size"]):
x, y = next(train_loader)
logits = model(torch.from_numpy(x))
loss_train = loss_fn(logits, torch.from_numpy(y))
### Do regularization
if config["l1_alpha"] > 0:
l1_term = torch.tensor(0.)
for model_params in model.parameters():
reg = torch.abs(model_params).sum()
l1_term += reg
loss_train = loss_train + config["l1_alpha"] * l1_term
if config["l2_alpha"] > 0:
l2_term = torch.tensor(0.)
for model_params in model.parameters():
reg = torch.norm(model_params)
l2_term += reg
loss_train = loss_train + config["l2_alpha"] * l2_term
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
loss_record.append(loss_train.detach().cpu().numpy())
with torch.no_grad():
yp_logits = model(torch.from_numpy(x_valid))
loss_valid = loss_fn(yp_logits, torch.from_numpy(y_valid))
loss_valid = loss_valid.detach().cpu().numpy()
print("Epoch: {}/{}, Training Loss: {:.3f}, Validation Loss: {:.3f}".format(
str(i_epoch + 1).zfill(4),
config["num_epochs"],
np.mean(loss_record),
loss_valid
), flush=True, end="\r")
train_losses.append(np.mean(loss_record))
valid_losses.append(loss_valid)
tune.report(validation_loss=loss_valid) # validation_loss can be keywords you want
### Do earlystopping
if patient_counter >= config["n_earlystopping_rounds"]:
return model, train_losses, valid_losses
if loss_valid < bst_loss:
bst_loss = loss_valid
patient_counter = 0
else:
patient_counter += 1
return model, train_losses, valid_losses
@dataclass
class TrainConfig:
batch_size: int
learning_rate: float
num_epochs: int = 500
l1_alpha: float = 0.
l2_alpha: float = 0.
n_earlystopping_rounds: int = 1e+8
def to_dict(self):
return asdict(self)
if __name__ == "__main__":
# Force use CPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
train_config = TrainConfig(
batch_size=tune.choice([64, 128]),
learning_rate=tune.grid_search([0.5, 1, 1.5]),
num_epochs=1000,
l1_alpha=tune.grid_search([0, 0.001, 0.01]),
l2_alpha=tune.grid_search([0, 0.001, 0.01]),
# n_earlystopping_rounds
)
analysis = tune.run(
train_digits,
config=train_config.to_dict(),
num_samples=3,
progress_reporter=tune.CLIReporter(max_error_rows=20)
) # Total num_trials = num_samples**tunable_params
avg_line_length: 31.524476 | max_line_length: 91 | alphanum_fraction: 0.623114
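The record above ends with `tune.run(...)` returning an `analysis` object. A short, hedged follow-up sketch of pulling results out of it is shown below; the metric name matches the `tune.report(validation_loss=...)` call in the record, but the exact API depends on the Ray version the script targets.

# Hypothetical follow-up to the tune.run(...) call above (not part of the dataset record).
# In the Ray Tune versions this script appears to target, tune.run returns an
# ExperimentAnalysis object.
best_config = analysis.get_best_config(metric="validation_loss", mode="min")
print("Best hyperparameters:", best_config)

# Per-trial training curves as pandas DataFrames, keyed by trial logdir,
# as also suggested in the module docstring of the record.
for logdir, df in analysis.trial_dataframes.items():
    print(logdir, df["validation_loss"].min())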
hexsha: 842747376cf920869ce057e6252604f973257173 | size: 358 | ext: py | lang: Python
max_stars: cogs/help.py | cat-central/bonbons | 47fd1aa6ba16b0e705d2bfc716db60707def1d37 | ["MIT"] | count: 3 | 2022-02-21T11:48:22.000Z to 2022-03-29T04:59:59.000Z
max_issues: cogs/help.py | cat-central/bonbons | 47fd1aa6ba16b0e705d2bfc716db60707def1d37 | ["MIT"] | count: 1 | 2022-02-27T16:57:58.000Z to 2022-02-27T16:57:58.000Z
max_forks: cogs/help.py | sifte/bonbons | ca2f367b445b0374aabbc17cb17be1b020879a3a | ["MIT"] | count: null | null to null
content:
from discord.ext.commands import Cog

from helpers.bot import Bonbons
from helpers.help.help import CustomHelpCommand


class Help(Cog):
    def __init__(self, bot: Bonbons) -> None:
        self.bot = bot
        self.bot.help_command = CustomHelpCommand()


async def setup(bot: Bonbons) -> None:
    print("Loaded: Help")
    await bot.add_cog(Help(bot))
avg_line_length: 22.375 | max_line_length: 51 | alphanum_fraction: 0.701117
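The cog above exposes an `async def setup(bot)` entry point in the discord.py 2.x extension style. A hedged sketch of how such an extension is typically loaded follows; the bot class, prefix, and intents here are placeholders, not code from the bonbons repository.

# Hypothetical loading sketch (not from the bonbons repo): an extension with an
# "async def setup(bot)" entry point, like cogs/help.py above, is normally loaded
# with Bot.load_extension in discord.py 2.x, e.g. from the bot's setup_hook.
import discord
from discord.ext import commands

class MyBot(commands.Bot):
    async def setup_hook(self) -> None:
        await self.load_extension("cogs.help")  # runs cogs.help.setup(self)

bot = MyBot(command_prefix="!", intents=discord.Intents.default())
# bot.run("TOKEN")  # token omitted; placeholder only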
hexsha: cca5ad1db73624d5f51e7a1b02622662e51e4897 | size: 1,320 | ext: py | lang: Python
max_stars: examples/sorting/cluster_auto.py | espenhgn/SpikeSort | 68dbc2180609e0e7430453229ab1f3a2b2d59bdc | ["BSD-2-Clause-FreeBSD"] | count: null | null to null
max_issues: examples/sorting/cluster_auto.py | espenhgn/SpikeSort | 68dbc2180609e0e7430453229ab1f3a2b2d59bdc | ["BSD-2-Clause-FreeBSD"] | count: null | null to null
max_forks: examples/sorting/cluster_auto.py | espenhgn/SpikeSort | 68dbc2180609e0e7430453229ab1f3a2b2d59bdc | ["BSD-2-Clause-FreeBSD"] | count: null | null to null
content:
#!/usr/bin/env python
#coding=utf-8
"""
Based on raw recordings detect spikes, calculate features and do automatic
clustering with gaussian mixture models.
"""
import numpy as np
import os, sys

import spike_sort as sort
from spike_sort.io.filters import PyTablesFilter, BakerlabFilter
import spike_sort.ui.manual_sort
import tables

DATAPATH = os.environ['DATAPATH']

if __name__ == "__main__":
    h5_fname = os.path.join(DATAPATH, "tutorial.h5")
    h5filter = PyTablesFilter(h5_fname, 'a')
    dataset = "/SubjectA/session01/el1"
    sp_win = [-0.2, 0.8]

    sp = h5filter.read_sp(dataset)
    spt = sort.extract.detect_spikes(sp, contact=3,
                                     thresh='auto')
    spt = sort.extract.align_spikes(sp, spt, sp_win, type="max", resample=10)
    sp_waves = sort.extract.extract_spikes(sp, spt, sp_win)
    features = sort.features.combine(
        (
            sort.features.fetP2P(sp_waves),
            sort.features.fetPCs(sp_waves)),
        norm=True
    )
    clust_idx = sort.cluster.cluster("gmm", features, 4)

    spike_sort.ui.plotting.plot_features(features, clust_idx)
    spike_sort.ui.plotting.figure()
    spike_sort.ui.plotting.plot_spikes(sp_waves, clust_idx, n_spikes=200)
    spike_sort.ui.plotting.show()

    h5filter.close()
avg_line_length: 26.4 | max_line_length: 77 | alphanum_fraction: 0.668939
hexsha: a63a23e0faefbb82bca7a77f3b7c85a8b25ecfb2 | size: 265 | ext: py | lang: Python
max_stars: src/cogs/Cog.py | akabinds/pincer-bot-template | aed9a708b4c96deb030b23e327d64474cc92dcea | ["MIT"] | count: 2 | 2022-01-06T02:25:19.000Z to 2022-01-06T02:28:05.000Z
max_issues: src/cogs/Cog.py | akabinds/pincer-bot-template | aed9a708b4c96deb030b23e327d64474cc92dcea | ["MIT"] | count: null | null to null
max_forks: src/cogs/Cog.py | akabinds/pincer-bot-template | aed9a708b4c96deb030b23e327d64474cc92dcea | ["MIT"] | count: null | null to null
content:
from pincer import Client, command


class Cog:
    def __init__(self, client: Client) -> None:
        self.client = client

    @command(guild="YOUR_TESTING_GUILD_ID", name='ping', description='Pong!')
    async def ping(self):
        return "pong"


setup = Cog
avg_line_length: 20.384615 | max_line_length: 77 | alphanum_fraction: 0.656604
hexsha: 12719b688cbedfc184310183141155e6ea88f7aa | size: 1,490 | ext: py | lang: Python
max_stars: Aula 38/Back/Dao/dao_sgds.py | ohanamirella/TrabalhosPython | 453a97848654b0391d0d717bf102f6c466f79b3d | ["MIT"] | count: 1 | 2020-03-09T13:38:55.000Z to 2020-03-09T13:38:55.000Z
max_issues: Aula 38/Back/Dao/dao_sgds.py | ohanamirella/TrabalhosPython | 453a97848654b0391d0d717bf102f6c466f79b3d | ["MIT"] | count: null | null to null
max_forks: Aula 38/Back/Dao/dao_sgds.py | ohanamirella/TrabalhosPython | 453a97848654b0391d0d717bf102f6c466f79b3d | ["MIT"] | count: null | null to null
content:
import MySQLdb

from Model.sgbds_model import SgbdsModel


class SgbdsDao:
    conexao = MySQLdb.connect(host='mysql.padawans.dev', database='padawans', user='padawans', passwd='vm2019')
    cursor = conexao.cursor()

    def insert(self, sgbds: SgbdsModel):
        inserir = f"""
            INSERT INTO tb_sgbds (
                id_sgbds,
                sgbd,
                desc_sgbd
            )
            VALUES (
                {sgbds.id_sgbds},
                '{sgbds.sgbd}',
                '{sgbds.desc_sgbd}'
            )
        """
        self.cursor.execute(inserir)
        self.conexao.commit()
        id_sgbds_inserido = self.cursor.lastrowid
        return id_sgbds_inserido

    def read(self, id):
        ler = f"""
            SELECT * FROM tb_sgbds
            WHERE id_sgbds = {id}
        """
        self.cursor.execute(ler)
        lido = self.cursor.fetchall()
        return lido

    def update(self, sgbds: SgbdsModel):
        # Column and attribute names follow insert() (sgbd, desc_sgbd);
        # the SET items need commas between them.
        atualizar = f"""
            UPDATE tb_sgbds
            SET
                id_sgbds = {sgbds.id_sgbds},
                sgbd = '{sgbds.sgbd}',
                desc_sgbd = '{sgbds.desc_sgbd}'
            WHERE id_sgbds = {sgbds.id_sgbds}
        """
        self.cursor.execute(atualizar)
        self.conexao.commit()

    def delete(self, id):
        deletar = f"""
            DELETE FROM tb_sgbds
            WHERE id_sgbds = {id}
        """
        self.cursor.execute(deletar)
        self.conexao.commit()
avg_line_length: 27.592593 | max_line_length: 111 | alphanum_fraction: 0.512081
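The DAO above builds SQL by interpolating values into f-strings. As an illustrative variant (not the repository's code), the same insert can be written with MySQLdb's parameter substitution, which lets the driver escape the values; table and attribute names follow the record above.

# Illustrative rewrite of SgbdsDao.insert using parameter substitution instead of
# f-string interpolation (a sketch, not the repository's code).
def insert(self, sgbds):
    inserir = """
        INSERT INTO tb_sgbds (id_sgbds, sgbd, desc_sgbd)
        VALUES (%s, %s, %s)
    """
    # MySQLdb substitutes and escapes the parameters itself
    self.cursor.execute(inserir, (sgbds.id_sgbds, sgbds.sgbd, sgbds.desc_sgbd))
    self.conexao.commit()
    return self.cursor.lastrowid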
hexsha: ea2bfdd39cf8fd80d4e2883fb02ed24e16b01ba4 | size: 2,987 | ext: py | lang: Python
max_stars: app/recipe/tests/test_ingredients_api.py | tsatsujnr139/recipe-app-api | 08b26254de48277518a204328ae1bf753049354b | ["MIT"] | count: null | null to null
max_issues: app/recipe/tests/test_ingredients_api.py | tsatsujnr139/recipe-app-api | 08b26254de48277518a204328ae1bf753049354b | ["MIT"] | count: null | null to null
max_forks: app/recipe/tests/test_ingredients_api.py | tsatsujnr139/recipe-app-api | 08b26254de48277518a204328ae1bf753049354b | ["MIT"] | count: null | null to null
content:
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase

from rest_framework import status
from rest_framework.test import APIClient

from core.models import Ingredient
from recipe.serializers import IngredientSerializer

INGREDIENTS_URL = reverse('recipe:ingredient-list')


def sample_user():
    return get_user_model().objects.create_user(email='user@company.com',
                                                password='password')


class PublicIngredientsApiTests(TestCase):
    """Test the public ingredients APIs"""

    def setUp(self):
        self.client = APIClient()

    def test_login_required_to_retrieve_ingredient(self):
        """Test that login is required to retrieve ingredients"""
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)


class PrivateIngredientApiTests(TestCase):
    """Test the authorized user ingredient APIs"""

    def setUp(self):
        self.user = sample_user()
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredients(self):
        """Test retrieving ingredients"""
        Ingredient.objects.create(user=self.user, name='salami')
        Ingredient.objects.create(user=self.user, name='onion')
        res = self.client.get(INGREDIENTS_URL)
        ingredient = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredient, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_authenticated_user(self):
        """Test that ingredients are for the authenticated user"""
        user2 = get_user_model().objects.create_user(email='user2@company.com',
                                                     password='password')
        Ingredient.objects.create(user=user2, name='bacon')
        ingredient = Ingredient.objects.create(user=self.user, name='nutmeg')
        res = self.client.get(INGREDIENTS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)

    def test_create_ingredient_successfully(self):
        """Test that a new ingredient is created successfully"""
        payload = {
            'name': 'test_ingredient'
        }
        self.client.post(INGREDIENTS_URL, payload)
        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()  # exists() must be called; the unbound method is always truthy
        self.assertTrue(exists)

    def test_create_ingredient_invalid_payload_fails(self):
        """Test that creating an ingredient with an invalid payload
        returns a validation error"""
        payload = {
            'name': ''
        }
        res = self.client.post(INGREDIENTS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
avg_line_length: 33.561798 | max_line_length: 79 | alphanum_fraction: 0.670238
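The test module above exercises `core.models.Ingredient` and `recipe.serializers.IngredientSerializer` without showing them. Below is a hedged sketch of a minimal model and serializer that would be consistent with those tests; field names are inferred from the tests, and the actual recipe-app-api code may differ.

# Hypothetical sketch (not from tsatsujnr139/recipe-app-api): a minimal Ingredient model
# and serializer consistent with what the tests above exercise.
from django.conf import settings
from django.db import models
from rest_framework import serializers


class Ingredient(models.Model):
    """Ingredient owned by a user, created with (user=..., name=...) in the tests."""
    name = models.CharField(max_length=255)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    def __str__(self):
        return self.name


class IngredientSerializer(serializers.ModelSerializer):
    """Serializes the fields the tests compare against res.data."""
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id',)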
hexsha: 095086de424b94ef436edd3bd0fcf917cc36dcf7 | size: 71,882 | ext: py | lang: Python
max_stars: wrappers/python/indy/anoncreds.py | evernym/indy-sdk | 714d449353518f929d9787d3156af785e2a42ccb | ["Apache-2.0"] | count: 5 | 2018-04-09T12:26:28.000Z to 2019-06-12T01:45:30.000Z
max_issues: wrappers/python/indy/anoncreds.py | evernym/indy-sdk | 714d449353518f929d9787d3156af785e2a42ccb | ["Apache-2.0"] | count: 9 | 2019-01-22T22:31:54.000Z to 2019-04-11T21:45:09.000Z
max_forks: wrappers/python/indy/anoncreds.py | evernym/indy-sdk | 714d449353518f929d9787d3156af785e2a42ccb | ["Apache-2.0"] | count: 19 | 2018-04-25T16:08:43.000Z to 2022-01-11T10:18:38.000Z
content:
from .libindy import do_call, create_cb
from typing import Optional
from ctypes import *
import logging
async def issuer_create_schema(issuer_did: str,
name: str,
version: str,
attrs: str) -> (str, str):
"""
Create credential schema entity that describes credential attributes list and allows credentials
interoperability.
Schema is public and intended to be shared with all anoncreds workflow actors usually by publishing SCHEMA transaction
to Indy distributed ledger.
It is IMPORTANT for current version POST Schema in Ledger and after that GET it from Ledger
with correct seq_no to save compatibility with Ledger.
After that can call indy_issuer_create_and_store_credential_def to build corresponding Credential Definition.
:param issuer_did: DID of schema issuer
:param name: a name of the schema
:param version: a version of the schema
:param attrs: a list of schema attributes descriptions (the number of attributes should be less or equal than 125)
:return:
schema_id: identifier of created schema
schema_json: schema as json
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_schema: >>> issuer_did: %r, name: %r, version: %r, attrs: %r",
issuer_did,
name,
version,
attrs)
if not hasattr(issuer_create_schema, "cb"):
logger.debug("issuer_create_schema: Creating callback")
issuer_create_schema.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
c_issuer_did = c_char_p(issuer_did.encode('utf-8'))
c_name = c_char_p(name.encode('utf-8'))
c_version = c_char_p(version.encode('utf-8'))
c_attrs = c_char_p(attrs.encode('utf-8'))
(schema_id, schema_json) = await do_call('indy_issuer_create_schema',
c_issuer_did,
c_name,
c_version,
c_attrs,
issuer_create_schema.cb)
res = (schema_id.decode(), schema_json.decode())
logger.debug("issuer_create_schema: <<< res: %r", res)
return res
async def issuer_create_and_store_credential_def(wallet_handle: int,
issuer_did: str,
schema_json: str,
tag: str,
signature_type: Optional[str],
config_json: Optional[str]) -> (str, str):
"""
Create credential definition entity that encapsulates credentials issuer DID, credential schema, secrets used for
signing credentials and secrets used for credentials revocation.
Credential definition entity contains private and public parts. Private part will be stored in the wallet.
Public part will be returned as json intended to be shared with all anoncreds workflow actors usually by
publishing CRED_DEF transaction to Indy distributed ledger.
It is IMPORTANT for current version GET Schema from Ledger with correct seq_no to save compatibility with Ledger.
:param wallet_handle: wallet handle (created by open_wallet).
:param issuer_did: a DID of the issuer signing cred_def transaction to the Ledger
:param schema_json: credential schema as a json
:param tag: allows to distinct between credential definitions for the same issuer and schema
:param signature_type: credential definition type (optional, 'CL' by default) that defines credentials signature and revocation math.
Supported types are:
- 'CL': Camenisch-Lysyanskaya credential signature type
:param config_json: (optional) type-specific configuration of credential definition as json:
- 'CL':
- support_revocation: whether to request non-revocation credential (optional, default false)
:return:
cred_def_id: identifier of created credential definition
cred_def_json: public part of created credential definition
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_and_store_credential_def: >>> wallet_handle: %r, issuer_did: %r, schema_json: %r,"
" tag: %r, signature_type: %r, config_json: %r",
wallet_handle,
issuer_did,
schema_json,
tag,
signature_type,
config_json)
if not hasattr(issuer_create_and_store_credential_def, "cb"):
logger.debug("issuer_create_and_store_credential_def: Creating callback")
issuer_create_and_store_credential_def.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_issuer_did = c_char_p(issuer_did.encode('utf-8'))
c_schema_json = c_char_p(schema_json.encode('utf-8'))
c_tag = c_char_p(tag.encode('utf-8'))
c_signature_type = c_char_p(signature_type.encode('utf-8')) if signature_type is not None else None
c_config_json = c_char_p(config_json.encode('utf-8')) if config_json is not None else None
(credential_def_id, credential_def_json) = await do_call('indy_issuer_create_and_store_credential_def',
c_wallet_handle,
c_issuer_did,
c_schema_json,
c_tag,
c_signature_type,
c_config_json,
issuer_create_and_store_credential_def.cb)
res = (credential_def_id.decode(), credential_def_json.decode())
logger.debug("issuer_create_and_store_credential_def: <<< res: %r", res)
return res
async def issuer_create_and_store_revoc_reg(wallet_handle: int,
issuer_did: str,
revoc_def_type: Optional[str],
tag: str,
cred_def_id: str,
config_json: str,
tails_writer_handle: int) -> (str, str, str):
"""
Create a new revocation registry for the given credential definition as tuple of entities:
- Revocation registry definition that encapsulates credentials definition reference, revocation type specific configuration and
secrets used for credentials revocation
- Revocation registry state that stores the information about revoked entities in a non-disclosing way. The state can be
represented as ordered list of revocation registry entries were each entry represents the list of revocation or issuance operations.
Revocation registry definition entity contains private and public parts. Private part will be stored in the wallet. Public part
will be returned as json intended to be shared with all anoncreds workflow actors usually by publishing REVOC_REG_DEF transaction
to Indy distributed ledger.
Revocation registry state is stored on the wallet and also intended to be shared as the ordered list of REVOC_REG_ENTRY transactions.
This call initializes the state in the wallet and returns the initial entry.
Some revocation registry types (for example, 'CL_ACCUM') can require generation of binary blob called tails used to hide information about revoked credentials in public
revocation registry and intended to be distributed out of leger (REVOC_REG_DEF transaction will still contain uri and hash of tails).
This call requires access to pre-configured blob storage writer instance handle that will allow to write generated tails.
:param wallet_handle: wallet handle (created by open_wallet).
:param issuer_did: a DID of the issuer signing transaction to the Ledger
:param revoc_def_type: revocation registry type (optional, default value depends on credential definition type). Supported types are:
- 'CL_ACCUM': Type-3 pairing based accumulator. Default for 'CL' credential definition type
:param tag: allows to distinct between revocation registries for the same issuer and credential definition
:param cred_def_id: id of stored in ledger credential definition
:param config_json: type-specific configuration of revocation registry as json:
- 'CL_ACCUM':
"issuance_type": (optional) type of issuance. Currently supported:
1) ISSUANCE_BY_DEFAULT: all indices are assumed to be issued and initial accumulator is calculated over all indices;
Revocation Registry is updated only during revocation.
2) ISSUANCE_ON_DEMAND: nothing is issued initially accumulator is 1 (used by default);
"max_cred_num": maximum number of credentials the new registry can process (optional, default 100000)
}
:param tails_writer_handle:
:return:
revoc_reg_id: identifier of created revocation registry definition
revoc_reg_def_json: public part of revocation registry definition
revoc_reg_entry_json: revocation registry entry that defines initial state of revocation registry
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_and_store_revoc_reg: >>> wallet_handle: %r, issuer_did: %r, revoc_def_type: %r,"
" tag: %r, cred_def_id: %r, config_json: %r, tails_writer_handle: %r",
wallet_handle,
issuer_did,
revoc_def_type,
tag,
cred_def_id,
config_json,
tails_writer_handle)
if not hasattr(issuer_create_and_store_revoc_reg, "cb"):
logger.debug("issuer_create_and_store_revoc_reg: Creating callback")
issuer_create_and_store_revoc_reg.cb = create_cb(
CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_issuer_did = c_char_p(issuer_did.encode('utf-8'))
c_revoc_def_type = c_char_p(revoc_def_type.encode('utf-8')) if revoc_def_type is not None else None
c_tag = c_char_p(tag.encode('utf-8'))
c_cred_def_id = c_char_p(cred_def_id.encode('utf-8'))
c_config_json = c_char_p(config_json.encode('utf-8'))
c_tails_writer_handle = c_int32(tails_writer_handle)
(rev_reg_id, rev_reg_def_json, rev_reg_entry_json) = await do_call('indy_issuer_create_and_store_revoc_reg',
c_wallet_handle,
c_issuer_did,
c_revoc_def_type,
c_tag,
c_cred_def_id,
c_config_json,
c_tails_writer_handle,
issuer_create_and_store_revoc_reg.cb)
res = (rev_reg_id.decode(), rev_reg_def_json.decode(), rev_reg_entry_json.decode())
logger.debug("issuer_create_and_store_revoc_reg: <<< res: %r", res)
return res
async def issuer_create_credential_offer(wallet_handle: int,
cred_def_id: str) -> str:
"""
Create credential offer that will be used by Prover for
credential request creation. Offer includes nonce and key correctness proof
for authentication between protocol steps and integrity checking.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_def_id: id of credential definition stored in the wallet
:return:credential offer json:
{
"schema_id": string,
"cred_def_id": string,
// Fields below can depend on Cred Def type
"nonce": string,
"key_correctness_proof" : <key_correctness_proof>
}
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_credential_offer: >>> wallet_handle: %r, cred_def_id: %r",
wallet_handle,
cred_def_id)
if not hasattr(issuer_create_credential_offer, "cb"):
logger.debug("issuer_create_credential_offer: Creating callback")
issuer_create_credential_offer.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_def_id = c_char_p(cred_def_id.encode('utf-8'))
credential_offer_json = await do_call('indy_issuer_create_credential_offer',
c_wallet_handle,
c_cred_def_id,
issuer_create_credential_offer.cb)
res = credential_offer_json.decode()
logger.debug("issuer_create_credential_offer: <<< res: %r", res)
return res
async def issuer_create_credential(wallet_handle: int,
cred_offer_json: str,
cred_req_json: str,
cred_values_json: str,
rev_reg_id: Optional[str],
blob_storage_reader_handle: Optional[int]) -> (str, Optional[str], Optional[str]):
"""
Check Cred Request for the given Cred Offer and issue Credential for the given Cred Request.
Cred Request must match Cred Offer. The credential definition and revocation registry definition
referenced in Cred Offer and Cred Request must be already created and stored into the wallet.
Information for this credential revocation will be store in the wallet as part of revocation registry under
generated cred_revoc_id local for this wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_offer_json: a cred offer created by issuer_create_credential_offer
:param cred_req_json: a credential request created by prover_create_credential_req
:param cred_values_json: a credential containing attribute values for each of requested attribute names.
Example:
{
"attr1" : {"raw": "value1", "encoded": "value1_as_int" },
"attr2" : {"raw": "value1", "encoded": "value1_as_int" }
}
:param rev_reg_id: (Optional) id of revocation registry definition stored in the wallet
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that
will allow to read revocation tails
:return:
cred_json: Credential json containing signed credential values
{
"schema_id": string,
"cred_def_id": string,
"rev_reg_def_id", Optional<string>,
"values": <see cred_values_json above>,
// Fields below can depend on Cred Def type
"signature": <signature>,
"signature_correctness_proof": <signature_correctness_proof>
}
cred_revoc_id: local id for revocation info (Can be used for revocation of this cred)
revoc_reg_delta_json: Revocation registry delta json with a newly issued credential
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_credential: >>> wallet_handle: %r, cred_offer_json: %r, cred_req_json: %r,"
" cred_values_json: %r, rev_reg_id: %r, blob_storage_reader_handle: %r",
wallet_handle,
cred_offer_json,
cred_req_json,
cred_values_json,
rev_reg_id,
blob_storage_reader_handle)
if not hasattr(issuer_create_credential, "cb"):
logger.debug("issuer_create_credential: Creating callback")
issuer_create_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8'))
c_cred_req_json = c_char_p(cred_req_json.encode('utf-8'))
c_cred_values_json = c_char_p(cred_values_json.encode('utf-8'))
c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8')) if rev_reg_id is not None else None
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle) if blob_storage_reader_handle else -1
(cred_json, cred_revoc_id, revoc_reg_delta_json) = await do_call('indy_issuer_create_credential',
c_wallet_handle,
c_cred_offer_json,
c_cred_req_json,
c_cred_values_json,
c_rev_reg_id,
c_blob_storage_reader_handle,
issuer_create_credential.cb)
cred_json = cred_json.decode()
cred_revoc_id = cred_revoc_id.decode() if cred_revoc_id else None
revoc_reg_delta_json = revoc_reg_delta_json.decode() if revoc_reg_delta_json else None
res = (cred_json, cred_revoc_id, revoc_reg_delta_json)
logger.debug("issuer_create_credential: <<< res: %r", res)
return res
async def issuer_revoke_credential(wallet_handle: int,
blob_storage_reader_handle: int,
rev_reg_id: str,
cred_revoc_id: str) -> str:
"""
Revoke a credential identified by a cred_revoc_id (returned by issuer_create_credential).
The corresponding credential definition and revocation registry must be already
created and stored in the wallet.
This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
Note that it is possible to accumulate deltas to reduce ledger load.
:param wallet_handle: wallet handle (created by open_wallet).
:param blob_storage_reader_handle: pre-configured blob storage reader instance handle that will allow
to read revocation tails
:param rev_reg_id: id of revocation registry stored in wallet
:param cred_revoc_id: local id for revocation info
:return: Revocation registry delta json with a revoked credential.
"""
logger = logging.getLogger(__name__)
logger.debug(
"issuer_revoke_credential: >>> wallet_handle: %r, blob_storage_reader_handle: %r, rev_reg_id: %r, "
"cred_revoc_id: %r",
wallet_handle,
blob_storage_reader_handle,
rev_reg_id,
cred_revoc_id)
if not hasattr(issuer_revoke_credential, "cb"):
logger.debug("issuer_revoke_credential: Creating callback")
issuer_revoke_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8'))
c_cred_revoc_id = c_char_p(cred_revoc_id.encode('utf-8'))
revoc_reg_delta_json = await do_call('indy_issuer_revoke_credential',
c_wallet_handle,
c_blob_storage_reader_handle,
c_rev_reg_id,
c_cred_revoc_id,
issuer_revoke_credential.cb)
res = revoc_reg_delta_json.decode()
logger.debug("issuer_revoke_credential: <<< res: %r", res)
return res
# async def issuer_recover_credential(wallet_handle: int,
# blob_storage_reader_handle: int,
# rev_reg_id: str,
# cred_revoc_id: str) -> str:
# """
# Recover a credential identified by a cred_revoc_id (returned by indy_issuer_create_cred).
#
# The corresponding credential definition and revocation registry must be already
# created and stored in the wallet.
#
# This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
# Note that it is possible to accumulate deltas to reduce ledger load.
#
# :param wallet_handle: wallet handle (created by open_wallet).
# :param blob_storage_reader_handle: pre-configured blob storage reader instance handle that will allow
# to read revocation tails
# :param rev_reg_id: id of revocation registry stored in wallet
# :param cred_revoc_id: local id for revocation info
# :return: Revocation registry update json with a revoked credential
# """
#
# logger = logging.getLogger(__name__)
# logger.debug(
# "issuer_recover_credential: >>> wallet_handle: %r, blob_storage_reader_handle: %r, rev_reg_id: %r, "
# "cred_revoc_id: %r",
# wallet_handle,
# blob_storage_reader_handle,
# rev_reg_id,
# cred_revoc_id)
#
# if not hasattr(issuer_recover_credential, "cb"):
# logger.debug("issuer_recover_credential: Creating callback")
# issuer_recover_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
#
# c_wallet_handle = c_int32(wallet_handle)
# c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
# c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8'))
# c_cred_revoc_id = c_char_p(cred_revoc_id.encode('utf-8'))
#
# revoc_reg_delta_json = await do_call('indy_issuer_recover_credential',
# c_wallet_handle,
# c_blob_storage_reader_handle,
# c_rev_reg_id,
# c_cred_revoc_id,
# issuer_recover_credential.cb)
# res = revoc_reg_delta_json.decode()
# logger.debug("issuer_recover_credential: <<< res: %r", res)
# return res
async def issuer_merge_revocation_registry_deltas(rev_reg_delta_json: str,
other_rev_reg_delta_json: str) -> str:
"""
Merge two revocation registry deltas (returned by issuer_create_credential or issuer_revoke_credential) to accumulate common delta.
Send common delta to ledger to reduce the load.
:param rev_reg_delta_json: revocation registry delta json
:param other_rev_reg_delta_json: revocation registry delta for which PrevAccum value is equal to current accum value of rev_reg_delta_json.
:return: Merged revocation registry delta
"""
logger = logging.getLogger(__name__)
logger.debug(
"issuer_merge_revocation_registry_deltas: >>> rev_reg_delta_json: %r, other_rev_reg_delta_json: %r",
rev_reg_delta_json,
other_rev_reg_delta_json)
if not hasattr(issuer_merge_revocation_registry_deltas, "cb"):
logger.debug("issuer_merge_revocation_registry_deltas: Creating callback")
issuer_merge_revocation_registry_deltas.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_rev_reg_delta_json = c_char_p(rev_reg_delta_json.encode('utf-8'))
c_other_rev_reg_delta_json = c_char_p(other_rev_reg_delta_json.encode('utf-8'))
merged_revoc_reg_delta_json = await do_call('indy_issuer_merge_revocation_registry_deltas',
c_rev_reg_delta_json,
c_other_rev_reg_delta_json,
issuer_merge_revocation_registry_deltas.cb)
res = merged_revoc_reg_delta_json.decode()
logger.debug("issuer_merge_revocation_registry_deltas: <<< res: %r", res)
return res
async def prover_create_master_secret(wallet_handle: int,
master_secret_name: Optional[str]) -> str:
"""
Creates a master secret with a given name and stores it in the wallet.
The name must be unique.
:param wallet_handle: wallet handle (created by open_wallet).
:param master_secret_name: (optional, if not present random one will be generated) new master id
:return: id of generated master secret.
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_master_secret: >>> wallet_handle: %r, master_secret_name: %r",
wallet_handle,
master_secret_name)
if not hasattr(prover_create_master_secret, "cb"):
logger.debug("prover_create_master_secret: Creating callback")
prover_create_master_secret.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_master_secret_name = c_char_p(master_secret_name.encode('utf-8')) if master_secret_name else None
out_master_secret_id = await do_call('indy_prover_create_master_secret',
c_wallet_handle,
c_master_secret_name,
prover_create_master_secret.cb)
res = out_master_secret_id.decode()
logger.debug("prover_create_master_secret: <<< res: %r", res)
return res
async def prover_create_credential_req(wallet_handle: int,
prover_did: str,
cred_offer_json: str,
cred_def_json: str,
master_secret_id: str) -> (str, str):
"""
Creates a credential request for the given credential offer.
The method creates a blinded master secret for a master secret identified by a provided name.
The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret)
The blinded master secret is a part of the credential request.
:param wallet_handle: wallet handle (created by open_wallet).
:param prover_did: a DID of the prover
:param cred_offer_json: credential offer as a json containing information about the issuer and a credential
:param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json>
:param master_secret_id: the id of the master secret stored in the wallet
:return:
cred_req_json: Credential request json for creation of credential by Issuer
{
"prover_did" : string,
"cred_def_id" : string,
// Fields below can depend on Cred Def type
"blinded_ms" : <blinded_master_secret>,
"blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>,
"nonce": string
}
cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential.
Note: cred_req_metadata_json mustn't be shared with Issuer.
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_credential_req: >>> wallet_handle: %r, prover_did: %r, cred_offer_json: %r,"
" cred_def_json: %r, master_secret_id: %r",
wallet_handle,
prover_did,
cred_offer_json,
cred_def_json,
master_secret_id)
if not hasattr(prover_create_credential_req, "cb"):
logger.debug("prover_create_credential_req: Creating callback")
prover_create_credential_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_prover_did = c_char_p(prover_did.encode('utf-8'))
c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8'))
c_cred_def_json = c_char_p(cred_def_json.encode('utf-8'))
c_master_secret_id = c_char_p(master_secret_id.encode('utf-8'))
(credential_req_json, credential_req_metadata_json) = await do_call('indy_prover_create_credential_req',
c_wallet_handle,
c_prover_did,
c_cred_offer_json,
c_cred_def_json,
c_master_secret_id,
prover_create_credential_req.cb)
credential_req_json = credential_req_json.decode()
credential_req_metadata_json = credential_req_metadata_json.decode()
res = (credential_req_json, credential_req_metadata_json)
logger.debug("prover_create_credential_req: <<< res: %r", res)
return res
async def prover_store_credential(wallet_handle: int,
cred_id: Optional[str],
cred_req_metadata_json: str,
cred_json: str,
cred_def_json: str,
rev_reg_def_json: Optional[str]) -> str:
"""
Check credential provided by Issuer for the given credential request,
updates the credential by a master secret and stores in a secure wallet.
To support efficient search the following tags will be created for stored credential:
{
"schema_id": <credential schema id>,
"schema_issuer_did": <credential schema issuer did>,
"schema_name": <credential schema name>,
"schema_version": <credential schema version>,
"issuer_did": <credential issuer did>,
"cred_def_id": <credential definition id>,
"rev_reg_id": <credential revocation registry id>, # "None" as string if not present
// for every attribute in <credential values>
"attr::<attribute name>::marker": "1",
"attr::<attribute name>::value": <attribute raw value>,
}
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_id: (optional, default is a random one) identifier by which credential will be stored in the wallet
:param cred_req_metadata_json: a credential request metadata created by prover_create_credential_req
:param cred_json: credential json received from issuer
:param cred_def_json: credential definition json related to <cred_def_id> in <cred_json>
:param rev_reg_def_json: revocation registry definition json related to <rev_reg_def_id> in <cred_json>
:return: cred_id: identifier by which credential is stored in the wallet
"""
logger = logging.getLogger(__name__)
logger.debug("prover_store_credential: >>> wallet_handle: %r, cred_id: %r, "
"cred_req_metadata_json: %r, cred_json: %r, cred_def_json: %r, rev_reg_def_json: %r",
wallet_handle,
cred_id,
cred_req_metadata_json,
cred_json,
cred_def_json,
rev_reg_def_json)
if not hasattr(prover_store_credential, "cb"):
logger.debug("prover_store_credential: Creating callback")
prover_store_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_id = c_char_p(cred_id.encode('utf-8')) if cred_id else None
c_cred_req_metadata_json = c_char_p(cred_req_metadata_json.encode('utf-8'))
c_cred_json = c_char_p(cred_json.encode('utf-8'))
c_cred_def_json = c_char_p(cred_def_json.encode('utf-8'))
c_rev_reg_def_json = c_char_p(rev_reg_def_json.encode('utf-8')) if rev_reg_def_json is not None else None
cred_id = await do_call('indy_prover_store_credential',
c_wallet_handle,
c_cred_id,
c_cred_req_metadata_json,
c_cred_json,
c_cred_def_json,
c_rev_reg_def_json,
prover_store_credential.cb)
res = cred_id.decode()
logger.debug("prover_store_credential: <<< res: %r", res)
return res
async def prover_get_credential(wallet_handle: int,
cred_id: str) -> str:
"""
Gets human readable credential by the given id.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_id: Identifier by which requested credential is stored in the wallet
:return: credential json
{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_get_credential: >>> wallet_handle: %r, cred_id: %r",
wallet_handle,
cred_id)
if not hasattr(prover_get_credential, "cb"):
logger.debug("prover_get_credential: Creating callback")
prover_get_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_id = c_char_p(cred_id.encode('utf-8'))
credentials_json = await do_call('indy_prover_get_credential',
c_wallet_handle,
c_cred_id,
prover_get_credential.cb)
res = credentials_json.decode()
logger.debug("prover_get_credential: <<< res: %r", res)
return res
async def prover_delete_credential(wallet_handle: int,
cred_id: str) -> None:
"""
Delete identified credential from wallet.
:param wallet_handle: wallet handle (created by open_wallet).
:param cred_id: identifier by which wallet stores credential to delete
"""
logger = logging.getLogger(__name__)
logger.debug("prover_delete_credential: >>> wallet_handle: %r, cred_id: %r",
wallet_handle,
cred_id)
if not hasattr(prover_delete_credential, "cb"):
logger.debug("prover_delete_credential: Creating callback")
prover_delete_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_cred_id = c_char_p(cred_id.encode('utf-8'))
await do_call('indy_prover_delete_credential',
c_wallet_handle,
c_cred_id,
prover_delete_credential.cb)
logger.debug("prover_delete_credential: <<<")
async def prover_get_credentials(wallet_handle: int,
filter_json: str) -> str:
"""
Gets human readable credentials according to the filter.
If filter is NULL, then all credentials are returned.
Credentials can be filtered by tags created during saving of credential.
NOTE: This method is deprecated because immediately returns all fetched credentials.
Use <prover_search_credentials> to fetch records by small batches.
:param wallet_handle: wallet handle (created by open_wallet).
:param filter_json: filter for credentials
{
"schema_id": string, (Optional)
"schema_issuer_did": string, (Optional)
"schema_name": string, (Optional)
"schema_version": string, (Optional)
"issuer_did": string, (Optional)
"cred_def_id": string, (Optional)
}
:return: credentials json
[{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}]
"""
logger = logging.getLogger(__name__)
logger.debug("prover_get_credentials: >>> wallet_handle: %r, filter_json: %r",
wallet_handle,
filter_json)
if not hasattr(prover_get_credentials, "cb"):
logger.debug("prover_get_credentials: Creating callback")
prover_get_credentials.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_filter_json = c_char_p(filter_json.encode('utf-8'))
credentials_json = await do_call('indy_prover_get_credentials',
c_wallet_handle,
c_filter_json,
prover_get_credentials.cb)
res = credentials_json.decode()
logger.debug("prover_get_credentials: <<< res: %r", res)
return res
async def prover_search_credentials(wallet_handle: int,
query_json: str) -> (int, int):
"""
Search for credentials stored in wallet.
Credentials can be filtered by tags created during saving of credential.
Instead of immediately returning of fetched credentials this call returns search_handle that can be used later
to fetch records by small batches (with prover_credentials_search_fetch_records).
:param wallet_handle: wallet handle (created by open_wallet).
:param query_json: wql style filter for credentials searching based on tags.
where wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
:return:
search_handle: Search handle that can be used later to fetch records by small batches
(with prover_credentials_search_fetch_records)
total_count: Total count of records
"""
logger = logging.getLogger(__name__)
logger.debug("prover_search_credentials: >>> wallet_handle: %r, query_json: %r",
wallet_handle,
query_json)
if not hasattr(prover_search_credentials, "cb"):
logger.debug("prover_search_credentials: Creating callback")
prover_search_credentials.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32, c_uint))
c_wallet_handle = c_int32(wallet_handle)
c_query_json = c_char_p(query_json.encode('utf-8'))
res = await do_call('indy_prover_search_credentials',
c_wallet_handle,
c_query_json,
prover_search_credentials.cb)
logger.debug("prover_search_credentials: <<< res: %r", res)
return res
async def prover_fetch_credentials(search_handle: int,
count: int) -> str:
"""
Fetch next credentials for search.
:param search_handle: Search handle (created by prover_open_credentials_search)
:param count: Count of records to fetch
:return: credentials_json: List of credentials:
[{
"referent": string, // cred_id in the wallet
"attrs": {"key1":"raw_value1", "key2":"raw_value2"},
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<string>,
"cred_rev_id": Optional<string>
}]
NOTE: The list of length less than the requested count means credentials search iterator is completed.
"""
logger = logging.getLogger(__name__)
logger.debug("prover_fetch_credentials: >>> search_handle: %r, count: %r",
search_handle,
count)
if not hasattr(prover_fetch_credentials, "cb"):
logger.debug("prover_fetch_credentials: Creating callback")
prover_fetch_credentials.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_search_handle = c_int32(search_handle)
c_count = c_uint(count)
credentials_json = await do_call('indy_prover_fetch_credentials',
c_search_handle,
c_count,
prover_fetch_credentials.cb)
res = credentials_json.decode()
logger.debug("prover_fetch_credentials: <<< res: %r", res)
return res
async def prover_close_credentials_search(search_handle: int) -> None:
"""
Close credentials search (make search handle invalid)
:param search_handle: Search handle (created by prover_open_credentials_search)
:return: None
"""
logger = logging.getLogger(__name__)
logger.debug("prover_close_credentials_search: >>> search_handle: %r",
search_handle)
if not hasattr(prover_close_credentials_search, "cb"):
logger.debug("prover_close_credentials_search: Creating callback")
prover_close_credentials_search.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_search_handle = c_int32(search_handle)
res = await do_call('indy_prover_close_credentials_search',
c_search_handle,
prover_close_credentials_search.cb)
logger.debug("prover_close_credentials_search: <<< res: %r", res)
return res
async def prover_get_credentials_for_proof_req(wallet_handle: int,
proof_request_json: str) -> str:
"""
Gets human readable credentials matching the given proof request.
NOTE: This method is deprecated because immediately returns all fetched credentials.
Use <prover_search_credentials_for_proof_req> to fetch records by small batches.
:param wallet_handle: wallet handle (created by open_wallet).
:param proof_request_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must proof non-revocation
// for date in this interval for each attribute
// (can be overridden on attribute level)
}
where:
attr_referent: Proof-request local identifier of requested attribute
attr_info: Describes requested attribute
{
"name": string, // attribute name, (case insensitive and ignore spaces)
"restrictions": Optional<[<filter_json>]>, // see above
// if specified, credential must satisfy to one of the given restriction.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must proof non-revocation
// for date in this interval this attribute
// (overrides proof level interval)
}
predicate_referent: Proof-request local identifier of requested attribute predicate
predicate_info: Describes requested attribute predicate
{
"name": attribute name, (case insensitive and ignore spaces)
"p_type": predicate type (Currently >= only)
"p_value": predicate value
"restrictions": Optional<[<filter_json>]>, // see above
// if specified, credential must satisfy to one of the given restriction.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must proof non-revocation
// for date in this interval this attribute
// (overrides proof level interval)
}
non_revoc_interval: Defines non-revocation interval
{
"from": Optional<int>, // timestamp of interval beginning
"to": Optional<int>, // timestamp of interval ending
}
:return: json with credentials for the given proof request.
{
"requested_attrs": {
"<attr_referent>": [{ cred_info: <credential_info>, interval: Optional<non_revoc_interval> }],
...,
},
"requested_predicates": {
"requested_predicates": [{ cred_info: <credential_info>, timestamp: Optional<integer> }, { cred_info: <credential_2_info>, timestamp: Optional<integer> }],
"requested_predicate_2_referent": [{ cred_info: <credential_2_info>, timestamp: Optional<integer> }]
}
}, where credential is
{
"referent": <string>,
"attrs": [{"attr_name" : "attr_raw_value"}],
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<int>,
"cred_rev_id": Optional<int>,
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_get_credentials_for_proof_req: >>> wallet_handle: %r, proof_request_json: %r",
wallet_handle,
proof_request_json)
if not hasattr(prover_get_credentials_for_proof_req, "cb"):
logger.debug("prover_get_credentials_for_proof_req: Creating callback")
prover_get_credentials_for_proof_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_proof_request_json = c_char_p(proof_request_json.encode('utf-8'))
credentials_json = await do_call('indy_prover_get_credentials_for_proof_req',
c_wallet_handle,
c_proof_request_json,
prover_get_credentials_for_proof_req.cb)
res = credentials_json.decode()
logger.debug("prover_get_credentials_for_proof_req: <<< res: %r", res)
return res
async def prover_search_credentials_for_proof_req(wallet_handle: int,
proof_request_json: str,
extra_query_json: Optional[str]) -> int:
"""
Search for credentials matching the given proof request.
Instead of immediately returning of fetched credentials this call returns search_handle that can be used later
to fetch records by small batches (with prover_fetch_credentials_for_proof_req).
:param wallet_handle: wallet handle (created by open_wallet).
:param proof_request_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must proof non-revocation
// for date in this interval for each attribute
// (can be overridden on attribute level)
}
:param extra_query_json:(Optional) List of extra queries that will be applied to correspondent attribute/predicate:
{
"<attr_referent>": <wql query>,
"<predicate_referent>": <wql query>,
}
where wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
:return: search_handle: Search handle that can be used later to fetch records by small batches (with prover_fetch_credentials_for_proof_req)
"""
logger = logging.getLogger(__name__)
logger.debug("prover_search_credentials_for_proof_req: >>> wallet_handle: %r, proof_request_json: %r, "
"extra_query_json: %r",
wallet_handle,
proof_request_json,
extra_query_json)
if not hasattr(prover_search_credentials_for_proof_req, "cb"):
logger.debug("prover_search_credentials_for_proof_req: Creating callback")
prover_search_credentials_for_proof_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_proof_request_json = c_char_p(proof_request_json.encode('utf-8'))
c_extra_query_json = c_char_p(extra_query_json.encode('utf-8')) if extra_query_json is not None else None
res = await do_call('indy_prover_search_credentials_for_proof_req',
c_wallet_handle,
c_proof_request_json,
c_extra_query_json,
prover_search_credentials_for_proof_req.cb)
logger.debug("prover_search_credentials_for_proof_req: <<< res: %r", res)
return res
async def prover_fetch_credentials_for_proof_req(search_handle: int,
item_referent: str,
count: int) -> str:
"""
Fetch next records for the requested item using proof request search handle (created by prover_search_credentials_for_proof_req).
:param search_handle: Search handle (created by prover_search_credentials_for_proof_req)
:param item_referent: Referent of attribute/predicate in the proof request
:param count: Count of records to fetch
:return: credentials_json: List of credentials for the given proof request.
[{
cred_info: <credential_info>,
interval: Optional<non_revoc_interval>
}]
where credential_info is
{
"referent": <string>,
"attrs": [{"attr_name" : "attr_raw_value"}],
"schema_id": string,
"cred_def_id": string,
"rev_reg_id": Optional<int>,
"cred_rev_id": Optional<int>,
}
    NOTE: a returned list shorter than the requested count means that the search iterator corresponding to the requested <item_referent> is exhausted.
"""
logger = logging.getLogger(__name__)
logger.debug("prover_fetch_credentials_for_proof_req: >>> search_handle: %r, item_referent: %r, count: %r",
search_handle,
item_referent,
count)
if not hasattr(prover_fetch_credentials_for_proof_req, "cb"):
logger.debug("prover_fetch_credentials_for_proof_req: Creating callback")
prover_fetch_credentials_for_proof_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_search_handle = c_int32(search_handle)
c_item_referent = c_char_p(item_referent.encode('utf-8'))
c_count = c_uint(count)
credentials_json = await do_call('indy_prover_fetch_credentials_for_proof_req',
c_search_handle,
c_item_referent,
c_count,
prover_fetch_credentials_for_proof_req.cb)
res = credentials_json.decode()
logger.debug("prover_fetch_credentials_for_proof_req: <<< res: %r", res)
return res
async def prover_close_credentials_search_for_proof_req(search_handle: int) -> None:
"""
Close credentials search for proof request (make search handle invalid)
:param search_handle: Search handle (created by prover_search_credentials_for_proof_req)
:return: None
"""
logger = logging.getLogger(__name__)
logger.debug("prover_close_credentials_search_for_proof_req: >>> search_handle: %r",
search_handle)
if not hasattr(prover_close_credentials_search_for_proof_req, "cb"):
logger.debug("prover_close_credentials_search_for_proof_req: Creating callback")
prover_close_credentials_search_for_proof_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_search_handle = c_int32(search_handle)
res = await do_call('indy_prover_close_credentials_search_for_proof_req',
c_search_handle,
prover_close_credentials_search_for_proof_req.cb)
logger.debug("prover_close_credentials_search_for_proof_req: <<< res: %r", res)
return res
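# Illustrative sketch (not part of the original wrapper): the intended lifecycle of the
# three calls above — open a search, fetch records in small batches, then close the handle.
# The wallet handle, proof request and referent name are hypothetical placeholders.
async def _example_search_fetch_close(wallet_handle: int, proof_request_json: str) -> list:
    import json
    search_handle = await prover_search_credentials_for_proof_req(wallet_handle,
                                                                  proof_request_json,
                                                                  None)
    found = []
    try:
        while True:
            batch = json.loads(await prover_fetch_credentials_for_proof_req(search_handle,
                                                                            'attr1_referent',
                                                                            10))
            found.extend(batch)
            if len(batch) < 10:
                # A batch shorter than the requested count means the iterator is exhausted.
                break
    finally:
        await prover_close_credentials_search_for_proof_req(search_handle)
    return found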
async def prover_create_proof(wallet_handle: int,
proof_req_json: str,
requested_credentials_json: str,
master_secret_name: str,
schemas_json: str,
credential_defs_json: str,
rev_states_json: str) -> str:
"""
Creates a proof according to the given proof request
    Either a corresponding credential with optionally revealed attributes or a self-attested attribute must be provided
    for each requested attribute (see indy_prover_get_credentials_for_proof_req).
A proof request may request multiple credentials from different schemas and different issuers.
All required schemas, public keys and revocation registries must be provided.
    The proof request also contains a nonce.
    The proof contains either a proof or a self-attested attribute value for each requested attribute.
:param wallet_handle: wallet handle (created by open_wallet).
:param proof_req_json: proof request json
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
                                                // If specified, the prover must prove non-revocation
                                                // for a date in this interval for each attribute
// (can be overridden on attribute level)
}
:param requested_credentials_json: either a credential or self-attested attribute for each requested attribute
{
"self_attested_attributes": {
"self_attested_attribute_referent": string
},
"requested_attributes": {
"requested_attribute_referent_1": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }},
"requested_attribute_referent_2": {"cred_id": string, "timestamp": Optional<number>, revealed: <bool> }}
},
"requested_predicates": {
"requested_predicates_referent_1": {"cred_id": string, "timestamp": Optional<number> }},
}
}
:param master_secret_name: the id of the master secret stored in the wallet
:param schemas_json: all schemas json participating in the proof request
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof request
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_states_json: all revocation states json participating in the proof request
{
"rev_reg_def1_id": {
"timestamp1": <rev_state1>,
"timestamp2": <rev_state2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_state3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_state4>
},
}
where
wql query: indy-sdk/docs/design/011-wallet-query-language/README.md
attr_referent: Proof-request local identifier of requested attribute
attr_info: Describes requested attribute
{
"name": string, // attribute name, (case insensitive and ignore spaces)
"restrictions": Optional<[<wql query>]>,
                                                // if specified, the credential must satisfy one of the given restrictions.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
                                                // If specified, the prover must prove non-revocation
                                                // for a date in this interval for this attribute
// (overrides proof level interval)
}
predicate_referent: Proof-request local identifier of requested attribute predicate
predicate_info: Describes requested attribute predicate
{
"name": attribute name, (case insensitive and ignore spaces)
"p_type": predicate type (Currently >= only)
"p_value": predicate value
"restrictions": Optional<[<wql query>]>,
                                                // if specified, the credential must satisfy one of the given restrictions.
"non_revoked": Optional<<non_revoc_interval>>, // see below,
                                                // If specified, the prover must prove non-revocation
                                                // for a date in this interval for this attribute
// (overrides proof level interval)
}
non_revoc_interval: Defines non-revocation interval
{
"from": Optional<int>, // timestamp of interval beginning
"to": Optional<int>, // timestamp of interval ending
}
:return: Proof json
For each requested attribute either a proof (with optionally revealed attribute value) or
self-attested attribute value is provided.
Each proof is associated with a credential and corresponding schema_id, cred_def_id, rev_reg_id and timestamp.
There is also aggregated proof part common for all credential proofs.
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
}
"""
logger = logging.getLogger(__name__)
logger.debug("prover_create_proof: >>> wallet_handle: %r, proof_req_json: %r, requested_credentials_json: %r, "
"schemas_json: %r, master_secret_name: %r, credential_defs_json: %r, rev_infos_json: %r",
wallet_handle,
proof_req_json,
requested_credentials_json,
schemas_json,
master_secret_name,
credential_defs_json,
rev_states_json)
if not hasattr(prover_create_proof, "cb"):
logger.debug("prover_create_proof: Creating callback")
prover_create_proof.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_proof_req_json = c_char_p(proof_req_json.encode('utf-8'))
c_requested_credentials_json = c_char_p(requested_credentials_json.encode('utf-8'))
c_schemas_json = c_char_p(schemas_json.encode('utf-8'))
c_master_secret_name = c_char_p(master_secret_name.encode('utf-8'))
c_credential_defs_json = c_char_p(credential_defs_json.encode('utf-8'))
c_rev_infos_json = c_char_p(rev_states_json.encode('utf-8'))
proof_json = await do_call('indy_prover_create_proof',
c_wallet_handle,
c_proof_req_json,
c_requested_credentials_json,
c_master_secret_name,
c_schemas_json,
c_credential_defs_json,
c_rev_infos_json,
prover_create_proof.cb)
res = proof_json.decode()
logger.debug("prover_create_proof: <<< res: %r", res)
return res
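# Illustrative sketch (not part of the original wrapper): shaping the
# requested_credentials_json argument of prover_create_proof from a credential referent
# previously returned by prover_get_credentials_for_proof_req. All referent names below
# are hypothetical placeholders.
def _example_requested_credentials_json(cred_referent: str) -> str:
    import json
    return json.dumps({
        "self_attested_attributes": {},
        "requested_attributes": {
            "attr1_referent": {"cred_id": cred_referent, "revealed": True},
        },
        "requested_predicates": {
            "predicate1_referent": {"cred_id": cred_referent},
        },
    })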
async def verifier_verify_proof(proof_request_json: str,
proof_json: str,
schemas_json: str,
credential_defs_json: str,
rev_reg_defs_json: str,
rev_regs_json: str) -> bool:
"""
    Verifies a proof (of multiple credentials).
All required schemas, public keys and revocation registries must be provided.
:param proof_request_json:
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
                                                // If specified, the prover must prove non-revocation
                                                // for a date in this interval for each attribute
// (can be overridden on attribute level)
}
:param proof_json: created for request proof json
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
}
:param schemas_json: all schema jsons participating in the proof
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_reg_defs_json: all revocation registry definitions json participating in the proof
{
"rev_reg_def1_id": <rev_reg_def1_json>,
"rev_reg_def2_id": <rev_reg_def2_json>,
"rev_reg_def3_id": <rev_reg_def3_json>,
}
:param rev_regs_json: all revocation registries json participating in the proof
{
"rev_reg_def1_id": {
"timestamp1": <rev_reg1>,
"timestamp2": <rev_reg2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_reg3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_reg4>
},
}
    :return: valid: true if the proof is valid, false otherwise
"""
logger = logging.getLogger(__name__)
logger.debug("verifier_verify_proof: >>> proof_request_json: %r, proof_json: %r, schemas_json: %r, "
"credential_defs_jsons: %r, rev_reg_defs_json: %r, rev_regs_json: %r",
proof_request_json,
proof_json,
schemas_json,
credential_defs_json,
rev_reg_defs_json,
rev_regs_json)
if not hasattr(verifier_verify_proof, "cb"):
logger.debug("verifier_verify_proof: Creating callback")
verifier_verify_proof.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_bool))
c_proof_request_json = c_char_p(proof_request_json.encode('utf-8'))
c_proof_json = c_char_p(proof_json.encode('utf-8'))
c_schemas_json = c_char_p(schemas_json.encode('utf-8'))
c_credential_defs_json = c_char_p(credential_defs_json.encode('utf-8'))
c_rev_reg_defs_json = c_char_p(rev_reg_defs_json.encode('utf-8'))
c_rev_regs_json = c_char_p(rev_regs_json.encode('utf-8'))
res = await do_call('indy_verifier_verify_proof',
c_proof_request_json,
c_proof_json,
c_schemas_json,
c_credential_defs_json,
c_rev_reg_defs_json,
c_rev_regs_json,
verifier_verify_proof.cb)
logger.debug("verifier_verify_proof: <<< res: %r", res)
return res
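# Illustrative sketch (not part of the original wrapper): a verifier passes the same
# proof request that was given to the prover, plus the ledger artifacts keyed by their
# ids. Passing empty maps for the revocation arguments is shown here only as an
# assumption for the fully non-revocable case.
async def _example_verify(proof_request_json: str, proof_json: str,
                          schemas_json: str, credential_defs_json: str) -> bool:
    return await verifier_verify_proof(proof_request_json,
                                       proof_json,
                                       schemas_json,
                                       credential_defs_json,
                                       "{}",
                                       "{}")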
async def create_revocation_state(blob_storage_reader_handle: int,
rev_reg_def_json: str,
rev_reg_delta_json: str,
timestamp: int,
cred_rev_id: str) -> str:
"""
    Create a revocation state for a credential at a particular moment in time.
    :param blob_storage_reader_handle: configuration of the blob storage reader handle that allows reading the revocation tails
:param rev_reg_def_json: revocation registry definition json
:param rev_reg_delta_json: revocation registry definition delta json
:param timestamp: time represented as a total number of seconds from Unix Epoch
:param cred_rev_id: user credential revocation id in revocation registry
:return: revocation state json {
"rev_reg": <revocation registry>,
"witness": <witness>,
"timestamp" : integer
}
"""
logger = logging.getLogger(__name__)
logger.debug("create_revocation_info: >>> blob_storage_reader_handle: %r, rev_reg_def_json: %r,"
" rev_reg_delta_json: %r, timestamp: %r, cred_rev_id: %r",
blob_storage_reader_handle,
rev_reg_def_json,
rev_reg_delta_json,
timestamp,
cred_rev_id)
if not hasattr(create_revocation_state, "cb"):
logger.debug("create_revocation_state: Creating callback")
create_revocation_state.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
c_rev_reg_def_json = c_char_p(rev_reg_def_json.encode('utf-8'))
c_rev_reg_delta_json = c_char_p(rev_reg_delta_json.encode('utf-8'))
c_timestamp = c_uint64(timestamp)
c_cred_rev_id = c_char_p(cred_rev_id.encode('utf-8'))
rev_state_json = await do_call('indy_create_revocation_state',
c_blob_storage_reader_handle,
c_rev_reg_def_json,
c_rev_reg_delta_json,
c_timestamp,
c_cred_rev_id,
create_revocation_state.cb)
res = rev_state_json.decode()
logger.debug("create_revocation_state: <<< res: %r", res)
return res
async def update_revocation_state(blob_storage_reader_handle: int,
rev_state_json: str,
rev_reg_def_json: str,
rev_reg_delta_json: str,
timestamp: int,
cred_rev_id: str) -> str:
"""
    Create a new revocation state for a credential based on an existing state
    at a particular moment in time (to reduce calculation time).
    :param blob_storage_reader_handle: configuration of the blob storage reader handle that allows reading the revocation tails
:param rev_state_json: revocation registry state json
:param rev_reg_def_json: revocation registry definition json
:param rev_reg_delta_json: revocation registry definition delta json
:param timestamp: time represented as a total number of seconds from Unix Epoch
:param cred_rev_id: user credential revocation id in revocation registry
:return: revocation state json {
"rev_reg": <revocation registry>,
"witness": <witness>,
"timestamp" : integer
}
"""
logger = logging.getLogger(__name__)
logger.debug("update_revocation_state: >>> blob_storage_reader_handle: %r, rev_state_json: %r, "
"rev_reg_def_json: %r, rev_reg_delta_json: %r, timestamp: %r, cred_rev_id: %r",
blob_storage_reader_handle,
rev_state_json,
rev_reg_def_json,
rev_reg_delta_json,
timestamp,
cred_rev_id)
if not hasattr(update_revocation_state, "cb"):
logger.debug("update_revocation_state: Creating callback")
update_revocation_state.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
c_rev_state_json = c_char_p(rev_state_json.encode('utf-8'))
c_rev_reg_def_json = c_char_p(rev_reg_def_json.encode('utf-8'))
c_rev_reg_delta_json = c_char_p(rev_reg_delta_json.encode('utf-8'))
c_timestamp = c_uint64(timestamp)
c_cred_rev_id = c_char_p(cred_rev_id.encode('utf-8'))
updated_rev_state_json = await do_call('indy_update_revocation_state',
c_blob_storage_reader_handle,
c_rev_state_json,
c_rev_reg_def_json,
c_rev_reg_delta_json,
c_timestamp,
c_cred_rev_id,
update_revocation_state.cb)
res = updated_rev_state_json.decode()
logger.debug("update_revocation_state: <<< res: %r", res)
return res
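# Illustrative sketch (not part of the original wrapper): create an initial revocation
# state from one registry delta and later refresh it from a newer delta instead of
# recomputing it from scratch. Handles, jsons, timestamps and ids are hypothetical placeholders.
async def _example_revocation_state(blob_storage_reader_handle: int,
                                    rev_reg_def_json: str,
                                    delta_at_t1: str, t1: int,
                                    delta_at_t2: str, t2: int,
                                    cred_rev_id: str) -> str:
    state = await create_revocation_state(blob_storage_reader_handle, rev_reg_def_json,
                                          delta_at_t1, t1, cred_rev_id)
    return await update_revocation_state(blob_storage_reader_handle, state,
                                         rev_reg_def_json, delta_at_t2, t2, cred_rev_id)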
| 47.793883
| 176
| 0.613978
|
642721c1b07336bb142e6ecb3f0041755603ebf3
| 14,633
|
py
|
Python
|
mypy/test/testcheck.py
|
Zac-HD/mypy
|
d5b66a8411036d300e8477be6d1bd40bd28ada28
|
[
"PSF-2.0"
] | null | null | null |
mypy/test/testcheck.py
|
Zac-HD/mypy
|
d5b66a8411036d300e8477be6d1bd40bd28ada28
|
[
"PSF-2.0"
] | 1
|
2021-05-07T15:53:17.000Z
|
2021-05-07T18:43:12.000Z
|
mypy/test/testcheck.py
|
Zac-HD/mypy
|
d5b66a8411036d300e8477be6d1bd40bd28ada28
|
[
"PSF-2.0"
] | null | null | null |
"""Type checker test cases"""
import os
import re
import sys
from typing import Dict, List, Set, Tuple
from mypy import build
from mypy.build import Graph
from mypy.modulefinder import BuildSource, SearchPaths, FindModuleCache
from mypy.test.config import test_temp_dir, test_data_prefix
from mypy.test.data import (
DataDrivenTestCase, DataSuite, FileOperation, module_from_path
)
from mypy.test.helpers import (
assert_string_arrays_equal, normalize_error_messages, assert_module_equivalence,
update_testcase_output, parse_options,
assert_target_equivalence, check_test_output_files, perform_file_operations,
)
from mypy.errors import CompileError
from mypy.semanal_main import core_modules
# List of files that contain test case descriptions.
typecheck_files = [
'check-basic.test',
'check-union-or-syntax.test',
'check-callable.test',
'check-classes.test',
'check-statements.test',
'check-generics.test',
'check-dynamic-typing.test',
'check-inference.test',
'check-inference-context.test',
'check-kwargs.test',
'check-overloading.test',
'check-type-checks.test',
'check-abstract.test',
'check-multiple-inheritance.test',
'check-super.test',
'check-modules.test',
'check-typevar-values.test',
'check-unsupported.test',
'check-unreachable-code.test',
'check-unions.test',
'check-isinstance.test',
'check-lists.test',
'check-namedtuple.test',
'check-narrowing.test',
'check-typeddict.test',
'check-type-aliases.test',
'check-ignore.test',
'check-type-promotion.test',
'check-semanal-error.test',
'check-flags.test',
'check-incremental.test',
'check-serialize.test',
'check-bound.test',
'check-optional.test',
'check-fastparse.test',
'check-warnings.test',
'check-async-await.test',
'check-newtype.test',
'check-class-namedtuple.test',
'check-selftype.test',
'check-python2.test',
'check-columns.test',
'check-functions.test',
'check-tuples.test',
'check-expressions.test',
'check-generic-subtyping.test',
'check-varargs.test',
'check-newsyntax.test',
'check-protocols.test',
'check-underscores.test',
'check-classvar.test',
'check-enum.test',
'check-incomplete-fixture.test',
'check-custom-plugin.test',
'check-default-plugin.test',
'check-attr.test',
'check-ctypes.test',
'check-dataclasses.test',
'check-final.test',
'check-redefine.test',
'check-literal.test',
'check-newsemanal.test',
'check-inline-config.test',
'check-reports.test',
'check-errorcodes.test',
'check-annotated.test',
'check-parameter-specification.test',
'check-generic-alias.test',
'check-typeguard.test',
'check-functools.test',
'check-singledispatch.test',
'check-slots.test',
'check-formatting.test',
]
# Tests that use Python 3.8-only AST features (like expression-scoped ignores):
if sys.version_info >= (3, 8):
typecheck_files.append('check-python38.test')
if sys.version_info >= (3, 9):
typecheck_files.append('check-python39.test')
# Special tests for platforms with case-insensitive filesystems.
if sys.platform in ('darwin', 'win32'):
typecheck_files.extend(['check-modules-case.test'])
class TypeCheckSuite(DataSuite):
files = typecheck_files
def run_case(self, testcase: DataDrivenTestCase) -> None:
incremental = ('incremental' in testcase.name.lower()
or 'incremental' in testcase.file
or 'serialize' in testcase.file)
if incremental:
# Incremental tests are run once with a cold cache, once with a warm cache.
# Expect success on first run, errors from testcase.output (if any) on second run.
num_steps = max([2] + list(testcase.output2.keys()))
# Check that there are no file changes beyond the last run (they would be ignored).
for dn, dirs, files in os.walk(os.curdir):
for file in files:
m = re.search(r'\.([2-9])$', file)
if m and int(m.group(1)) > num_steps:
raise ValueError(
'Output file {} exists though test case only has {} runs'.format(
file, num_steps))
steps = testcase.find_steps()
for step in range(1, num_steps + 1):
idx = step - 2
ops = steps[idx] if idx < len(steps) and idx >= 0 else []
self.run_case_once(testcase, ops, step)
else:
self.run_case_once(testcase)
def run_case_once(self, testcase: DataDrivenTestCase,
operations: List[FileOperation] = [],
incremental_step: int = 0) -> None:
original_program_text = '\n'.join(testcase.input)
module_data = self.parse_module(original_program_text, incremental_step)
# Unload already loaded plugins, they may be updated.
for file, _ in testcase.files:
module = module_from_path(file)
if module.endswith('_plugin') and module in sys.modules:
del sys.modules[module]
if incremental_step == 0 or incremental_step == 1:
# In run 1, copy program text to program file.
for module_name, program_path, program_text in module_data:
if module_name == '__main__':
with open(program_path, 'w', encoding='utf8') as f:
f.write(program_text)
break
elif incremental_step > 1:
# In runs 2+, copy *.[num] files to * files.
perform_file_operations(operations)
# Parse options after moving files (in case mypy.ini is being moved).
options = parse_options(original_program_text, testcase, incremental_step)
options.use_builtins_fixtures = True
options.show_traceback = True
# Enable some options automatically based on test file name.
if 'optional' in testcase.file:
options.strict_optional = True
if 'columns' in testcase.file:
options.show_column_numbers = True
if 'errorcodes' in testcase.file:
options.show_error_codes = True
if incremental_step and options.incremental:
# Don't overwrite # flags: --no-incremental in incremental test cases
options.incremental = True
else:
options.incremental = False
# Don't waste time writing cache unless we are specifically looking for it
if not testcase.writescache:
options.cache_dir = os.devnull
sources = []
for module_name, program_path, program_text in module_data:
# Always set to none so we're forced to reread the module in incremental mode
sources.append(BuildSource(program_path, module_name,
None if incremental_step else program_text))
plugin_dir = os.path.join(test_data_prefix, 'plugins')
sys.path.insert(0, plugin_dir)
res = None
try:
res = build.build(sources=sources,
options=options,
alt_lib_path=test_temp_dir)
a = res.errors
except CompileError as e:
a = e.messages
finally:
assert sys.path[0] == plugin_dir
del sys.path[0]
if testcase.normalize_output:
a = normalize_error_messages(a)
# Make sure error messages match
if incremental_step == 0:
# Not incremental
msg = 'Unexpected type checker output ({}, line {})'
output = testcase.output
elif incremental_step == 1:
msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
output = testcase.output
elif incremental_step > 1:
msg = ('Unexpected type checker output in incremental, run {}'.format(
incremental_step) + ' ({}, line {})')
output = testcase.output2.get(incremental_step, [])
else:
raise AssertionError()
if output != a and testcase.config.getoption('--update-data', False):
update_testcase_output(testcase, a)
assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
if res:
if options.cache_dir != os.devnull:
self.verify_cache(module_data, res.errors, res.manager, res.graph)
name = 'targets'
if incremental_step:
name += str(incremental_step + 1)
expected = testcase.expected_fine_grained_targets.get(incremental_step + 1)
actual = res.manager.processed_targets
# Skip the initial builtin cycle.
actual = [t for t in actual
if not any(t.startswith(mod)
for mod in core_modules + ['mypy_extensions'])]
if expected is not None:
assert_target_equivalence(name, expected, actual)
if incremental_step > 1:
suffix = '' if incremental_step == 2 else str(incremental_step - 1)
expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
if expected_rechecked is not None:
assert_module_equivalence(
'rechecked' + suffix,
expected_rechecked, res.manager.rechecked_modules)
expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
if expected_stale is not None:
assert_module_equivalence(
'stale' + suffix,
expected_stale, res.manager.stale_modules)
if testcase.output_files:
check_test_output_files(testcase, incremental_step, strip_prefix='tmp/')
def verify_cache(self, module_data: List[Tuple[str, str, str]], a: List[str],
manager: build.BuildManager, graph: Graph) -> None:
# There should be valid cache metadata for each module except
# for those that had an error in themselves or one of their
# dependencies.
error_paths = self.find_error_message_paths(a)
busted_paths = {m.path for id, m in manager.modules.items()
if graph[id].transitive_error}
modules = self.find_module_files(manager)
modules.update({module_name: path for module_name, path, text in module_data})
missing_paths = self.find_missing_cache_files(modules, manager)
# We would like to assert error_paths.issubset(busted_paths)
# but this runs into trouble because while some 'notes' are
# really errors that cause an error to be marked, many are
# just notes attached to other errors.
assert error_paths or not busted_paths, "Some modules reported error despite no errors"
if not missing_paths == busted_paths:
raise AssertionError("cache data discrepancy %s != %s" %
(missing_paths, busted_paths))
assert os.path.isfile(os.path.join(manager.options.cache_dir, ".gitignore"))
cachedir_tag = os.path.join(manager.options.cache_dir, "CACHEDIR.TAG")
assert os.path.isfile(cachedir_tag)
with open(cachedir_tag) as f:
assert f.read().startswith("Signature: 8a477f597d28d172789f06886806bc55")
def find_error_message_paths(self, a: List[str]) -> Set[str]:
hits = set()
for line in a:
m = re.match(r'([^\s:]+):(\d+:)?(\d+:)? (error|warning|note):', line)
if m:
p = m.group(1)
hits.add(p)
return hits
def find_module_files(self, manager: build.BuildManager) -> Dict[str, str]:
modules = {}
for id, module in manager.modules.items():
modules[id] = module.path
return modules
def find_missing_cache_files(self, modules: Dict[str, str],
manager: build.BuildManager) -> Set[str]:
ignore_errors = True
missing = {}
for id, path in modules.items():
meta = build.find_cache_meta(id, path, manager)
if not build.validate_meta(meta, id, path, ignore_errors, manager):
missing[id] = path
return set(missing.values())
def parse_module(self,
program_text: str,
incremental_step: int = 0) -> List[Tuple[str, str, str]]:
"""Return the module and program names for a test case.
Normally, the unit tests will parse the default ('__main__')
module and follow all the imports listed there. You can override
this behavior and instruct the tests to check multiple modules
by using a comment like this in the test case input:
# cmd: mypy -m foo.bar foo.baz
You can also use `# cmdN:` to have a different cmd for incremental
step N (2, 3, ...).
Return a list of tuples (module name, file name, program text).
"""
m = re.search('# cmd: mypy -m ([a-zA-Z0-9_. ]+)$', program_text, flags=re.MULTILINE)
if incremental_step > 1:
alt_regex = '# cmd{}: mypy -m ([a-zA-Z0-9_. ]+)$'.format(incremental_step)
alt_m = re.search(alt_regex, program_text, flags=re.MULTILINE)
if alt_m is not None:
# Optionally return a different command if in a later step
# of incremental mode, otherwise default to reusing the
# original cmd.
m = alt_m
if m:
# The test case wants to use a non-default main
# module. Look up the module and give it as the thing to
# analyze.
module_names = m.group(1)
out = []
search_paths = SearchPaths((test_temp_dir,), (), (), ())
cache = FindModuleCache(search_paths, fscache=None, options=None)
for module_name in module_names.split(' '):
path = cache.find_module(module_name)
assert isinstance(path, str), "Can't find ad hoc case file: %s" % module_name
with open(path, encoding='utf8') as f:
program_text = f.read()
out.append((module_name, path, program_text))
return out
else:
return [('__main__', 'main', program_text)]
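# Illustrative sketch (not part of the test suite): the "# cmd:" header that parse_module
# above recognises. The module names are hypothetical placeholders.
def _example_cmd_header() -> List[str]:
    program_text = '# cmd: mypy -m foo.bar foo.baz\nimport foo.bar\n'
    m = re.search('# cmd: mypy -m ([a-zA-Z0-9_. ]+)$', program_text, flags=re.MULTILINE)
    assert m is not None
    return m.group(1).split(' ')  # ['foo.bar', 'foo.baz']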
| 41.571023
| 98
| 0.607668
|
2ee1d06ebb42414858bd38780c187452087e6612
| 2,778
|
py
|
Python
|
airflow/plugins/operators/code_analyzer/utils/tests/test_evaluation.py
|
teiresias-personal-data-discovery/teiresias-system
|
50e9d08d6924480f120d2d4f9fbebdc6035a5c5b
|
[
"MIT"
] | 2
|
2021-09-06T17:32:48.000Z
|
2022-02-24T19:58:41.000Z
|
airflow/plugins/operators/code_analyzer/utils/tests/test_evaluation.py
|
teiresias-personal-data-discovery/teiresias-system
|
50e9d08d6924480f120d2d4f9fbebdc6035a5c5b
|
[
"MIT"
] | null | null | null |
airflow/plugins/operators/code_analyzer/utils/tests/test_evaluation.py
|
teiresias-personal-data-discovery/teiresias-system
|
50e9d08d6924480f120d2d4f9fbebdc6035a5c5b
|
[
"MIT"
] | null | null | null |
import pytest
from operators.code_analyzer.utils.analysis.evaluation import get_common_storage_environment, evaluate_traces, join_facts
from operators.code_analyzer.utils.tests.mocks.intermediate_results import resolved
@pytest.mark.parametrize("storage, tool, env, environment_occurrance", [
('postgres', 'docker', {
'POSTGRES_USER': 'this--user',
'POSTGRES_PASSWORD': 'that--pw',
'POSTGRES_UNSURE': 'TRUE',
}, {
'POSTGRES_PASSWORD': 'that--pw',
'POSTGRES_USER': 'this--user'
}),
])
def test_get_common_storage_environment(storage, tool, env,
environment_occurrance):
assert get_common_storage_environment(storage, tool,
env) == environment_occurrance
@pytest.mark.parametrize(
"traces, environment, storage, storage_modules, tool, joined_facts", [
({
'host': '127.0.0.1',
'login_user': 'admin',
'port': "9876"
}, {
'POSTGRES_PASSWORD': 'secure'
}, 'mongodb', ['mongodb_user'], 'ansible', {
'host': '127.0.0.1',
'port': '9876',
'user': 'admin'
}),
])
def test_join_facts(traces, environment, storage, storage_modules, tool,
joined_facts):
assert join_facts(traces, environment, storage, storage_modules,
tool) == joined_facts
@pytest.mark.parametrize("resolved, environment, connection_items", [
(resolved, {}, {
'postgres-/thesis-analyzeMe.git_2021-05-16_15:38:48/docker-compose.yaml.postgres':
{
'values': {
'port': '5432',
'user': 'admin',
'db': 'test'
},
'source':
'/thesis-analyzeMe.git_2021-05-16_15:38:48/docker-compose.yaml',
'storage_type': 'postgres'
},
'reporting-db-/thesis-analyzeMe.git_2021-05-16_15:38:48/docker-compose.yaml.mongodb':
{
'values': {},
'source':
'/thesis-analyzeMe.git_2021-05-16_15:38:48/docker-compose.yaml',
'storage_type': 'mongodb'
},
'webservers-/thesis-analyzeMe.git_2021-05-16_15:38:48/playbooks/4_pg.yml.postgres':
{
'values': {
'port': '5432',
'user': 'django',
'db': 'myapp',
'password': 'mysupersecretpassword'
},
'source':
'/thesis-analyzeMe.git_2021-05-16_15:38:48/playbooks/4_pg.yml',
'storage_type': 'postgres'
}
}),
])
def test_evaluate_traces(resolved, environment, connection_items):
assert evaluate_traces(resolved, environment) == connection_items
| 35.615385
| 121
| 0.560115
|
cee6ea35cd28dd1783f2502385c2f5b3d0c09ff4
| 4,368
|
py
|
Python
|
ros2doctor/ros2doctor/api/platform.py
|
LoyVanBeek/ros2cli
|
93e717c042a06d21e267a89a7de7780335acb323
|
[
"Apache-2.0"
] | null | null | null |
ros2doctor/ros2doctor/api/platform.py
|
LoyVanBeek/ros2cli
|
93e717c042a06d21e267a89a7de7780335acb323
|
[
"Apache-2.0"
] | null | null | null |
ros2doctor/ros2doctor/api/platform.py
|
LoyVanBeek/ros2cli
|
93e717c042a06d21e267a89a7de7780335acb323
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
from typing import Tuple
from ros2doctor.api import DoctorCheck
from ros2doctor.api import DoctorReport
from ros2doctor.api import Report
from ros2doctor.api import Result
from ros2doctor.api.format import doctor_warn
import rosdistro
def _check_platform_helper() -> Tuple[str, dict, dict]:
"""
    Check the ROS_DISTRO environment variable and the installed distribution.
:return: string of distro name, dict of distribution info, dict of release platforms info
"""
distro_name = os.environ.get('ROS_DISTRO')
if not distro_name:
doctor_warn('ROS_DISTRO is not set.')
return
else:
distro_name = distro_name.lower()
u = rosdistro.get_index_url()
if not u:
doctor_warn('Unable to access ROSDISTRO_INDEX_URL or DEFAULT_INDEX_URL.')
return
i = rosdistro.get_index(u)
distro_info = i.distributions.get(distro_name)
if not distro_info:
doctor_warn("Distribution name '%s' is not found" % distro_name)
return
distro_data = rosdistro.get_distribution(i, distro_name).get_data()
return distro_name, distro_info, distro_data
class PlatformCheck(DoctorCheck):
"""Check system platform against ROSDistro."""
def category(self):
return 'platform'
def check(self):
"""Check system platform against ROS 2 Distro."""
result = Result()
distros = _check_platform_helper()
if not distros:
result.add_error('ERROR: Missing rosdistro info. Unable to check platform.')
return result
distro_name, distro_info, _ = distros
# check distro status
if distro_info.get('distribution_status') == 'prerelease':
result.add_warning('Distribution %s is not fully supported or tested. '
'To get more consistent features, download a stable version at '
'https://index.ros.org/doc/ros2/Installation/' % distro_name)
elif distro_info.get('distribution_status') == 'end-of-life':
result.add_warning('Distribution %s is no longer supported or deprecated. '
'To get the latest features, download the new versions at '
'https://index.ros.org/doc/ros2/Installation/' % distro_name)
return result
class PlatformReport(DoctorReport):
"""Output platform report."""
def category(self):
return 'platform'
def report(self):
platform_name = platform.system()
# platform info
platform_report = Report('PLATFORM INFORMATION')
platform_report.add_to_report('system', platform_name)
platform_report.add_to_report('platform info', platform.platform())
if platform_name == 'Darwin':
platform_report.add_to_report('mac OS version', platform.mac_ver())
platform_report.add_to_report('release', platform.release())
platform_report.add_to_report('processor', platform.processor())
return platform_report
class RosdistroReport(DoctorReport):
"""Output ROSDistro report."""
def category(self):
return 'platform'
def report(self):
distros = _check_platform_helper()
if not distros:
return
distro_name, distro_info, distro_data = distros
ros_report = Report('ROS 2 INFORMATION')
ros_report.add_to_report('distribution name', distro_name)
ros_report.add_to_report('distribution type', distro_info.get('distribution_type'))
ros_report.add_to_report('distribution status', distro_info.get('distribution_status'))
ros_report.add_to_report('release platforms', distro_data.get('release_platforms'))
return ros_report
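# Illustrative sketch (not part of ros2doctor): exercising the check and report classes
# defined above directly. How a Report is rendered is left to the ros2doctor CLI; this
# only demonstrates the call pattern, and the reports may be None if rosdistro info is missing.
def _example_run_platform_doctor():
    check_result = PlatformCheck().check()
    platform_report = PlatformReport().report()
    ros_report = RosdistroReport().report()
    return check_result, platform_report, ros_report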
| 37.333333
| 95
| 0.684066
|
1dd1bd97ce77b0acbe5c07fb4da3c59dc6079fe9
| 10,244
|
py
|
Python
|
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/j6_4ch_128mm_specialist.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/j6_4ch_128mm_specialist.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/267667/kaggle-heart-master/configurations/j6_4ch_128mm_specialist.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 1
num_epochs_train = 470
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
num_epochs_train*9/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
def filter_samples(folders):
# don't use patients who don't have 4ch
import glob
import os
import pickle as pickle
from paths import TEMP_FILES_PATH
d = pickle.load(open(TEMP_FILES_PATH+"pkl_train_slice2roi.pkl"))
d.update(pickle.load(open(TEMP_FILES_PATH+"pkl_validate_slice2roi.pkl")))
c = pickle.load(open(TEMP_FILES_PATH+"pkl_train_metadata.pkl"))
c.update(pickle.load(open(TEMP_FILES_PATH+"pkl_validate_metadata.pkl")))
def has_4ch(f):
ch_slices = glob.glob(f+"/4ch_*.pkl")
if len(ch_slices) > 0:
patient_id = str(data_loader._extract_id_from_path(ch_slices[0]))
slice_name = os.path.basename(ch_slices[0])
heart_size = max(float(d[patient_id][slice_name]['roi_radii'][0]) / c[patient_id][slice_name]['PixelSpacing'][0],
float(d[patient_id][slice_name]['roi_radii'][1]) / c[patient_id][slice_name]['PixelSpacing'][1])
return (heart_size>=32)
else:
return False
return [folder for folder in folders if has_4ch(folder)]
use_hough_roi = True # use roi to center patches
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(256,256)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 1000  # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:singleslice:4ch"]
l0 = nn.layers.InputLayer(input_size)
l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    ldsys3drop = nn.layers.dropout(ldsys3, p=0.5)  # dropout at the output might encourage adjacent neurons to correlate
ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop)
l_systole = layers.CumSumLayer(ldsys3dropnorm)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    lddia3drop = nn.layers.dropout(lddia3, p=0.5)  # dropout at the output might encourage adjacent neurons to correlate
lddia3dropnorm = layers.NormalisationLayer(lddia3drop)
l_diastole = layers.CumSumLayer(lddia3dropnorm)
return {
"inputs":{
"sliced:data:singleslice:4ch": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3: l2_weight_out,
},
}
| 46.352941
| 176
| 0.707341
|
b34c95bbc0de9b7f54878b51c96c51a1c6ad963b
| 3,141
|
py
|
Python
|
src/allconv.py
|
mayuanyang/doggy
|
dea7a0033a721636d423c7b7ed7344c88bf4fd6e
|
[
"MIT"
] | 3
|
2017-11-19T04:56:51.000Z
|
2017-12-21T01:28:42.000Z
|
src/allconv.py
|
mayuanyang/doggy
|
dea7a0033a721636d423c7b7ed7344c88bf4fd6e
|
[
"MIT"
] | null | null | null |
src/allconv.py
|
mayuanyang/doggy
|
dea7a0033a721636d423c7b7ed7344c88bf4fd6e
|
[
"MIT"
] | 1
|
2017-12-10T00:40:40.000Z
|
2017-12-10T00:40:40.000Z
|
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import (
Input,
Activation,
Dense,
Flatten,
Dropout
)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.utils import np_utils
from keras.optimizers import SGD
from keras import backend as K
from keras.models import Model
from keras.layers.core import Lambda
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, ModelCheckpoint
import numpy as np
K.set_image_dim_ordering('tf')
nb_train_samples = 299
nb_validation_samples = 32
batch_size = 32
nb_classes = 3
nb_epoch = 200
# data dir
train_data_dir = 'family/train'
validation_data_dir = 'family/test'
rows, cols = 64, 64
channels = 3
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1),
activation='relu',
input_shape=(rows, cols, channels)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
# model.add(Conv2D(256, (3, 3), activation='relu'))
# model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(nb_classes, activation='relu'))
model.add(Dense(nb_classes, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
print (model.summary())
lr_reducer = ReduceLROnPlateau(monitor='val_loss', patience=4, cooldown=0, verbose=1)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('doggy_result.csv')
train_datagen = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
channel_shift_range=0.2,
rotation_range=25,
horizontal_flip=True,
vertical_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
# check this api for more into https://keras.io/preprocessing/image/
test_datagen = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
channel_shift_range=0.1,
rotation_range=25)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(rows, cols),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(rows, cols),
batch_size=batch_size,
class_mode='categorical')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=nb_epoch,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size,
callbacks=[lr_reducer, csv_logger])
model.save('keras_allconv.h5')
| 29.083333
| 88
| 0.708055
|
e0d2b3b28bbee54bf1d47fbee06a818a885aa140
| 3,370
|
py
|
Python
|
nexinfosys/restful_service/mod_wsgi/monitor.py
|
MAGIC-nexus/nis-backend
|
dd425925321134f66884f60b202a59b38b7786a0
|
[
"BSD-3-Clause"
] | 6
|
2019-05-31T23:02:30.000Z
|
2022-01-07T22:56:50.000Z
|
nexinfosys/restful_service/mod_wsgi/monitor.py
|
ENVIRO-Module/nis-backend
|
fd86cf30f79f53cdccddd2a5479507d32f914d4e
|
[
"BSD-3-Clause"
] | 2
|
2021-12-03T18:22:42.000Z
|
2021-12-13T19:57:15.000Z
|
nexinfosys/restful_service/mod_wsgi/monitor.py
|
ENVIRO-Module/nis-backend
|
fd86cf30f79f53cdccddd2a5479507d32f914d4e
|
[
"BSD-3-Clause"
] | 3
|
2019-04-05T16:45:09.000Z
|
2021-03-17T12:05:44.000Z
|
"""
Monitor changes to the project's files when the application is published on Apache2 using "mod_wsgi".
When a change is detected, the application is reloaded automatically, without having to touch the "nis_docker.wsgi" file.
From:
http://blog.dscpl.com.au/2008/12/using-modwsgi-when-developing-django.html
https://code.google.com/p/modwsgi/wiki/ReloadingSourceCode#Restarting_Daemon_Processes
Converted to Python 3 using "2to3"
"""
import os
import sys
import signal
import threading
import atexit
import queue
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
        # If path doesn't denote a file and we were previously
        # tracking it, then it has been removed or the file type
        # has changed, so force a restart. If we were not previously
        # tracking the file then we can ignore it, as it is probably a
        # pseudo reference such as a file extracted from a
        # collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except:
        # If any exception occurred, it is likely that the file has
        # been removed just before stat(), so force a restart.
return True
return False
def _monitor():
while 1:
# Check modification times on all files in sys.modules.
for module in list(sys.modules.values()):
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if path not in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Starting change monitor.' % prefix, file=sys.stderr)
_running = True
_thread.start()
_lock.release()
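# Illustrative sketch (not part of this module): typical use from a mod_wsgi script file,
# following the articles referenced in the module docstring. The tracked path is a
# hypothetical placeholder.
def _example_wsgi_setup():
    # Start the background monitor thread and also watch a non-module file for changes.
    start(interval=1.0)
    track('/srv/app/config/settings.ini')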
| 25.725191
| 119
| 0.623145
|
a05166e5980c1ca5f2e10df37d8f8396bdb3eaea
| 13,888
|
py
|
Python
|
blazeface/utils/augment.py
|
ishaghodgaonkar/PyTorch_BlazeFace
|
e3898b763057b213c144ef3fb09ddd4e6eb10445
|
[
"Apache-2.0"
] | null | null | null |
blazeface/utils/augment.py
|
ishaghodgaonkar/PyTorch_BlazeFace
|
e3898b763057b213c144ef3fb09ddd4e6eb10445
|
[
"Apache-2.0"
] | null | null | null |
blazeface/utils/augment.py
|
ishaghodgaonkar/PyTorch_BlazeFace
|
e3898b763057b213c144ef3fb09ddd4e6eb10445
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torchvision import transforms
import cv2
import numpy as np
import types
from numpy import random
def intersect(box_a, box_b):
max_xy = np.minimum(box_a[:, 2:], box_b[2:])
min_xy = np.maximum(box_a[:, :2], box_b[:2])
inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)
return inter[:, 0] * inter[:, 1]
def jaccard_numpy(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
jaccard overlap: Shape: [box_a.shape[0], box_a.shape[1]]
"""
inter = intersect(box_a, box_b)
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])) # [A,B]
area_b = ((box_b[2]-box_b[0]) *
(box_b[3]-box_b[1])) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
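# Illustrative sketch (not part of the original augmentation code): a quick numeric check
# of jaccard_numpy. A 10x10 box against a 5x10 box covering its left half:
# intersection = 50, union = 100 + 50 - 50 = 100, so IoU = 0.5.
def _example_jaccard():
    box_a = np.array([[0., 0., 10., 10.]])
    box_b = np.array([0., 0., 5., 10.])
    return jaccard_numpy(box_a, box_b)  # -> array([0.5])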
class Compose(object):
"""Composes several augmentations together.
Args:
transforms (List[Transform]): list of transforms to compose.
Example:
>>> augmentations.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, boxes=None, labels=None):
for t in self.transforms:
img, boxes, labels = t(img, boxes, labels)
return img, boxes, labels
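# Illustrative sketch (not part of the original augmentation code): chaining two of the
# transforms defined further below with Compose. The 480x640 dummy image stands in for a
# real sample and is resized to 300x300.
def _example_compose():
    aug = Compose([ConvertFromInts(), Resize(300)])
    dummy = np.zeros((480, 640, 3), dtype=np.uint8)
    img, boxes, labels = aug(dummy, boxes=None, labels=None)
    return img.shape  # (300, 300, 3)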
class Lambda(object):
"""Applies a lambda as a transform."""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img, boxes=None, labels=None):
return self.lambd(img, boxes, labels)
class ConvertFromInts(object):
def __call__(self, image, boxes=None, labels=None):
return image.astype(np.float32), boxes, labels
class SubtractMeans(object):
def __init__(self, mean):
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image, boxes=None, labels=None):
image = image.astype(np.float32)
image -= self.mean
return image.astype(np.float32), boxes, labels
class ToAbsoluteCoords(object):
def __call__(self, image, boxes=None, labels=None):
height, width, channels = image.shape
boxes[:, 0] *= width
boxes[:, 2] *= width
boxes[:, 1] *= height
boxes[:, 3] *= height
return image, boxes, labels
class ToPercentCoords(object):
def __call__(self, image, boxes=None, labels=None):
height, width, channels = image.shape
boxes[:, 0] /= width
boxes[:, 2] /= width
boxes[:, 1] /= height
boxes[:, 3] /= height
return image, boxes, labels
class Resize(object):
def __init__(self, size=300):
self.size = size
def __call__(self, image, boxes=None, labels=None):
image = cv2.resize(image, (self.size,
self.size))
return image, boxes, labels
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 1] *= random.uniform(self.lower, self.upper)
return image, boxes, labels
class RandomHue(object):
def __init__(self, delta=18.0):
assert delta >= 0.0 and delta <= 360.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
image[:, :, 0] += random.uniform(-self.delta, self.delta)
image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
return image, boxes, labels
class RandomLightingNoise(object):
def __init__(self):
self.perms = ((0, 1, 2), (0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
swap = self.perms[random.randint(len(self.perms))]
shuffle = SwapChannels(swap) # shuffle channels
image = shuffle(image)
return image, boxes, labels
class ConvertColor(object):
def __init__(self, current='BGR', transform='HSV'):
self.transform = transform
self.current = current
def __call__(self, image, boxes=None, labels=None):
if self.current == 'BGR' and self.transform == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
elif self.current == 'HSV' and self.transform == 'BGR':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
else:
raise NotImplementedError
return image, boxes, labels
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
# expects float image
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
alpha = random.uniform(self.lower, self.upper)
image *= alpha
return image, boxes, labels
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, boxes=None, labels=None):
if random.randint(2):
delta = random.uniform(-self.delta, self.delta)
image += delta
return image, boxes, labels
class ToCV2Image(object):
def __call__(self, tensor, boxes=None, labels=None):
return tensor.cpu().numpy().astype(np.float32).transpose((1, 2, 0)), boxes, labels
class ToTensor(object):
def __call__(self, cvimage, boxes=None, labels=None):
return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), boxes, labels
class RandomSampleCrop(object):
"""Crop
Arguments:
img (Image): the image being input during training
boxes (Tensor): the original bounding boxes in pt form
labels (Tensor): the class labels for each bbox
mode (float tuple): the min and max jaccard overlaps
Return:
(img, boxes, classes)
img (Image): the cropped image
boxes (Tensor): the adjusted bounding boxes in pt form
labels (Tensor): the class labels for each bbox
"""
def __init__(self):
self.sample_options = (
# using entire original input image
None,
# sample a patch s.t. MIN jaccard w/ obj in .1, .3, .7, .9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# randomly sample a patch
(None, None),
)
def __call__(self, image, boxes=None, labels=None):
height, width, _ = image.shape
while True:
# randomly choose a mode
mode = random.choice(self.sample_options)
if mode is None:
return image, boxes, labels
min_iou, max_iou = mode
if min_iou is None:
min_iou = float('-inf')
if max_iou is None:
max_iou = float('inf')
# max trials (50)
for _ in range(50):
current_image = image
w = random.uniform(0.3 * width, width)
h = random.uniform(0.3 * height, height)
# aspect ratio constraint b/t .5 & 2
if h / w < 0.5 or h / w > 2:
continue
left = random.uniform(width - w)
top = random.uniform(height - h)
# convert to integer rect x1,y1,x2,y2
rect = np.array([int(left), int(top), int(left+w), int(top+h)])
# calculate IoU (jaccard overlap) b/t the cropped and gt boxes
overlap = jaccard_numpy(boxes, rect)
# is min and max overlap constraint satisfied? if not try again
if overlap.min() < min_iou or overlap.max() > max_iou:
continue
# cut the crop from the image
current_image = current_image[rect[1]:rect[3], rect[0]:rect[2],
:]
# keep overlap with gt box IF center in sampled patch
centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0
# mask in all gt boxes whose centers lie below and to the right of the crop's top-left corner
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
# mask in all gt boxes whose centers lie above and to the left of the crop's bottom-right corner
m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])
# keep only the boxes where both m1 and m2 hold, i.e. the center falls inside the crop
mask = m1 * m2
# have any valid boxes? try again if not
if not mask.any():
continue
# take only matching gt boxes
current_boxes = boxes[mask, :].copy()
# take only matching gt labels
current_labels = labels[mask]
# clip the box's left/top corner to the crop
current_boxes[:, :2] = np.maximum(current_boxes[:, :2],
rect[:2])
# adjust to crop (by subtracting crop's left,top)
current_boxes[:, :2] -= rect[:2]
current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:],
rect[2:])
# adjust to crop (by subtracting crop's left,top)
current_boxes[:, 2:] -= rect[:2]
return current_image, current_boxes, current_labels
class Expand(object):
def __init__(self, mean):
self.mean = mean
def __call__(self, image, boxes, labels):
if random.randint(2):
return image, boxes, labels
height, width, depth = image.shape
ratio = random.uniform(1, 4)
left = random.uniform(0, width*ratio - width)
top = random.uniform(0, height*ratio - height)
expand_image = np.zeros(
(int(height*ratio), int(width*ratio), depth),
dtype=image.dtype)
expand_image[:, :, :] = self.mean
expand_image[int(top):int(top + height),
int(left):int(left + width)] = image
image = expand_image
boxes = boxes.copy()
boxes[:, :2] += (int(left), int(top))
boxes[:, 2:] += (int(left), int(top))
return image, boxes, labels
class RandomMirror(object):
def __call__(self, image, boxes, classes):
_, width, _ = image.shape
if random.randint(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return image, boxes, classes
class SwapChannels(object):
"""Transforms a tensorized image by swapping the channels in the order
specified in the swap tuple.
Args:
swaps (int triple): final order of channels
eg: (2, 1, 0)
"""
def __init__(self, swaps):
self.swaps = swaps
def __call__(self, image):
"""
Args:
image (Tensor): image tensor to be transformed
Return:
a tensor with channels swapped according to swap
"""
# if torch.is_tensor(image):
# image = image.data.cpu().numpy()
# else:
# image = np.array(image)
image = image[:, :, self.swaps]
return image
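# Illustrative sketch (not part of the original file): SwapChannels((2, 1, 0))
# reverses the channel order, e.g. reinterpreting a BGR image as RGB.
def _swap_channels_example():
    bgr = np.zeros((2, 2, 3), dtype=np.float32)
    bgr[:, :, 0] = 1.0                      # set the first (blue) channel to 1
    rgb = SwapChannels((2, 1, 0))(bgr)
    return rgb[0, 0].tolist()               # -> [0.0, 0.0, 1.0]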
class PhotometricDistort(object):
def __init__(self):
self.pd = [
RandomContrast(),
ConvertColor(transform='HSV'),
RandomSaturation(),
RandomHue(),
ConvertColor(current='HSV', transform='BGR'),
RandomContrast()
]
self.rand_brightness = RandomBrightness()
self.rand_light_noise = RandomLightingNoise()
def __call__(self, image, boxes, labels):
im = image.copy()
im, boxes, labels = self.rand_brightness(im, boxes, labels)
if random.randint(2):
distort = Compose(self.pd[:-1])
else:
distort = Compose(self.pd[1:])
im, boxes, labels = distort(im, boxes, labels)
return self.rand_light_noise(im, boxes, labels)
class SSDAugmentation(object):
def __init__(self, size=300, mean=(104, 117, 123)):
self.mean = mean
self.size = size
self.augment = Compose([
ConvertFromInts(),
ToAbsoluteCoords(),
PhotometricDistort(),
Expand(self.mean),
RandomSampleCrop(),
RandomMirror(),
ToPercentCoords(),
Resize(self.size),
SubtractMeans(self.mean)
])
def __call__(self, img, boxes, labels):
return self.augment(img, boxes, labels)
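# Illustrative usage sketch (not part of the original file): how this pipeline is
# typically invoked. The image and box values are hypothetical; boxes are expected in
# normalized [x1, y1, x2, y2] form because ToAbsoluteCoords runs first. This assumes
# an environment (NumPy/OpenCV versions) in which this legacy pipeline still runs.
def _ssd_augmentation_example():
    aug = SSDAugmentation(size=300, mean=(104, 117, 123))
    img = np.random.randint(0, 255, (480, 640, 3)).astype(np.float32)  # HWC, BGR assumed
    boxes = np.array([[0.1, 0.1, 0.5, 0.5]], dtype=np.float32)         # normalized coords
    labels = np.array([1])
    out_img, out_boxes, out_labels = aug(img, boxes, labels)
    return out_img.shape                                               # -> (300, 300, 3)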
| 32.988124
| 92
| 0.534202
|
eadce5479405a6eb0781abc49dc1078d556cc7f9
| 4,650
|
py
|
Python
|
Model/predictor_dl_model/predictor_dl_model/trainer/client_rest_tf.py
|
helenyu18/blue-marlin
|
668985fad1993a682808e271610c1cf2cec6a6f5
|
[
"Apache-2.0"
] | null | null | null |
Model/predictor_dl_model/predictor_dl_model/trainer/client_rest_tf.py
|
helenyu18/blue-marlin
|
668985fad1993a682808e271610c1cf2cec6a6f5
|
[
"Apache-2.0"
] | null | null | null |
Model/predictor_dl_model/predictor_dl_model/trainer/client_rest_tf.py
|
helenyu18/blue-marlin
|
668985fad1993a682808e271610c1cf2cec6a6f5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, Futurewei Technologies
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A client that talks to tensorflow_serving loaded with kaggle model.
The client reads the kaggle feature data set, queries the service with
such feature data to get predictions, and calculates the inference error rate.
Typical usage example:
predictor_client.py --server=0.0.0.0:8500
"""
from __future__ import print_function
import sys
import threading
import os
import pickle
import tensorflow as tf
from predictor_dl_model.trainer.feeder import VarFeeder
from predictor_dl_model.trainer.input_pipe import ucdoc_features
from predictor_dl_model.trainer.input_pipe import ModelMode
from predictor_dl_model.trainer.input_pipe import InputPipe
from enum import Enum
from typing import List, Iterable
import numpy as np
import pandas as pd
import json
import requests
tf.app.flags.DEFINE_integer(
'concurrency', 1, 'maximum number of concurrent inference requests')
tf.app.flags.DEFINE_integer(
'batch_size', 1024, 'number of sample in each batch')
tf.app.flags.DEFINE_integer('predict_window', 10, 'Number of days to predict')
tf.app.flags.DEFINE_integer(
'train_window', 60, 'number of time spots in training')
tf.app.flags.DEFINE_string('server', '', 'PredictionService host:port')
tf.app.flags.DEFINE_string(
'result_dir', 'data/predict', 'directory to put prediction result.')
tf.app.flags.DEFINE_boolean(
'verbose', False, 'verbose or not in creating input data')
FLAGS = tf.app.flags.FLAGS
BATCH_SIZE = 1 # Has to be 1
def main(_):
# if not FLAGS.server:
# print('please specify server host:port')
# return
with tf.variable_scope('input') as inp_scope:
with tf.device("/cpu:0"):
inp = VarFeeder.read_vars("data/vars")
pipe = InputPipe(inp, ucdoc_features(inp), inp.hits.shape[0], mode=ModelMode.PREDICT,
batch_size=BATCH_SIZE, n_epoch=1, verbose=False,
train_completeness_threshold=0.01, predict_window=10,
predict_completeness_threshold=0.0, train_window=60,
back_offset=11)
error = []
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
pipe.load_vars(sess)
pipe.init_iterator(sess)
for i in range(100):
truex, timex, normx, laggedx, truey, timey, normy, normmean, normstd, pgfeatures, pageix = sess.run([pipe.true_x, pipe.time_x, pipe.norm_x, pipe.lagged_x, pipe.true_y, pipe.time_y,
pipe.norm_y, pipe.norm_mean, pipe.norm_std, pipe.ucdoc_features, pipe.page_ix])
# if pageix == b'cloudFolder,2,4G,g_f,2,pt,1002,icc,3,10':
# print("hello")
data = {"instances": [{"truex": truex.tolist()[0], "timex": timex.tolist()[0], "normx": normx.tolist()[0], "laggedx": laggedx.tolist()[0],
"truey": truey.tolist()[0], "timey": timey.tolist()[0], "normy": normy.tolist()[0], "normmean": normmean.tolist()[0],
"normstd": normstd.tolist()[0], "page_features": pgfeatures.tolist()[0], "pageix": [pageix.tolist()[0].decode('utf-8')]}]}
URL = "http://10.193.217.108:8501/v1/models/faezeh1:predict"
body = data
r = requests.post(URL, data=json.dumps(body))
pred_y = np.round(np.expm1(r.json()['predictions'][0]))
true_y = np.round(np.expm1(truey))
e = np.average(np.divide(np.abs(np.subtract(pred_y, true_y)), true_y))
error.append(e)
print(data['instances'][0]['pageix'][0], e)
print(np.average(error))
if __name__ == '__main__':
tf.app.run()
| 41.517857
| 192
| 0.654839
|
a5f2b83127acf907520597bf9fcb335514cfbf17
| 3,378
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/datashare/list_share_synchronizations.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/datashare/list_share_synchronizations.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/datashare/list_share_synchronizations.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'ListShareSynchronizationsResult',
'AwaitableListShareSynchronizationsResult',
'list_share_synchronizations',
]
@pulumi.output_type
class ListShareSynchronizationsResult:
"""
List response for get ShareSynchronization.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> Optional[str]:
"""
The URL of the next result page.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Sequence['outputs.ShareSynchronizationResponseResult']:
"""
Collection of items of type DataTransferObjects.
"""
return pulumi.get(self, "value")
class AwaitableListShareSynchronizationsResult(ListShareSynchronizationsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListShareSynchronizationsResult(
next_link=self.next_link,
value=self.value)
def list_share_synchronizations(account_name: Optional[str] = None,
filter: Optional[str] = None,
orderby: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
skip_token: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListShareSynchronizationsResult:
"""
List response for get ShareSynchronization.
API Version: 2020-09-01.
:param str account_name: The name of the share account.
:param str filter: Filters the results using OData syntax.
:param str orderby: Sorts the results using OData syntax.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share.
:param str skip_token: Continuation token
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['filter'] = filter
__args__['orderby'] = orderby
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
__args__['skipToken'] = skip_token
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:datashare:listShareSynchronizations', __args__, opts=opts, typ=ListShareSynchronizationsResult).value
return AwaitableListShareSynchronizationsResult(
next_link=__ret__.next_link,
value=__ret__.value)
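# Illustrative usage sketch (not part of the generated module); the account, resource
# group and share names below are hypothetical placeholders.
def _example_list_share_synchronizations():
    result = list_share_synchronizations(
        account_name="myAccount",
        resource_group_name="myResourceGroup",
        share_name="myShare")
    for synchronization in result.value:
        print(synchronization)
    return result.next_link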
| 36.322581
| 152
| 0.660746
|
56c3ef02300406784cde28ee665565c3f54f7075
| 2,542
|
py
|
Python
|
setup.py
|
pallomabritoGN/gn-api-sdk-python
|
c9da015207d91abb0a3c0d7b6bcbb146a7e5b849
|
[
"MIT"
] | null | null | null |
setup.py
|
pallomabritoGN/gn-api-sdk-python
|
c9da015207d91abb0a3c0d7b6bcbb146a7e5b849
|
[
"MIT"
] | null | null | null |
setup.py
|
pallomabritoGN/gn-api-sdk-python
|
c9da015207d91abb0a3c0d7b6bcbb146a7e5b849
|
[
"MIT"
] | null | null | null |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
dependencies = [dependency.strip()
for dependency in open("requirements.txt").readlines()]
setup(
name='gerencianet',
version='1.1.0',
description='Module for integration with Gerencianet API',
long_description=long_description,
# The project's main homepage.
url='https://github.com/gerencianet/gn-api-sdk-python',
# Author details
author='Danniel Hugo, Cecilia Deveza, Francisco Thiene, Thomaz Feitoza, Talita Campos ',
author_email='suportetecnico@gerencianet.com.br',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
# What does your project relate to?
keywords='payment Gerencianet',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'test': ['pytest-cov', 'pytest', 'responses'],
},
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['requirements.txt', '.md', 'LICENSE']
},
include_package_data=True,
install_requires=dependencies,
)
| 30.261905
| 92
| 0.656176
|
b69359771c76a537373c4a89925f71150e394ec6
| 70
|
py
|
Python
|
tests/data/__init__.py
|
chryssa-zrv/UA_COMET
|
527e7c86bd0a0d8ff90efda58e820108a5666b92
|
[
"Apache-2.0"
] | null | null | null |
tests/data/__init__.py
|
chryssa-zrv/UA_COMET
|
527e7c86bd0a0d8ff90efda58e820108a5666b92
|
[
"Apache-2.0"
] | null | null | null |
tests/data/__init__.py
|
chryssa-zrv/UA_COMET
|
527e7c86bd0a0d8ff90efda58e820108a5666b92
|
[
"Apache-2.0"
] | null | null | null |
import os
DATA_PATH = os.path.dirname(os.path.abspath(__file__))+"/"
| 17.5
| 58
| 0.728571
|
d1106da7bb331ebae5a66d6d7d7da2666b528b63
| 1,698
|
py
|
Python
|
benchmark/experimental_vocab.py
|
guyang3532/text
|
e2fc987ff6a002018040cffac5e0d61c3d0b06c6
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/experimental_vocab.py
|
guyang3532/text
|
e2fc987ff6a002018040cffac5e0d61c3d0b06c6
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/experimental_vocab.py
|
guyang3532/text
|
e2fc987ff6a002018040cffac5e0d61c3d0b06c6
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import (Counter, OrderedDict)
import time
import torch
from torchtext.experimental.datasets import AG_NEWS
from torchtext.experimental.vocab import Vocab as VocabExperimental
from torchtext.vocab import Vocab
def benchmark_experimental_vocab():
def _run_benchmark_lookup(tokens, vocab):
t0 = time.monotonic()
for token in tokens:
vocab[token]
print("Lookup time:", time.monotonic() - t0)
train, = AG_NEWS(data_select='train')
vocab = train.get_vocab()
tokens = []
for (label, text) in train:
for id in text.tolist():
tokens.append(vocab.itos[id])
counter = Counter(tokens)
sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[1], reverse=True)
ordered_dict = OrderedDict(sorted_by_freq_tuples)
# existing Vocab construction
print("Vocab")
t0 = time.monotonic()
v_existing = Vocab(counter)
print("Construction time:", time.monotonic() - t0)
# experimental Vocab construction
print("Vocab Experimental")
t0 = time.monotonic()
v_experimental = VocabExperimental(ordered_dict)
print("Construction time:", time.monotonic() - t0)
jit_v_experimental = torch.jit.script(v_experimental)
# existing Vocab not jit lookup
print("Vocab - Not Jit Mode")
_run_benchmark_lookup(tokens, v_existing)
# experimental Vocab not jit lookup
print("Vocab Experimental - Not Jit Mode")
_run_benchmark_lookup(tokens, v_experimental)
# experimental Vocab jit lookup
print("Vocab Experimental - Jit Mode")
_run_benchmark_lookup(tokens, jit_v_experimental)
if __name__ == "__main__":
benchmark_experimental_vocab()
| 30.321429
| 85
| 0.706125
|
109650a7ccde98c3db4a46f0e4299db624498068
| 2,713
|
py
|
Python
|
minetext/clustering/distance.py
|
CaioMelo8/android-comments-miner
|
d564cc3f44bd4423e8d2621e30650d0d21436624
|
[
"MIT"
] | null | null | null |
minetext/clustering/distance.py
|
CaioMelo8/android-comments-miner
|
d564cc3f44bd4423e8d2621e30650d0d21436624
|
[
"MIT"
] | null | null | null |
minetext/clustering/distance.py
|
CaioMelo8/android-comments-miner
|
d564cc3f44bd4423e8d2621e30650d0d21436624
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from math import sqrt, exp
import numpy as np
class LevenshteinCalculator(object):
def calculate(self, source, target):
if len(source) < len(target):
return self.calculate(target, source)
if len(target) == 0:
return len(source)
source = np.array(tuple(source))
target = np.array(tuple(target))
previous_row = np.arange(target.size + 1)
for s in source:
current_row = previous_row + 1
current_row[1:] = np.minimum(current_row[1:], np.add(previous_row[:-1], target != s))
current_row[1:] = np.minimum(current_row[1:], current_row[0:-1] + 1)
previous_row = current_row
return previous_row[-1] / len(source)
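# Illustrative sketch (not part of the original module): the calculator returns the
# Levenshtein edit distance normalised by the length of the longer string.
def _levenshtein_example():
    calc = LevenshteinCalculator()
    # "kitten" -> "sitting" needs 3 edits; the longer string has 7 characters,
    # so the result is 3 edits / 7 characters (~0.43).
    return calc.calculate("kitten", "sitting")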
class EuclideanCalculator(object):
def calculate(self, source, target):
x1 = float(source["latitude"])
x2 = float(target["latitude"])
y1 = float(source["longitude"])
y2 = float(target["longitude"])
return sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
class FadingCalculator(object):
def calculate(self, source, target):
FMT = "%H:%M:%S"
if datetime.strptime(source["time"], FMT) > datetime.strptime(target["time"], FMT):
tdelta = datetime.strptime(source["time"], FMT) - datetime.strptime(target["time"], FMT)
else:
tdelta = datetime.strptime(target["time"], FMT) - datetime.strptime(source["time"], FMT)
timeDifference = tdelta.seconds / 60.0 / 60
words1 = set(source.split())
words2 = set(target.split())
duplicates = words1.intersection(words2)
uniques = words1.union(words2.difference(words1))
try:
simi = float(len(duplicates)) / (len(uniques) * exp(timeDifference))
return simi
except:
return 0.0
class JaccardCalculatorSimilarity(object):
def calculate(self, source, target):
words1 = set(source.split())
words2 = set(target.split())
duplicated = len(words1.intersection(words2))
uniques = len(words1.union(words2.difference(words1)))
try:
simi = float(duplicated) / uniques
return simi
except ZeroDivisionError:
return 0.0
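# Illustrative sketch (not part of the original module): two shared words out of four
# distinct words gives a Jaccard similarity of 0.5.
def _jaccard_similarity_example():
    calc = JaccardCalculatorSimilarity()
    return calc.calculate("the quick fox", "the lazy fox")  # -> 0.5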
class JaccardCalculatorDistance(object):
def calculate(self, source, target):
words1 = set(source.split())
words2 = set(target.split())
duplicated = len(words1.intersection(words2))
uniques = len(words1.union(words2.difference(words1)))
try:
simi = float(duplicated) / uniques
return 1 - simi
except ZeroDivisionError:
return 1
| 30.483146
| 100
| 0.597125
|
c776157e549a4f20884b74a074de4f709a0d38e4
| 4,211
|
py
|
Python
|
configs/railway/ssd_0422_all_merged.py
|
huminghe/mmdetection
|
37a3e5d1891a177f9cd16f3ed53195f2d8c2ef70
|
[
"Apache-2.0"
] | null | null | null |
configs/railway/ssd_0422_all_merged.py
|
huminghe/mmdetection
|
37a3e5d1891a177f9cd16f3ed53195f2d8c2ef70
|
[
"Apache-2.0"
] | null | null | null |
configs/railway/ssd_0422_all_merged.py
|
huminghe/mmdetection
|
37a3e5d1891a177f9cd16f3ed53195f2d8c2ef70
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
type='SingleStageDetector',
backbone=dict(
type='SSDVGG',
input_size=input_size,
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20),
neck=None,
bbox_head=dict(
type='SSDHead',
in_channels=(512, 1024, 512, 256, 256, 256),
num_classes=10,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/home/railway/workspace/hmh/data/data/doors/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
classes = ('door-opened', 'door-closed', 'window-side', 'window-front', 'people-whole-body', 'food container', 'chopsticks fork spoon', 'food', 'cigarette', 'mobile phone')
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'railway_doors_all_merged_0422.json',
img_prefix=data_root + 'Images/',
classes=classes,
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'railway_doors_all_merged_0422.json',
img_prefix=data_root + 'Images/',
classes=classes,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'railway_doors_all_merged_0422.json',
img_prefix=data_root + 'Images/',
classes=classes,
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
checkpoint_config = dict(interval=2)
evaluation = dict(interval=2, metric=['bbox'])
load_from = '/home/railway/workspace/hmh/data/models/detection/ssd300_coco_20200307-a92d2092.pth'
| 32.392308
| 172
| 0.594396
|
8395082c87447aaa793ec71b99484128d713d9c1
| 1,595
|
py
|
Python
|
tests/test_scene_parser/test_dvd.py
|
seedzero/Caper
|
9aad10e22d7c94fd67fb164f37764c9eb5f9c75b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scene_parser/test_dvd.py
|
seedzero/Caper
|
9aad10e22d7c94fd67fb164f37764c9eb5f9c75b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_scene_parser/test_dvd.py
|
seedzero/Caper
|
9aad10e22d7c94fd67fb164f37764c9eb5f9c75b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Dean Gardiner <gardiner91@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from helpers import setup_path
setup_path()
import logging
from logr import Logr
Logr.configure(logging.DEBUG)
from caper import Caper
from matchers import has_info
from hamcrest import *
caper = Caper()
def test_dvd_region():
assert_that(
caper.parse('Show Name (2011) S01 R1 NTSC'),
has_info('dvd', {'region': '1'})
)
assert_that(
caper.parse('Show Name (2011) S01 R4 PAL'),
has_info('dvd', {'region': '4'})
)
def test_dvd_encoding():
assert_that(
caper.parse('Show Name (2011) S01 R1 NTSC'),
has_info('dvd', {'encoding': 'NTSC'})
)
assert_that(
caper.parse('Show Name (2011) S01 R4 PAL'),
has_info('dvd', {'encoding': 'PAL'})
)
def test_dvd_disc():
assert_that(
caper.parse('Show Name (2011) S01 R1 NTSC DISC3'),
has_info('dvd', {'disc': '3'})
)
assert_that(
caper.parse('Show Name (2011) S01 R4 PAL D2'),
has_info('dvd', {'disc': '2'})
)
| 25.31746
| 74
| 0.65768
|
b90be8f5d757d7321ea104fbebe8df80c5e73a6a
| 795
|
py
|
Python
|
xlsxwriter/test/comparison/test_image16.py
|
timgates42/XlsxWriter
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_image16.py
|
timgates42/XlsxWriter
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/comparison/test_image16.py
|
timgates42/XlsxWriter
|
129044ed821de67895b4562c6b71f90eba5be6b4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image16.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('C2', self.image_dir + 'issue32.png')
workbook.close()
self.assertExcelEqual()
| 23.382353
| 79
| 0.613836
|
ba4065a71acc2724153f63a37efa0aa257c4af42
| 2,035
|
py
|
Python
|
python/example/render_hex_mesh.py
|
brokencuph/diff_pd
|
2c30ecfa39762c5fc78dea9c7a226000e9fc5c15
|
[
"MIT"
] | 4
|
2022-02-10T02:28:42.000Z
|
2022-02-10T07:28:35.000Z
|
python/example/render_hex_mesh.py
|
srl-ethz/diffPD_sim2real
|
e491668995a163b8ff7542d99f0b4e0c0f4ed2df
|
[
"MIT"
] | null | null | null |
python/example/render_hex_mesh.py
|
srl-ethz/diffPD_sim2real
|
e491668995a163b8ff7542d99f0b4e0c0f4ed2df
|
[
"MIT"
] | 2
|
2022-03-11T20:13:24.000Z
|
2022-03-12T03:38:46.000Z
|
import sys
sys.path.append('../')
import os
from pathlib import Path
import numpy as np
from PIL import Image
from contextlib import contextmanager, redirect_stderr, redirect_stdout
from py_diff_pd.common.common import create_folder, ndarray
from py_diff_pd.common.common import print_info, print_ok, print_error
from py_diff_pd.common.display import render_hex_mesh
from py_diff_pd.common.hex_mesh import generate_hex_mesh
from py_diff_pd.core.py_diff_pd_core import HexMesh3d
def image_to_numpy_array(img_name):
img = Image.open(img_name).convert('RGB')
img_data = ndarray(img.getdata()).reshape(img.size[0], img.size[1], 3) / 255
return img_data
def compare_images(img_data1, img_data2, abs_tol, rel_tol):
return all(np.abs(img_data1.ravel() - img_data2.ravel()) <= abs_tol + img_data1.ravel() * rel_tol)
def test_render_hex_mesh(verbose):
render_ok = True
folder = Path('render_hex_mesh')
voxels = np.ones((10, 10, 10))
bin_file_name = str(folder / 'cube.bin')
generate_hex_mesh(voxels, 0.1, (0, 0, 0), bin_file_name)
mesh = HexMesh3d()
mesh.Initialize(bin_file_name)
resolution = (400, 400)
sample_num = 64
render_hex_mesh(mesh, folder / 'render_hex_mesh_1.png', resolution=resolution, sample=sample_num)
if verbose:
os.system('eog {}'.format(folder / 'render_hex_mesh_1.png'))
# Demonstrate more advanced options.
resolution = (600, 600)
sample_num = 16
# Scale the cube by 0.5, rotate along the vertical axis by 30 degrees, and translate by (0.5, 0.5, 0).
transforms = [('s', 0.5), ('r', (np.pi / 6, 0, 0, 1)), ('t', (0.5, 0.5, 0))]
render_hex_mesh(mesh, folder / 'render_hex_mesh_2.png', resolution=resolution, sample=sample_num, transforms=transforms,
render_voxel_edge=True)
if verbose:
os.system('eog {}'.format(folder / 'render_hex_mesh_2.png'))
return True
if __name__ == '__main__':
# Use verbose = True by default in all example scripts.
verbose = True
test_render_hex_mesh(verbose)
| 37
| 124
| 0.713022
|
e4747d08e076bb3a14c0def8cd07c725a2d6f051
| 2,172
|
py
|
Python
|
embedding-calculator/srcext/insightface/src/data/dir2lst_ytf.py
|
drawdy/CompreFace
|
143b7955536f406a622248fad2d2108dfb5dd4f6
|
[
"Apache-2.0"
] | null | null | null |
embedding-calculator/srcext/insightface/src/data/dir2lst_ytf.py
|
drawdy/CompreFace
|
143b7955536f406a622248fad2d2108dfb5dd4f6
|
[
"Apache-2.0"
] | null | null | null |
embedding-calculator/srcext/insightface/src/data/dir2lst_ytf.py
|
drawdy/CompreFace
|
143b7955536f406a622248fad2d2108dfb5dd4f6
|
[
"Apache-2.0"
] | null | null | null |
# Version: 2020.02.21
#
# MIT License
#
# Copyright (c) 2018 Jiankang Deng and Jia Guo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
from easydict import EasyDict as edict
input_dir = '/raid5data/dplearn/YTF/aligned_images_DB'
ret = []
label = 0
person_names = []
for person_name in os.listdir(input_dir):
person_names.append(person_name)
person_names = sorted(person_names)
for person_name in person_names:
_subdir = os.path.join(input_dir, person_name)
if not os.path.isdir(_subdir):
continue
for _subdir2 in os.listdir(_subdir):
_subdir2 = os.path.join(_subdir, _subdir2)
if not os.path.isdir(_subdir2):
continue
_ret = []
for img in os.listdir(_subdir2):
fimage = edict()
fimage.id = os.path.join(_subdir2, img)
fimage.classname = str(label)
fimage.image_path = os.path.join(_subdir2, img)
fimage.bbox = None
fimage.landmark = None
_ret.append(fimage)
ret += _ret
label += 1
for item in ret:
print("%d\t%s\t%d" % (1, item.image_path, int(item.classname)))
| 37.448276
| 81
| 0.699355
|
9a4b84f720a0fadebfc07b67c7cad4d084768d6b
| 2,237
|
py
|
Python
|
src/rezgui/dialogs/BrowsePackageDialog.py
|
alexey-pelykh/rez
|
ad12105d89d658e4d2ea9249e537b3de90391f0e
|
[
"Apache-2.0"
] | null | null | null |
src/rezgui/dialogs/BrowsePackageDialog.py
|
alexey-pelykh/rez
|
ad12105d89d658e4d2ea9249e537b3de90391f0e
|
[
"Apache-2.0"
] | null | null | null |
src/rezgui/dialogs/BrowsePackageDialog.py
|
alexey-pelykh/rez
|
ad12105d89d658e4d2ea9249e537b3de90391f0e
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from Qt import QtWidgets
from rezgui.util import create_pane
from rezgui.mixins.StoreSizeMixin import StoreSizeMixin
from rezgui.widgets.BrowsePackageWidget import BrowsePackageWidget
from rezgui.objects.App import app
class BrowsePackageDialog(QtWidgets.QDialog, StoreSizeMixin):
def __init__(self, context_model, package_text=None, parent=None,
close_only=False, lock_package=False,
package_selectable_callback=None):
config_key = "layout/window/browse_package"
super(BrowsePackageDialog, self).__init__(parent)
StoreSizeMixin.__init__(self, app.config, config_key)
self.setWindowTitle("Find Package")
self.package = None
self.widget = BrowsePackageWidget(
context_model, self, lock_package=lock_package,
package_selectable_callback=package_selectable_callback)
self.ok_btn = QtWidgets.QPushButton("Ok")
buttons = [self.ok_btn]
if close_only:
close_btn = QtWidgets.QPushButton("Close")
buttons.insert(0, close_btn)
close_btn.clicked.connect(self.close)
self.ok_btn.hide()
else:
cancel_btn = QtWidgets.QPushButton("Cancel")
cancel_btn.clicked.connect(self.close)
buttons.insert(0, cancel_btn)
self.ok_btn.setEnabled(False)
btn_pane = create_pane([None] + buttons, True)
layout = QtWidgets.QVBoxLayout()
layout.addWidget(self.widget)
layout.addWidget(btn_pane)
self.setLayout(layout)
self.ok_btn.clicked.connect(self._ok)
self.widget.packageSelected.connect(self._set_package)
self.widget.set_package_text(package_text)
def _set_package(self):
package = self.widget.current_package()
if package is None:
self.setWindowTitle("Find Package")
self.ok_btn.setEnabled(False)
else:
self.setWindowTitle("Find Package - %s" % package.qualified_name)
self.ok_btn.setEnabled(True)
def _ok(self):
self.package = self.widget.current_package()
self.close()
| 34.953125
| 77
| 0.671882
|
757b0d5537634c2c0e26dbd2c08a382f4c43fda7
| 8,205
|
py
|
Python
|
Lib/test/test_urllibnet.py
|
jimmyyu2004/jython
|
5b4dc2d54d01a6fda8c55d07b2608167e7a40769
|
[
"CNRI-Jython"
] | 332
|
2015-08-22T12:43:56.000Z
|
2022-03-17T01:05:43.000Z
|
Lib/test/test_urllibnet.py
|
Pandinosaurus/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
[
"CNRI-Jython"
] | 36
|
2015-05-30T08:39:19.000Z
|
2022-03-04T20:42:33.000Z
|
Lib/test/test_urllibnet.py
|
Pandinosaurus/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
[
"CNRI-Jython"
] | 74
|
2015-05-29T17:18:53.000Z
|
2022-01-15T14:06:44.000Z
|
#!/usr/bin/env python
import unittest
from test import support
import socket
import urllib.request, urllib.parse, urllib.error
import sys
import os
import time
mimetools = support.import_module("mimetools", deprecated=True)
def _open_with_retry(func, host, *args, **kwargs):
# Connecting to remote hosts is flaky. Make it more robust
# by retrying the connection several times.
for i in range(3):
try:
return func(host, *args, **kwargs)
except IOError as last_exc:
continue
except:
raise
raise last_exc
class URLTimeoutTest(unittest.TestCase):
TIMEOUT = 10.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
f = _open_with_retry(urllib.request.urlopen, "http://www.python.org/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
http://www.python.org/ since there a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
def urlopen(self, *args):
return _open_with_retry(urllib.request.urlopen, *args)
def test_basic(self):
# Simple test expected to pass.
open_url = self.urlopen("http://www.python.org/")
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_readlines(self):
# Test both readline and readlines.
open_url = self.urlopen("http://www.python.org/")
try:
self.assertIsInstance(open_url.readline(), str,
"readline did not return a string")
self.assertIsInstance(open_url.readlines(), list,
"readlines did not return a list")
finally:
open_url.close()
def test_info(self):
# Test 'info'.
open_url = self.urlopen("http://www.python.org/")
try:
info_obj = open_url.info()
finally:
open_url.close()
self.assertIsInstance(info_obj, mimetools.Message,
"object returned by 'info' is not an "
"instance of mimetools.Message")
self.assertEqual(info_obj.getsubtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
#
# This test has been changed from what's currently in our
# lib-python/2.7 for Jython due to recent updates at the
# python.org to use https; other tests can take advantate of
# URL redirection
URL = "https://www.python.org/"
open_url = self.urlopen(URL)
try:
gotten_url = open_url.geturl()
finally:
open_url.close()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.python.org/XXXinvalidXXX"
open_url = urllib.request.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
@unittest.skipIf(support.is_jython, "Sockets cannot be used as file descriptors")
def test_fileno(self):
if (sys.platform in ('win32',) or
not hasattr(os, 'fdopen')):
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
return
# Make sure fd returned by fileno is valid.
open_url = self.urlopen("http://www.python.org/")
fd = open_url.fileno()
FILE = os.fdopen(fd)
try:
self.assertTrue(FILE.read(), "reading from file created using fd "
"returned by fileno failed")
finally:
FILE.close()
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
bogus_domain = "sadflkjsasf.i.nvali.d"
try:
socket.gethostbyname(bogus_domain)
except socket.gaierror:
pass
else:
# This happens with some overzealous DNS providers such as OpenDNS
self.skipTest("%r should not resolve for test to work" % bogus_domain)
self.assertRaises(IOError,
# SF patch 809915: In Sep 2003, VeriSign started
# highjacking invalid .com and .net addresses to
# boost traffic to their own site. This test
# started failing then. One hopes the .invalid
# domain will be spared to serve its defined
# purpose.
# urllib.urlopen, "http://www.sadflkjsasadf.com/")
urllib.request.urlopen, "http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.urlretrieve using the network."""
def urlretrieve(self, *args):
return _open_with_retry(urllib.request.urlretrieve, *args)
def test_basic(self):
# Test basic functionality.
file_location, info = self.urlretrieve("http://www.python.org/")
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
FILE = open(file_location)
try:
self.assertTrue(FILE.read(), "reading from the file location returned"
" by urlretrieve failed")
finally:
FILE.close()
os.unlink(file_location)
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
file_location, info = self.urlretrieve("http://www.python.org/",
support.TESTFN)
self.assertEqual(file_location, support.TESTFN)
self.assertTrue(os.path.exists(file_location))
FILE = open(file_location)
try:
self.assertTrue(FILE.read(), "reading from temporary file failed")
finally:
FILE.close()
os.unlink(file_location)
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
file_location, header = self.urlretrieve("http://www.python.org/")
os.unlink(file_location)
self.assertIsInstance(header, mimetools.Message,
"header is not an instance of mimetools.Message")
def test_data_header(self):
logo = "http://www.python.org/community/logos/python-logo-master-v3-TM.png"
file_location, fileheaders = self.urlretrieve(logo)
os.unlink(file_location)
datevalue = fileheaders.getheader('Date')
dateformat = '%a, %d %b %Y %H:%M:%S GMT'
try:
time.strptime(datevalue, dateformat)
except ValueError:
self.fail('Date value not in %r format', dateformat)
def test_main():
support.requires('network')
with support.check_py3k_warnings(
("urllib.urlopen.. has been removed", DeprecationWarning)):
support.run_unittest(URLTimeoutTest,
urlopenNetworkTests,
urlretrieveNetworkTests)
if __name__ == "__main__":
test_main()
| 37.295455
| 85
| 0.5961
|
3911ab48338ce149950f4ef95a156f1540c3e514
| 3,325
|
py
|
Python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2016_08_01/_configuration.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2016_08_01/_configuration.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2016_08_01/_configuration.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class WebSiteManagementClientConfiguration(Configuration):
"""Configuration for WebSiteManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(WebSiteManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2016-08-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-web/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 46.830986
| 132
| 0.686316
|
199744d0636d1f1180268da32663f1a92da92ab2
| 210
|
py
|
Python
|
steps/pytorch/architectures/utils.py
|
dineshsonachalam/data-science-bowl-2018
|
d791c007cc5bfe313df0f05d2c3970cd316a4ba2
|
[
"MIT"
] | 11
|
2018-05-10T08:56:05.000Z
|
2018-06-23T05:00:36.000Z
|
steps/pytorch/architectures/utils.py
|
dineshsonachalam/data-science-bowl-2018
|
d791c007cc5bfe313df0f05d2c3970cd316a4ba2
|
[
"MIT"
] | 29
|
2018-05-05T10:56:44.000Z
|
2018-06-17T17:14:30.000Z
|
steps/pytorch/architectures/utils.py
|
dineshsonachalam/data-science-bowl-2018
|
d791c007cc5bfe313df0f05d2c3970cd316a4ba2
|
[
"MIT"
] | 8
|
2018-04-16T07:15:25.000Z
|
2019-06-25T12:42:53.000Z
|
import torch.nn as nn
class Reshape(nn.Module):
def __init__(self, *shape):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, x):
return x.view(*self.shape)
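# Illustrative sketch (not part of the original file): Reshape lets view() be used
# inside nn.Sequential, e.g. to flatten a conv feature map before a linear layer.
# The layer sizes below are hypothetical.
def _reshape_example():
    import torch
    model = nn.Sequential(
        nn.Conv2d(1, 8, kernel_size=3, padding=1),
        Reshape(-1, 8 * 28 * 28),
        nn.Linear(8 * 28 * 28, 10),
    )
    return model(torch.zeros(4, 1, 28, 28)).shape  # -> torch.Size([4, 10])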
| 21
| 39
| 0.619048
|
4ae3739692731e7e7fb097b37540fc955acb86bf
| 6,378
|
py
|
Python
|
EvernoteTest.py
|
Lee-Kevin/EvernoteApp
|
9a57c38787f41c37295b6ec753d528cbda1eb7a9
|
[
"MIT"
] | null | null | null |
EvernoteTest.py
|
Lee-Kevin/EvernoteApp
|
9a57c38787f41c37295b6ec753d528cbda1eb7a9
|
[
"MIT"
] | null | null | null |
EvernoteTest.py
|
Lee-Kevin/EvernoteApp
|
9a57c38787f41c37295b6ec753d528cbda1eb7a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from evernote.api.client import EvernoteClient
from HTMLParser import HTMLParser
import talkey
from weather import weatherReport
import threading
import time
logging.basicConfig(level='INFO')
# define a global threading lock
Global_Lock = threading.Lock()
class MyHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.ToDo = []
self.Flag = None
def handle_starttag(self, tag, attrs):
logging.info("Encountered a start tag: %s, %s", tag,attrs)
if tag == "en-todo":
logging.info( "this is to do tag:")
if len(attrs) == 0: # Here are the things that still need to be done
self.Flag = True
logging.info("Here is need to be done")
else:
if (attrs[0][0] == "checked" and attrs[0][1] == "true"):
logging.info("Here is already done")
def handle_data(self, data):
#print("Encountered some data :", data)
if self.Flag == True:
logging.info(data)
self.Flag = False
self.ToDo.append(data)
else:
pass
def GetResult(self):
result = self.ToDo
self.ToDo = []
return result
# 3bee4c0c-2caf-413c-9e49-d51da6fcdc8c
dev_token = "S=s1:U=92b7b:E=15d39d06877:C=155e21f3928:P=1cd:A=en-devtoken:V=2:H=1304173954fbc76d7432cdf262f7b228"
noteGuid = "1e77d88b-49e6-4410-aaf5-c85c3bb70a0d"
tts = talkey.Talkey()
tts.say("This is a test")
# Sign in the Evernote
client = None
noteStore = None
def SignInEvernote():
global client,noteStore
result = False
try:
client = EvernoteClient(token=dev_token)
userStore = client.get_user_store()
user = userStore.getUser() # here will throw an error
logging.info(user.username)
noteStore = client.get_note_store()
result = True
except Exception, e:
logging.warn(e)
return result
def GetNoteContent(noteGuid):
global noteStore
content = None
try:
content = noteStore.getNoteContent(noteGuid)
except Exception,e:
logging.warn(e)
return content
#parser = MyHTMLParser()
#parser.feed(content)
#This is the Time Out var.
TimeOutIndex = 0
weatherSpeach = None
def weatherInformation():
speach = None
city = "shenzhen"
weather = weatherReport(city)
if weather.getWeather() == True:
speach = ("The weather is %s. Temperature: %.1f. Humidity: %.1f%%. Wind speed: %.1f meters per second" % (weather.weather_desc,weather.temperature,weather.humidity,weather.wind_speed))
logging.info(speach)
return speach
# A new class that used to manage the thread
class GetWeatherInfoThread(threading.Thread):
def __init__(self,timeout = 1.0):
threading.Thread.__init__(self)
self.timeout = timeout
self._running = True
self.weatherSpeach = None
self.subthread = None
def terminate(self):
self._running = False
def runloop(self,TimeInterval):
self._running = True
def TargetFun(self, _TimeInterval):
while self._running:
speach = weatherInformation()
if speach != None:
global Global_Lock
Global_Lock.acquire()
self.weatherSpeach = speach
Global_Lock.release()
else:
pass
import time
time.sleep(_TimeInterval)
self.subthread = threading.Thread(target=TargetFun,args=(self, TimeInterval,))
self.subthread.start()
def isRunning(self):
if self.subthread.is_alive():
return True
else:
return False
# A new class that used to manage the thread
class GetEvernoteThread(threading.Thread):
def __init__(self,timeout = 1.0):
threading.Thread.__init__(self)
self.timeout = timeout
self._running = True
self.content = None
self.subthread = None
def terminate(self):
self._running = False
def runloop(self,TimeInterval,noteGuid):
self._running = True
def TargetFun(self, _TimeInterval,_noteGuid):
while self._running:
content = GetNoteContent(_noteGuid)
if content != None:
global Global_Lock
Global_Lock.acquire()
self.content = content
Global_Lock.release()
else:
pass
import time
time.sleep(_TimeInterval)
self.subthread = threading.Thread(target=TargetFun,args=(self, TimeInterval,noteGuid))
self.subthread.start()
def isRunning(self):
if self.subthread.is_alive():
return True
else:
return False
if __name__ == "__main__":
Task1Weather = GetWeatherInfoThread()
Task1Weather.runloop(5) # The Time Interval is 5 second
SignResult = SignInEvernote()
while SignResult == False:
TimeOutIndex = TimeOutIndex + 1
if TimeOutIndex == 10:
logging.warn("Can't Sign in the Evernote")
TimeOutIndex = 0
break
SignResult = SignInEvernote()
Task2Evernote = GetEvernoteThread()
Task2Evernote.runloop(10,noteGuid)
parser = MyHTMLParser()
logging.info("你好")
while True:
try:
logging.info("This is in loop")
time.sleep(6)
logging.info(Task1Weather.weatherSpeach)
if Task1Weather.weatherSpeach != None:
tts.say(Task1Weather.weatherSpeach)
else:
pass
if Task2Evernote.content != None:
parser.feed(Task2Evernote.content)
content = parser.GetResult()
for result in content:
logging.info("The result is :%s",result)
tts.say(result)
else :
pass
except KeyboardInterrupt:
Task1Weather.terminate()
Task2Evernote.terminate()
exit()
except Exception, e:
logging.info(e)
| 30.084906
| 192
| 0.581373
|
a4c28b6344c59c341beabc1d04f64d66c3041052
| 1,346
|
py
|
Python
|
examples/py/fetch-ohlcv-cex.py
|
diwenshi61/ccxt
|
ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6
|
[
"MIT"
] | 24,910
|
2017-10-27T21:41:59.000Z
|
2022-03-31T23:08:57.000Z
|
examples/py/fetch-ohlcv-cex.py
|
diwenshi61/ccxt
|
ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6
|
[
"MIT"
] | 8,201
|
2017-10-28T10:19:28.000Z
|
2022-03-31T23:49:37.000Z
|
examples/py/fetch-ohlcv-cex.py
|
diwenshi61/ccxt
|
ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6
|
[
"MIT"
] | 6,632
|
2017-10-28T02:53:24.000Z
|
2022-03-31T23:20:14.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
import asciichart
# -----------------------------------------------------------------------------
this_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.dirname(os.path.dirname(this_folder))
sys.path.append(root_folder + '/python')
sys.path.append(this_folder)
# -----------------------------------------------------------------------------
import ccxt # noqa: E402
# -----------------------------------------------------------------------------
exchange = ccxt.cex()
symbol = 'BTC/USD'
# each ohlcv candle is a list of [ timestamp, open, high, low, close, volume ]
index = 4 # use close price from each ohlcv candle
length = 80
height = 15
def print_chart(exchange, symbol, timeframe):
print("\n" + exchange.name + ' ' + symbol + ' ' + timeframe + ' chart:')
# get a list of ohlcv candles
ohlcv = exchange.fetch_ohlcv(symbol, timeframe)
# get the closing price from each candle (index == 4)
series = [x[index] for x in ohlcv]
# print the chart
print("\n" + asciichart.plot(series[-length:], {'height': height})) # print the chart
last = ohlcv[len(ohlcv) - 1][index] # last closing price
return last
last = print_chart(exchange, symbol, '1m')
print("\n" + exchange.name + " ₿ = $" + str(last) + "\n") # print last closing price
| 27.469388
| 90
| 0.541605
|
42965654392082e5f7efb172f922d2728802340e
| 7,611
|
py
|
Python
|
service/common/pip_installer.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 213
|
2021-06-11T01:15:16.000Z
|
2022-02-25T16:18:57.000Z
|
service/common/pip_installer.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 32
|
2021-06-17T17:58:54.000Z
|
2022-02-02T05:58:10.000Z
|
service/common/pip_installer.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 28
|
2021-06-17T17:34:21.000Z
|
2022-03-24T14:05:20.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Installs dependencies using pip."""
import importlib
import importlib.util
import logging
import os
import platform
import subprocess
import sys
from typing import Dict, List, NamedTuple
# A dictionary where the key is a platform name
# returned by platform.system() and the value contains a list of supported
# architectures for the package. If no constraints are found for the current
# platform it is assumed the package is available for the platform.
PlatformConstraints = Dict[str, List[str]]
class ModuleInfo(NamedTuple):
"""PIP module installation information and constraints."""
# Name of the module to install.
pip_module_name: str
# Name of the optional module to import to determine whether the required
# pip package is installed. pip_installer searches the module path for this
# module to determine whether the PIP module pip_module_name is installed.
# By setting this value pip_installer can avoid using `pip list`, which is
# a very slow operation, to determine whether a module is installed.
import_module_name: str = ''
# An optional version constraint for the pip module.
version_constraint: str = ''
# Constraints that need to be satisfied for the host platform before trying
# to install the module.
platform_constraints: PlatformConstraints = {}
# Modules required to execute service modules and tests.
_REQUIRED_PYTHON_MODULES = [
ModuleInfo(pip_module_name='absl-py', import_module_name='absl'),
ModuleInfo(pip_module_name='braceexpand', import_module_name='braceexpand'),
ModuleInfo(pip_module_name='numpy', import_module_name='numpy'),
ModuleInfo(pip_module_name='tensorflow', import_module_name='tensorflow',
version_constraint='>=2.5.0',
platform_constraints={'windows': ['64bit']}),
ModuleInfo(pip_module_name='tf-agents', import_module_name='tf_agents',
version_constraint='==0.8.0rc1'),
ModuleInfo(pip_module_name='grpcio-tools',
import_module_name='grpc.tools.protoc'),
ModuleInfo(pip_module_name='googleapis-common-protos',
import_module_name='google.rpc.status_pb2'),
ModuleInfo(pip_module_name='flatbuffers', import_module_name='flatbuffers'),
ModuleInfo(pip_module_name='flufl.lock', import_module_name='flufl.lock'),
]
_PIP_INSTALL_ARGS = [sys.executable, '-m', 'pip', 'install', '--user']
# Cache of installed modules populated by _module_installed().
_INSTALLED_MODULE_LIST = []
class PlatformConstraintError(RuntimeError):
"""Raised if the current platform doesn't support a package."""
pass
def _clear_installed_modules_cache():
"""Flush cache of installed modules."""
global _INSTALLED_MODULE_LIST
_INSTALLED_MODULE_LIST = []
importlib.invalidate_caches()
def find_module_by_name(import_module_name: str, search_path: str = ''):
"""Determine whether a module can be imported.
After calling this method if a module is subsequently installed or made
available via sys.path, the caller must call importlib.invalidate_caches()
before trying to import the newly available module.
Args:
import_module_name: Name of the module to import or check whether it's
installed.
search_path: Optional additional path to add to sys.path to search.
Returns:
True if the module can be imported, False otherwise.
"""
if search_path:
original_sys_path = list(sys.path)
sys.path.append(search_path)
else:
original_sys_path = sys.path
try:
if importlib.util.find_spec(import_module_name):
return True
except ModuleNotFoundError:
pass
finally:
sys.path = original_sys_path
return False
def _module_installed(pip_module_name: str, import_module_name: str):
"""Determine whether a module is installed.
Args:
pip_module_name: Name of the Python module to query.
import_module_name: Optional name of a module to import to check whether
it's installed.
Returns:
True if installed, False otherwise.
Raises:
subprocess.CalledProcessError: If pip fails to list modules.
"""
if import_module_name and find_module_by_name(import_module_name):
return True
global _INSTALLED_MODULE_LIST
if not _INSTALLED_MODULE_LIST and not getattr(sys, 'frozen', False):
logging.debug('Listing installed pip packages')
result = subprocess.run([sys.executable, '-m', 'pip', 'list'],
stdout=subprocess.PIPE, check=True)
# Each line consists of "module_name\w+version", extract the module name
# from each line.
_INSTALLED_MODULE_LIST = [
l.split()[0] for l in result.stdout.decode('utf-8').splitlines()]
logging.debug('Found following installed packages: %s',
_INSTALLED_MODULE_LIST)
return pip_module_name in _INSTALLED_MODULE_LIST
def _install_module(module: str, version: str):
"""Install a Python module if the application isn't frozen.
Args:
module: Name of the module to install.
version: Version constraints for the module to install or an empty string
to install the latest.
Raises:
subprocess.CalledProcessError: If module installation fails.
"""
if not getattr(sys, 'frozen', False):
logging.info('Installing Python module %s...', module)
subprocess.check_call(_PIP_INSTALL_ARGS + [f'{module}{version}'])
def _check_platform_constraints(module: str, constraints: PlatformConstraints):
"""Check platform constraints for a module.
Args:
module: Name of the module.
constraints: Platform constraints dictionary, where the key is a platform
name returned by platform.system() and the value contains a list of
supported architectures for the package. If no constraints are found for
the current platform it is assumed the package is available for the
platform.
Raises:
PlatformConstraintError: If the platform doesn't meet the specified
constraints.
"""
system_name = platform.system().lower()
architecture = platform.architecture()
platform_constraints = constraints.get(system_name)
supported = (not platform_constraints or
any(a for a in architecture if a in platform_constraints))
if not supported:
raise PlatformConstraintError(
f'pip package {module} requires architecture {platform_constraints} '
f'on {system_name} but the current Python environment has '
f'architecture {architecture}. Try installing a different interpreter.')
def install_dependencies():
"""Install all Python module dependencies."""
modules_installed = False
for info in _REQUIRED_PYTHON_MODULES:
_check_platform_constraints(info.pip_module_name, info.platform_constraints)
if not _module_installed(info.pip_module_name, info.import_module_name):
_install_module(info.pip_module_name, info.version_constraint)
modules_installed = True
if modules_installed:
_clear_installed_modules_cache()
if int(os.environ.get('FALKEN_AUTO_INSTALL_DEPENDENCIES', 1)):
install_dependencies()
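
# --- Hedged illustration (not part of the original module) -------------------
# PlatformConstraints keys on platform.system() (lowercased) and lists the
# architectures a pip package supports on that OS; a missing entry means the
# package is assumed to be available. A standalone sketch of the same check
# with a made-up constraint table:
import platform

EXAMPLE_CONSTRAINTS = {'windows': ['64bit']}  # hypothetical: 64-bit only on Windows


def platform_supported(constraints):
    system_name = platform.system().lower()
    allowed = constraints.get(system_name)
    # No entry for this OS means no restriction.
    return not allowed or any(a in allowed for a in platform.architecture())


print(platform_supported(EXAMPLE_CONSTRAINTS))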
| 37.126829
| 80
| 0.74169
|
00517c65c660395f56e52f44b6a78e74d0cbc830
| 57,820
|
py
|
Python
|
release/scripts/startup/bl_ui/properties_constraint.py
|
linluofeng/upbge
|
50bc9bc923a41411461d662c0fddd58d1f0b3ab3
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/startup/bl_ui/properties_constraint.py
|
linluofeng/upbge
|
50bc9bc923a41411461d662c0fddd58d1f0b3ab3
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/startup/bl_ui/properties_constraint.py
|
linluofeng/upbge
|
50bc9bc923a41411461d662c0fddd58d1f0b3ab3
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from bpy.types import Panel
class ObjectConstraintPanel(Panel):
bl_context = "constraint"
@classmethod
def poll(cls, context):
return (context.object)
class BoneConstraintPanel(Panel):
bl_context = "bone_constraint"
@classmethod
def poll(cls, context):
return (context.pose_bone)
class OBJECT_PT_constraints(ObjectConstraintPanel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_label = "Object Constraints"
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
layout.operator_menu_enum("object.constraint_add", "type", text="Add Object Constraint")
layout.template_constraints(use_bone_constraints=False)
class BONE_PT_constraints(BoneConstraintPanel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_label = "Bone Constraints"
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
layout.operator_menu_enum("pose.constraint_add", "type", text="Add Bone Constraint")
layout.template_constraints(use_bone_constraints=True)
# Parent class for constraint panels, with templates and drawing methods
# shared between the bone and object constraint panels
class ConstraintButtonsPanel(Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_label = ""
bl_options = {'INSTANCED', 'HEADER_LAYOUT_EXPAND', 'DRAW_BOX'}
@staticmethod
def draw_influence(layout, con):
layout.separator()
if con.type in {'IK', 'SPLINE_IK'}:
# constraint.disable_keep_transform doesn't work well
# for these constraints.
layout.prop(con, "influence")
else:
row = layout.row(align=True)
row.prop(con, "influence")
row.operator("constraint.disable_keep_transform", text="", icon='CANCEL')
@staticmethod
def space_template(layout, con, target=True, owner=True):
if target or owner:
layout.separator()
if target:
layout.prop(con, "target_space", text="Target")
if owner:
layout.prop(con, "owner_space", text="Owner")
@staticmethod
def target_template(layout, con, subtargets=True):
col = layout.column()
col.prop(con, "target") # XXX limiting settings for only 'curves' or some type of object
if con.target and subtargets:
if con.target.type == 'ARMATURE':
col.prop_search(con, "subtarget", con.target.data, "bones", text="Bone")
if con.subtarget and hasattr(con, "head_tail"):
row = col.row(align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "head_tail")
# XXX icon, and only when bone has segments?
sub.prop(con, "use_bbone_shape", text="", icon='IPO_BEZIER')
row.prop_decorator(con, "head_tail")
elif con.target.type in {'MESH', 'LATTICE'}:
col.prop_search(con, "subtarget", con.target, "vertex_groups", text="Vertex Group")
def get_constraint(self, context):
con = self.custom_data
self.layout.context_pointer_set("constraint", con)
return con
def draw_header(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.template_constraint_header(con)
# Drawing methods for specific constraints. (Shared by object and bone constraint panels)
def draw_childof(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
row = layout.row(heading="Location")
row.use_property_decorate = False
row.prop(con, "use_location_x", text="X", toggle=True)
row.prop(con, "use_location_y", text="Y", toggle=True)
row.prop(con, "use_location_z", text="Z", toggle=True)
row.label(icon='BLANK1')
row = layout.row(heading="Rotation")
row.use_property_decorate = False
row.prop(con, "use_rotation_x", text="X", toggle=True)
row.prop(con, "use_rotation_y", text="Y", toggle=True)
row.prop(con, "use_rotation_z", text="Z", toggle=True)
row.label(icon='BLANK1')
row = layout.row(heading="Scale")
row.use_property_decorate = False
row.prop(con, "use_scale_x", text="X", toggle=True)
row.prop(con, "use_scale_y", text="Y", toggle=True)
row.prop(con, "use_scale_z", text="Z", toggle=True)
row.label(icon='BLANK1')
row = layout.row()
row.operator("constraint.childof_set_inverse")
row.operator("constraint.childof_clear_inverse")
self.draw_influence(layout, con)
def draw_trackto(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "track_axis", expand=True)
layout.prop(con, "up_axis", text="Up", expand=True)
layout.prop(con, "use_target_z")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_follow_path(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
if con.use_fixed_location:
layout.prop(con, "offset_factor", text="Offset Factor")
else:
layout.prop(con, "offset")
layout.prop(con, "forward_axis", expand=True)
layout.prop(con, "up_axis", expand=True)
col = layout.column()
col.prop(con, "use_fixed_location")
col.prop(con, "use_curve_radius")
col.prop(con, "use_curve_follow")
layout.operator("constraint.followpath_path_animate", text="Animate Path", icon='ANIM_DATA')
self.draw_influence(layout, con)
def draw_rot_limit(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
# Decorators and property split are really buggy with these properties
row = layout.row(heading="Limit X", align=True)
row.use_property_decorate = False
row.prop(con, "use_limit_x", text="")
sub = row.column(align=True)
sub.active = con.use_limit_x
sub.prop(con, "min_x", text="Min")
sub.prop(con, "max_x", text="Max")
row.label(icon="BLANK1")
row = layout.row(heading="Y", align=True)
row.use_property_decorate = False
row.prop(con, "use_limit_y", text="")
sub = row.column(align=True)
sub.active = con.use_limit_y
sub.prop(con, "min_y", text="Min")
sub.prop(con, "max_y", text="Max")
row.label(icon="BLANK1")
row = layout.row(heading="Z", align=True)
row.use_property_decorate = False
row.prop(con, "use_limit_z", text="")
sub = row.column(align=True)
sub.active = con.use_limit_z
sub.prop(con, "min_z", text="Min")
sub.prop(con, "max_z", text="Max")
row.label(icon="BLANK1")
layout.prop(con, "use_transform_limit")
layout.prop(con, "owner_space")
self.draw_influence(layout, con)
def draw_loc_limit(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
col = layout.column()
row = col.row(heading="Minimum X", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_min_x", text="")
subsub = sub.row(align=True)
subsub.active = con.use_min_x
subsub.prop(con, "min_x", text="")
row.prop_decorator(con, "min_x")
row = col.row(heading="Y", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_min_y", text="")
subsub = sub.row(align=True)
subsub.active = con.use_min_y
subsub.prop(con, "min_y", text="")
row.prop_decorator(con, "min_y")
row = col.row(heading="Z", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_min_z", text="")
subsub = sub.row(align=True)
subsub.active = con.use_min_z
subsub.prop(con, "min_z", text="")
row.prop_decorator(con, "min_z")
col.separator()
row = col.row(heading="Maximum X", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_max_x", text="")
subsub = sub.row(align=True)
subsub.active = con.use_max_x
subsub.prop(con, "max_x", text="")
row.prop_decorator(con, "max_x")
row = col.row(heading="Y", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_max_y", text="")
subsub = sub.row(align=True)
subsub.active = con.use_max_y
subsub.prop(con, "max_y", text="")
row.prop_decorator(con, "max_y")
row = col.row(heading="Z", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_max_z", text="")
subsub = sub.row(align=True)
subsub.active = con.use_max_z
subsub.prop(con, "max_z", text="")
row.prop_decorator(con, "max_z")
layout.prop(con, "use_transform_limit")
layout.prop(con, "owner_space")
self.draw_influence(layout, con)
def draw_size_limit(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
col = layout.column()
row = col.row(heading="Minimum X", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_min_x", text="")
subsub = sub.row(align=True)
subsub.active = con.use_min_x
subsub.prop(con, "min_x", text="")
row.prop_decorator(con, "min_x")
row = col.row(heading="Y", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_min_y", text="")
subsub = sub.row(align=True)
subsub.active = con.use_min_y
subsub.prop(con, "min_y", text="")
row.prop_decorator(con, "min_y")
row = col.row(heading="Z", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_min_z", text="")
subsub = sub.row(align=True)
subsub.active = con.use_min_z
subsub.prop(con, "min_z", text="")
row.prop_decorator(con, "min_z")
col.separator()
row = col.row(heading="Maximum X", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_max_x", text="")
subsub = sub.row(align=True)
subsub.active = con.use_max_x
subsub.prop(con, "max_x", text="")
row.prop_decorator(con, "max_x")
row = col.row(heading="Y", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_max_y", text="")
subsub = sub.row(align=True)
subsub.active = con.use_max_y
subsub.prop(con, "max_y", text="")
row.prop_decorator(con, "max_y")
row = col.row(heading="Z", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_max_z", text="")
subsub = sub.row(align=True)
subsub.active = con.use_max_z
subsub.prop(con, "max_z", text="")
row.prop_decorator(con, "max_z")
layout.prop(con, "use_transform_limit")
layout.prop(con, "owner_space")
self.draw_influence(layout, con)
def draw_rotate_like(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "euler_order", text="Order")
row = layout.row(heading="Axis", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_x", text="X", toggle=True)
sub.prop(con, "use_y", text="Y", toggle=True)
sub.prop(con, "use_z", text="Z", toggle=True)
row.label(icon='BLANK1')
row = layout.row(heading="Invert", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "invert_x", text="X", toggle=True)
sub.prop(con, "invert_y", text="Y", toggle=True)
sub.prop(con, "invert_z", text="Z", toggle=True)
row.label(icon='BLANK1')
layout.prop(con, "mix_mode", text="Mix")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_locate_like(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
row = layout.row(heading="Axis", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_x", text="X", toggle=True)
sub.prop(con, "use_y", text="Y", toggle=True)
sub.prop(con, "use_z", text="Z", toggle=True)
row.label(icon='BLANK1')
row = layout.row(heading="Invert", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "invert_x", text="X", toggle=True)
sub.prop(con, "invert_y", text="Y", toggle=True)
sub.prop(con, "invert_z", text="Z", toggle=True)
row.label(icon='BLANK1')
layout.prop(con, "use_offset")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_size_like(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
row = layout.row(heading="Axis", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_x", text="X", toggle=True)
sub.prop(con, "use_y", text="Y", toggle=True)
sub.prop(con, "use_z", text="Z", toggle=True)
row.label(icon='BLANK1')
col = layout.column()
col.prop(con, "power")
col.prop(con, "use_make_uniform")
col.prop(con, "use_offset")
row = col.row()
row.active = con.use_offset
row.prop(con, "use_add")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_same_volume(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
layout.prop(con, "mode")
row = layout.row(heading="Free Axis")
row.prop(con, "free_axis", expand=True)
layout.prop(con, "volume")
layout.prop(con, "owner_space")
self.draw_influence(layout, con)
def draw_trans_like(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "mix_mode", text="Mix")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_action(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
target_row = layout.row(align=True)
target_row.active = not con.use_eval_time
self.target_template(target_row, con)
row = layout.row(align=True, heading="Evaluation Time")
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_eval_time", text="")
subsub = sub.row(align=True)
subsub.active = con.use_eval_time
subsub.prop(con, "eval_time", text="")
row.prop_decorator(con, "eval_time")
layout.prop(con, "mix_mode", text="Mix")
self.draw_influence(layout, con)
def draw_lock_track(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "track_axis", expand=True)
layout.prop(con, "lock_axis", expand=True)
self.draw_influence(layout, con)
def draw_dist_limit(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
row = layout.row()
row.prop(con, "distance")
row.operator("constraint.limitdistance_reset", text="", icon="X")
layout.prop(con, "limit_mode", text="Clamp Region")
layout.prop(con, "use_transform_limit")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_stretch_to(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
row = layout.row()
row.prop(con, "rest_length")
row.operator("constraint.stretchto_reset", text="", icon="X")
layout.separator()
col = layout.column()
col.prop(con, "bulge", text="Volume Variation")
row = col.row(heading="Volume Min", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_bulge_min", text="")
subsub = sub.row(align=True)
subsub.active = con.use_bulge_min
subsub.prop(con, "bulge_min", text="")
row.prop_decorator(con, "bulge_min")
row = col.row(heading="Max", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_bulge_max", text="")
subsub = sub.row(align=True)
subsub.active = con.use_bulge_max
subsub.prop(con, "bulge_max", text="")
row.prop_decorator(con, "bulge_max")
row = col.row()
row.active = con.use_bulge_min or con.use_bulge_max
row.prop(con, "bulge_smooth", text="Smooth")
layout.prop(con, "volume", expand=True)
layout.prop(con, "keep_axis", text="Rotation", expand=True)
self.draw_influence(layout, con)
def draw_min_max(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "offset")
layout.prop(con, "floor_location", expand=True, text="Min/Max")
layout.prop(con, "use_rotation")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_rigid_body_joint(self, context):
layout = self.layout
con = self.get_constraint(context)
self.target_template(layout, con, subtargets=False)
layout.prop(con, "pivot_type")
layout.prop(con, "child")
row = layout.row()
row.prop(con, "use_linked_collision", text="Linked Collision")
row.prop(con, "show_pivot", text="Display Pivot")
row = layout.row()
row.prop(con, "use_breaking")
row = row.row()
row.active = con.use_breaking
row.prop(con, "breaking_threshold")
split = layout.split()
col = split.column(align=True)
col.label(text="Pivot:")
col.prop(con, "pivot_x", text="X")
col.prop(con, "pivot_y", text="Y")
col.prop(con, "pivot_z", text="Z")
col = split.column(align=True)
col.label(text="Axis:")
col.prop(con, "axis_x", text="X")
col.prop(con, "axis_y", text="Y")
col.prop(con, "axis_z", text="Z")
if con.pivot_type == 'CONE_TWIST':
layout.label(text="Limits:")
split = layout.split()
col = split.column()
col.prop(con, "use_angular_limit_x", text="Angle X")
sub = col.column()
sub.active = con.use_angular_limit_x
sub.prop(con, "limit_angle_max_x", text="")
col = split.column()
col.prop(con, "use_angular_limit_y", text="Angle Y")
sub = col.column()
sub.active = con.use_angular_limit_y
sub.prop(con, "limit_angle_max_y", text="")
col = split.column()
col.prop(con, "use_angular_limit_z", text="Angle Z")
sub = col.column()
sub.active = con.use_angular_limit_z
sub.prop(con, "limit_angle_max_z", text="")
elif con.pivot_type == 'GENERIC_6_DOF':
layout.label(text="Limits:")
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_limit_x", text="X")
sub = col.column(align=True)
sub.active = con.use_limit_x
sub.prop(con, "limit_min_x", text="Min")
sub.prop(con, "limit_max_x", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_y", text="Y")
sub = col.column(align=True)
sub.active = con.use_limit_y
sub.prop(con, "limit_min_y", text="Min")
sub.prop(con, "limit_max_y", text="Max")
col = split.column(align=True)
col.prop(con, "use_limit_z", text="Z")
sub = col.column(align=True)
sub.active = con.use_limit_z
sub.prop(con, "limit_min_z", text="Min")
sub.prop(con, "limit_max_z", text="Max")
split = layout.split()
col = split.column(align=True)
col.prop(con, "use_angular_limit_x", text="Angle X")
sub = col.column(align=True)
sub.active = con.use_angular_limit_x
sub.prop(con, "limit_angle_min_x", text="Min")
sub.prop(con, "limit_angle_max_x", text="Max")
col = split.column(align=True)
col.prop(con, "use_angular_limit_y", text="Angle Y")
sub = col.column(align=True)
sub.active = con.use_angular_limit_y
sub.prop(con, "limit_angle_min_y", text="Min")
sub.prop(con, "limit_angle_max_y", text="Max")
col = split.column(align=True)
col.prop(con, "use_angular_limit_z", text="Angle Z")
sub = col.column(align=True)
sub.active = con.use_angular_limit_z
sub.prop(con, "limit_angle_min_z", text="Min")
sub.prop(con, "limit_angle_max_z", text="Max")
elif con.pivot_type == 'HINGE':
layout.label(text="Limits:")
split = layout.split()
row = split.row(align=True)
col = row.column()
col.prop(con, "use_angular_limit_x", text="Angle X")
col = row.column()
col.active = con.use_angular_limit_x
col.prop(con, "limit_angle_min_x", text="Min")
col = row.column()
col.active = con.use_angular_limit_x
col.prop(con, "limit_angle_max_x", text="Max")
def draw_clamp_to(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "main_axis", expand=True)
layout.prop(con, "use_cyclic")
self.draw_influence(layout, con)
def draw_transform(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "use_motion_extrapolate", text="Extrapolate")
self.space_template(layout, con)
self.draw_influence(layout, con)
def draw_shrinkwrap(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con, False)
layout.prop(con, "distance")
layout.prop(con, "shrinkwrap_type", text="Mode")
layout.separator()
if con.shrinkwrap_type == 'PROJECT':
layout.prop(con, "project_axis", expand=True, text="Project Axis")
layout.prop(con, "project_axis_space", text="Space")
layout.prop(con, "project_limit", text="Distance")
layout.prop(con, "use_project_opposite")
layout.separator()
col = layout.column()
row = col.row()
row.prop(con, "cull_face", expand=True)
row = col.row()
row.active = con.use_project_opposite and con.cull_face != 'OFF'
row.prop(con, "use_invert_cull")
layout.separator()
if con.shrinkwrap_type in {'PROJECT', 'NEAREST_SURFACE', 'TARGET_PROJECT'}:
layout.prop(con, "wrap_mode", text="Snap Mode")
row = layout.row(heading="Align to Normal", align=True)
row.use_property_decorate = False
sub = row.row(align=True)
sub.prop(con, "use_track_normal", text="")
subsub = sub.row(align=True)
subsub.active = con.use_track_normal
subsub.prop(con, "track_axis", text="")
row.prop_decorator(con, "track_axis")
self.draw_influence(layout, con)
def draw_damp_track(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
layout.prop(con, "track_axis", expand=True)
self.draw_influence(layout, con)
def draw_spline_ik(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
self.draw_influence(layout, con)
def draw_pivot(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
if con.target:
layout.prop(con, "offset", text="Pivot Offset")
else:
layout.prop(con, "use_relative_location")
if con.use_relative_location:
layout.prop(con, "offset", text="Pivot Point")
else:
layout.prop(con, "offset", text="Pivot Point")
col = layout.column()
col.prop(con, "rotation_range", text="Rotation Range")
self.draw_influence(layout, con)
def draw_follow_track(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
clip = None
if con.use_active_clip:
clip = context.scene.active_clip
else:
clip = con.clip
layout.prop(con, "use_active_clip")
layout.prop(con, "use_3d_position")
row = layout.row()
row.active = not con.use_3d_position
row.prop(con, "use_undistorted_position")
if not con.use_active_clip:
layout.prop(con, "clip")
layout.prop(con, "frame_method")
if clip:
tracking = clip.tracking
layout.prop_search(con, "object", tracking, "objects", icon='OBJECT_DATA')
tracking_object = tracking.objects.get(con.object, tracking.objects[0])
layout.prop_search(con, "track", tracking_object, "tracks", icon='ANIM_DATA')
layout.prop(con, "camera")
row = layout.row()
row.active = not con.use_3d_position
row.prop(con, "depth_object")
layout.operator("clip.constraint_to_fcurve")
self.draw_influence(layout, con)
def draw_camera_solver(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
layout.prop(con, "use_active_clip")
if not con.use_active_clip:
layout.prop(con, "clip")
layout.operator("clip.constraint_to_fcurve")
self.draw_influence(layout, con)
def draw_object_solver(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
clip = None
if con.use_active_clip:
clip = context.scene.active_clip
else:
clip = con.clip
layout.prop(con, "use_active_clip")
if not con.use_active_clip:
layout.prop(con, "clip")
if clip:
layout.prop_search(con, "object", clip.tracking, "objects", icon='OBJECT_DATA')
layout.prop(con, "camera")
row = layout.row()
row.operator("constraint.objectsolver_set_inverse")
row.operator("constraint.objectsolver_clear_inverse")
layout.operator("clip.constraint_to_fcurve")
self.draw_influence(layout, con)
def draw_transform_cache(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
layout.template_cache_file(con, "cache_file")
cache_file = con.cache_file
if cache_file is not None:
layout.prop_search(con, "object_path", cache_file, "object_paths")
self.draw_influence(layout, con)
def draw_python_constraint(self, context):
layout = self.layout
layout.label(text="Blender 2.6 doesn't support python constraints yet")
def draw_armature(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
col = layout.column()
col.prop(con, "use_deform_preserve_volume")
col.prop(con, "use_bone_envelopes")
if context.pose_bone:
col.prop(con, "use_current_location")
layout.operator("constraint.add_target", text="Add Target Bone")
layout.operator("constraint.normalize_target_weights")
self.draw_influence(layout, con)
if not con.targets:
layout.label(text="No target bones added", icon='ERROR')
def draw_kinematic(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
self.target_template(layout, con)
if context.object.pose.ik_solver == 'ITASC':
layout.prop(con, "ik_type")
# This button gives itself too much padding, so put it in a column with the subtarget
col = layout.column()
col.prop(con, "pole_target")
if con.pole_target and con.pole_target.type == 'ARMATURE':
col.prop_search(con, "pole_subtarget", con.pole_target.data, "bones", text="Bone")
col = layout.column()
if con.pole_target:
col.prop(con, "pole_angle")
col.prop(con, "use_tail")
col.prop(con, "use_stretch")
col.prop(con, "chain_count")
if con.ik_type == 'COPY_POSE':
layout.prop(con, "reference_axis", expand=True)
# Use separate rows and columns here to avoid an alignment issue with the lock buttons
loc_col = layout.column()
loc_col.prop(con, "use_location")
row = loc_col.row()
row.active = con.use_location
row.prop(con, "weight", text="Weight", slider=True)
row = loc_col.row(heading="Lock", align=True)
row.use_property_decorate = False
row.active = con.use_location
sub = row.row(align=True)
sub.prop(con, "lock_location_x", text="X", toggle=True)
sub.prop(con, "lock_location_y", text="Y", toggle=True)
sub.prop(con, "lock_location_z", text="Z", toggle=True)
row.label(icon='BLANK1')
rot_col = layout.column()
rot_col.prop(con, "use_rotation")
row = rot_col.row()
row.active = con.use_rotation
row.prop(con, "orient_weight", text="Weight", slider=True)
row = rot_col.row(heading="Lock", align=True)
row.use_property_decorate = False
row.active = con.use_rotation
sub = row.row(align=True)
sub.prop(con, "lock_rotation_x", text="X", toggle=True)
sub.prop(con, "lock_rotation_y", text="Y", toggle=True)
sub.prop(con, "lock_rotation_z", text="Z", toggle=True)
row.label(icon='BLANK1')
elif con.ik_type == 'DISTANCE':
layout.prop(con, "limit_mode")
col = layout.column()
col.prop(con, "weight", text="Weight", slider=True)
col.prop(con, "distance", text="Distance", slider=True)
else:
# Standard IK constraint
col = layout.column()
col.prop(con, "pole_target")
if con.pole_target and con.pole_target.type == 'ARMATURE':
col.prop_search(con, "pole_subtarget", con.pole_target.data, "bones", text="Bone")
col = layout.column()
if con.pole_target:
col.prop(con, "pole_angle")
col.prop(con, "iterations")
col.prop(con, "chain_count")
col.prop(con, "use_tail")
col.prop(con, "use_stretch")
col = layout.column()
row = col.row(align=True, heading="Weight Position")
row.prop(con, "use_location", text="")
sub = row.row(align=True)
sub.active = con.use_location
sub.prop(con, "weight", text="", slider=True)
row = col.row(align=True, heading="Rotation")
row.prop(con, "use_rotation", text="")
sub = row.row(align=True)
sub.active = con.use_rotation
sub.prop(con, "orient_weight", text="", slider=True)
self.draw_influence(layout, con)
# Parent class for constraint subpanels
class ConstraintButtonsSubPanel(Panel):
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_label = ""
bl_options = {'DRAW_BOX'}
def get_constraint(self, context):
con = self.custom_data
self.layout.context_pointer_set("constraint", con)
return con
def draw_transform_from(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.prop(con, "map_from", expand=True)
layout.use_property_split = True
layout.use_property_decorate = True
from_axes = [con.map_to_x_from, con.map_to_y_from, con.map_to_z_from]
if con.map_from == 'ROTATION':
layout.prop(con, "from_rotation_mode", text="Mode")
ext = "" if con.map_from == 'LOCATION' else "_rot" if con.map_from == 'ROTATION' else "_scale"
col = layout.column(align=True)
col.active = "X" in from_axes
col.prop(con, "from_min_x" + ext, text="X Min")
col.prop(con, "from_max_x" + ext, text="Max")
col = layout.column(align=True)
col.active = "Y" in from_axes
col.prop(con, "from_min_y" + ext, text="Y Min")
col.prop(con, "from_max_y" + ext, text="Max")
col = layout.column(align=True)
col.active = "Z" in from_axes
col.prop(con, "from_min_z" + ext, text="Z Min")
col.prop(con, "from_max_z" + ext, text="Max")
def draw_transform_to(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.prop(con, "map_to", expand=True)
layout.use_property_split = True
layout.use_property_decorate = True
if con.map_to == 'ROTATION':
layout.prop(con, "to_euler_order", text="Order")
ext = "" if con.map_to == 'LOCATION' else "_rot" if con.map_to == 'ROTATION' else "_scale"
col = layout.column(align=True)
col.prop(con, "map_to_x_from", expand=False, text="X Source Axis")
col.prop(con, "to_min_x" + ext, text="Min")
col.prop(con, "to_max_x" + ext, text="Max")
col = layout.column(align=True)
col.prop(con, "map_to_y_from", expand=False, text="Y Source Axis")
col.prop(con, "to_min_y" + ext, text="Min")
col.prop(con, "to_max_y" + ext, text="Max")
col = layout.column(align=True)
col.prop(con, "map_to_z_from", expand=False, text="Z Source Axis")
col.prop(con, "to_min_z" + ext, text="Min")
col.prop(con, "to_max_z" + ext, text="Max")
layout.prop(con, "mix_mode" + ext, text="Mix")
def draw_armature_bones(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
for i, tgt in enumerate(con.targets):
has_target = tgt.target is not None
box = layout.box()
header = box.row()
header.use_property_split = False
split = header.split(factor=0.45, align=True)
split.prop(tgt, "target", text="")
row = split.row(align=True)
row.active = has_target
if has_target:
row.prop_search(tgt, "subtarget", tgt.target.data, "bones", text="")
else:
row.prop(tgt, "subtarget", text="", icon='BONE_DATA')
header.operator("constraint.remove_target", text="", icon='X').index = i
row = box.row()
row.active = has_target and tgt.subtarget != ""
row.prop(tgt, "weight", slider=True, text="Weight")
def draw_spline_ik_fitting(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
col = layout.column()
col.prop(con, "chain_count")
col.prop(con, "use_even_divisions")
col.prop(con, "use_chain_offset")
def draw_spline_ik_chain_scaling(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
layout.prop(con, "use_curve_radius")
layout.prop(con, "y_scale_mode")
layout.prop(con, "xz_scale_mode")
if con.xz_scale_mode in {'INVERSE_PRESERVE', 'VOLUME_PRESERVE'}:
layout.prop(con, "use_original_scale")
if con.xz_scale_mode == 'VOLUME_PRESERVE':
col = layout.column()
col.prop(con, "bulge", text="Volume Variation")
row = col.row(heading="Volume Min")
row.prop(con, "use_bulge_min", text="")
sub = row.row()
sub.active = con.use_bulge_min
sub.prop(con, "bulge_min", text="")
row = col.row(heading="Max")
row.prop(con, "use_bulge_max", text="")
sub = row.row()
sub.active = con.use_bulge_max
sub.prop(con, "bulge_max", text="")
row = layout.row()
row.active = con.use_bulge_min or con.use_bulge_max
row.prop(con, "bulge_smooth", text="Smooth")
def draw_action_target(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
col = layout.column()
col.active = not con.use_eval_time
col.prop(con, "transform_channel", text="Channel")
col.prop(con, "target_space")
sub = col.column(align=True)
sub.prop(con, "min", text="Range Min")
sub.prop(con, "max", text="Max")
def draw_action_action(self, context):
layout = self.layout
con = self.get_constraint(context)
layout.use_property_split = True
layout.use_property_decorate = True
layout.prop(con, "action")
layout.prop(con, "use_bone_object_action")
col = layout.column(align=True)
col.prop(con, "frame_start", text="Frame Start")
col.prop(con, "frame_end", text="End")
# Child Of Constraint
class OBJECT_PT_bChildOfConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_childof(context)
class BONE_PT_bChildOfConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_childof(context)
# Track To Constraint
class OBJECT_PT_bTrackToConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_trackto(context)
class BONE_PT_bTrackToConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_trackto(context)
# Follow Path Constraint
class OBJECT_PT_bFollowPathConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_follow_path(context)
class BONE_PT_bFollowPathConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_follow_path(context)
# Rotation Limit Constraint
class OBJECT_PT_bRotLimitConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_rot_limit(context)
class BONE_PT_bRotLimitConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_rot_limit(context)
# Location Limit Constraint
class OBJECT_PT_bLocLimitConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_loc_limit(context)
class BONE_PT_bLocLimitConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_loc_limit(context)
# Size Limit Constraint
class OBJECT_PT_bSizeLimitConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_size_limit(context)
class BONE_PT_bSizeLimitConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_size_limit(context)
# Rotate Like Constraint
class OBJECT_PT_bRotateLikeConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_rotate_like(context)
class BONE_PT_bRotateLikeConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_rotate_like(context)
# Locate Like Constraint
class OBJECT_PT_bLocateLikeConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_locate_like(context)
class BONE_PT_bLocateLikeConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_locate_like(context)
# Size Like Constraint
class OBJECT_PT_bSizeLikeConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_size_like(context)
class BONE_PT_bSizeLikeConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_size_like(context)
# Same Volume Constraint
class OBJECT_PT_bSameVolumeConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_same_volume(context)
class BONE_PT_bSameVolumeConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_same_volume(context)
# Trans Like Constraint
class OBJECT_PT_bTransLikeConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_trans_like(context)
class BONE_PT_bTransLikeConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_trans_like(context)
# Action Constraint
class OBJECT_PT_bActionConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_action(context)
class BONE_PT_bActionConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_action(context)
class OBJECT_PT_bActionConstraint_target(ObjectConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "OBJECT_PT_bActionConstraint"
bl_label = "Target"
def draw(self, context):
self.draw_action_target(context)
class BONE_PT_bActionConstraint_target(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bActionConstraint"
bl_label = "Target"
def draw(self, context):
self.draw_action_target(context)
class OBJECT_PT_bActionConstraint_action(ObjectConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "OBJECT_PT_bActionConstraint"
bl_label = "Action"
def draw(self, context):
self.draw_action_action(context)
class BONE_PT_bActionConstraint_action(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bActionConstraint"
bl_label = "Action"
def draw(self, context):
self.draw_action_action(context)
# Lock Track Constraint
class OBJECT_PT_bLockTrackConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_lock_track(context)
class BONE_PT_bLockTrackConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_lock_track(context)
# Distance Limit Constraint
class OBJECT_PT_bDistLimitConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_dist_limit(context)
class BONE_PT_bDistLimitConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_dist_limit(context)
# Stretch To Constraint
class OBJECT_PT_bStretchToConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_stretch_to(context)
class BONE_PT_bStretchToConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_stretch_to(context)
# Min Max Constraint
class OBJECT_PT_bMinMaxConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_min_max(context)
class BONE_PT_bMinMaxConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_min_max(context)
# Clamp To Constraint
class OBJECT_PT_bClampToConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_clamp_to(context)
class BONE_PT_bClampToConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_clamp_to(context)
# Rigid Body Joint Constraint
class OBJECT_PT_bRigidBodyJointConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_rigid_body_joint(context)
# Transform Constraint
class OBJECT_PT_bTransformConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_transform(context)
class BONE_PT_bTransformConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_transform(context)
class OBJECT_PT_bTransformConstraint_source(ObjectConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "OBJECT_PT_bTransformConstraint"
bl_label = "Map From"
def draw(self, context):
self.draw_transform_from(context)
class BONE_PT_bTransformConstraint_from(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bTransformConstraint"
bl_label = "Map From"
def draw(self, context):
self.draw_transform_from(context)
class OBJECT_PT_bTransformConstraint_destination(ObjectConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "OBJECT_PT_bTransformConstraint"
bl_label = "Map To"
def draw(self, context):
self.draw_transform_to(context)
class BONE_PT_bTransformConstraint_to(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bTransformConstraint"
bl_label = "Map To"
def draw(self, context):
self.draw_transform_to(context)
# Shrinkwrap Constraint
class OBJECT_PT_bShrinkwrapConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_shrinkwrap(context)
class BONE_PT_bShrinkwrapConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_shrinkwrap(context)
# Damp Track Constraint
class OBJECT_PT_bDampTrackConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_damp_track(context)
class BONE_PT_bDampTrackConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_damp_track(context)
# Spline IK Constraint
class BONE_PT_bSplineIKConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_spline_ik(context)
class BONE_PT_bSplineIKConstraint_fitting(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bSplineIKConstraint"
bl_label = "Fitting"
def draw(self, context):
self.draw_spline_ik_fitting(context)
class BONE_PT_bSplineIKConstraint_chain_scaling(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bSplineIKConstraint"
bl_label = "Chain Scaling"
def draw(self, context):
self.draw_spline_ik_chain_scaling(context)
# Pivot Constraint
class OBJECT_PT_bPivotConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_pivot(context)
class BONE_PT_bPivotConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_pivot(context)
# Follow Track Constraint
class OBJECT_PT_bFollowTrackConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_follow_track(context)
class BONE_PT_bFollowTrackConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_follow_track(context)
# Camera Solver Constraint
class OBJECT_PT_bCameraSolverConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_camera_solver(context)
class BONE_PT_bCameraSolverConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_camera_solver(context)
# Object Solver Constraint
class OBJECT_PT_bObjectSolverConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_object_solver(context)
class BONE_PT_bObjectSolverConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_object_solver(context)
# Transform Cache Constraint
class OBJECT_PT_bTransformCacheConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_transform_cache(context)
class BONE_PT_bTransformCacheConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_transform_cache(context)
# Python Constraint
class OBJECT_PT_bPythonConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_python_constraint(context)
class BONE_PT_bPythonConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_python_constraint(context)
# Armature Constraint
class OBJECT_PT_bArmatureConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_armature(context)
class BONE_PT_bArmatureConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_armature(context)
class OBJECT_PT_bArmatureConstraint_bones(ObjectConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "OBJECT_PT_bArmatureConstraint"
bl_label = "Bones"
def draw(self, context):
self.draw_armature_bones(context)
class BONE_PT_bArmatureConstraint_bones(BoneConstraintPanel, ConstraintButtonsSubPanel):
bl_parent_id = "BONE_PT_bArmatureConstraint"
bl_label = "Bones"
def draw(self, context):
self.draw_armature_bones(context)
# Inverse Kinematic Constraint
class OBJECT_PT_bKinematicConstraint(ObjectConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_kinematic(context)
class BONE_PT_bKinematicConstraint(BoneConstraintPanel, ConstraintButtonsPanel):
def draw(self, context):
self.draw_kinematic(context)
classes = (
# Object Panels
OBJECT_PT_constraints,
BONE_PT_constraints,
OBJECT_PT_bChildOfConstraint,
OBJECT_PT_bTrackToConstraint,
OBJECT_PT_bKinematicConstraint,
OBJECT_PT_bFollowPathConstraint,
OBJECT_PT_bRotLimitConstraint,
OBJECT_PT_bLocLimitConstraint,
OBJECT_PT_bSizeLimitConstraint,
OBJECT_PT_bRotateLikeConstraint,
OBJECT_PT_bLocateLikeConstraint,
OBJECT_PT_bSizeLikeConstraint,
OBJECT_PT_bSameVolumeConstraint,
OBJECT_PT_bTransLikeConstraint,
OBJECT_PT_bActionConstraint,
OBJECT_PT_bActionConstraint_target,
OBJECT_PT_bActionConstraint_action,
OBJECT_PT_bLockTrackConstraint,
OBJECT_PT_bDistLimitConstraint,
OBJECT_PT_bStretchToConstraint,
OBJECT_PT_bMinMaxConstraint,
OBJECT_PT_bClampToConstraint,
OBJECT_PT_bRigidBodyJointConstraint,
OBJECT_PT_bTransformConstraint,
OBJECT_PT_bTransformConstraint_source,
OBJECT_PT_bTransformConstraint_destination,
OBJECT_PT_bShrinkwrapConstraint,
OBJECT_PT_bDampTrackConstraint,
OBJECT_PT_bPivotConstraint,
OBJECT_PT_bFollowTrackConstraint,
OBJECT_PT_bCameraSolverConstraint,
OBJECT_PT_bObjectSolverConstraint,
OBJECT_PT_bTransformCacheConstraint,
OBJECT_PT_bPythonConstraint,
OBJECT_PT_bArmatureConstraint,
OBJECT_PT_bArmatureConstraint_bones,
# Bone panels
BONE_PT_bChildOfConstraint,
BONE_PT_bTrackToConstraint,
BONE_PT_bKinematicConstraint,
BONE_PT_bFollowPathConstraint,
BONE_PT_bRotLimitConstraint,
BONE_PT_bLocLimitConstraint,
BONE_PT_bSizeLimitConstraint,
BONE_PT_bRotateLikeConstraint,
BONE_PT_bLocateLikeConstraint,
BONE_PT_bSizeLikeConstraint,
BONE_PT_bSameVolumeConstraint,
BONE_PT_bTransLikeConstraint,
BONE_PT_bActionConstraint,
BONE_PT_bActionConstraint_target,
BONE_PT_bActionConstraint_action,
BONE_PT_bLockTrackConstraint,
BONE_PT_bDistLimitConstraint,
BONE_PT_bStretchToConstraint,
BONE_PT_bMinMaxConstraint,
BONE_PT_bClampToConstraint,
BONE_PT_bTransformConstraint,
BONE_PT_bTransformConstraint_from,
BONE_PT_bTransformConstraint_to,
BONE_PT_bShrinkwrapConstraint,
BONE_PT_bDampTrackConstraint,
BONE_PT_bSplineIKConstraint,
BONE_PT_bSplineIKConstraint_fitting,
BONE_PT_bSplineIKConstraint_chain_scaling,
BONE_PT_bPivotConstraint,
BONE_PT_bFollowTrackConstraint,
BONE_PT_bCameraSolverConstraint,
BONE_PT_bObjectSolverConstraint,
BONE_PT_bTransformCacheConstraint,
BONE_PT_bPythonConstraint,
BONE_PT_bArmatureConstraint,
BONE_PT_bArmatureConstraint_bones,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
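
# --- Hedged illustration (not part of the original file) ---------------------
# New panels follow the recipe used throughout this file: mix a context class
# (ObjectConstraintPanel or BoneConstraintPanel) with a buttons class and point
# bl_parent_id at the parent constraint panel. A hypothetical extra subpanel
# for the Child Of constraint, shown for illustration only (it is not added to
# the `classes` tuple above, so it is never registered):
class OBJECT_PT_bChildOfConstraint_info(ObjectConstraintPanel, ConstraintButtonsSubPanel):
    bl_parent_id = "OBJECT_PT_bChildOfConstraint"
    bl_label = "Info"

    def draw(self, context):
        con = self.get_constraint(context)
        self.layout.label(text="Constraint type: " + con.type)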
| 32.392157
| 102
| 0.651678
|
88da40c3dd02cc2ead22517d3c3ca0e9d417e000
| 4,497
|
py
|
Python
|
mmdet/models/losses/solo_dice_loss.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/losses/solo_dice_loss.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/losses/solo_dice_loss.py
|
ruiningTang/mmdetection
|
100b0b5e0edddc45af0812b9f1474493c61671ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def dice_loss(pred,
target,
weight=None,
eps=1e-3,
reduction='mean',
avg_factor=None):
"""Calculate dice loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *)
target (torch.Tensor): The learning label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
eps (float): Avoid dividing by zero. Default: 1e-3.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
input = pred.reshape(pred.size()[0], -1)
target = target.reshape(target.size()[0], -1).float()
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
loss = 1 - d
if weight is not None:
assert weight.ndim == loss.ndim
assert len(weight) == len(pred)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class SOLODiceLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
activate=True,
reduction='mean',
loss_weight=1.0,
eps=1e-3):
"""`Dice Loss, which is proposed in
`V-Net: Fully Convolutional Neural Networks for Volumetric
Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
Args:
            use_sigmoid (bool, optional): Whether the prediction uses a
                sigmoid (True) or a softmax (False); only the sigmoid path
                is currently implemented. Defaults to True.
            activate (bool): Whether to activate the predictions inside the
                loss; if False, the internal sigmoid is skipped and ``pred``
                is assumed to be already activated. Defaults to True.
reduction (str, optional): The method used
to reduce the loss. Options are "none",
"mean" and "sum". Defaults to 'mean'.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
eps (float): Avoid dividing by zero. Defaults to 1e-3.
"""
super(SOLODiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self,
pred,
target,
weight=None,
reduction_override=None,
avg_factor=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *).
target (torch.Tensor): The label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
loss = self.loss_weight * dice_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor)
return loss
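

# --- Editor's addition: a minimal usage sketch, not part of the original module. ---
# It assumes the file is importable inside mmdetection (note the relative imports above);
# the tensor shapes below are arbitrary examples.
if __name__ == '__main__':
    preds = torch.randn(4, 28 * 28)                    # raw mask logits for 4 instances
    targets = (torch.rand(4, 28 * 28) > 0.5).float()   # binary mask targets
    criterion = SOLODiceLoss(use_sigmoid=True, loss_weight=3.0)
    print(criterion(preds, targets))                   # prints a scalar dice loss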
| 37.789916
| 79
| 0.557038
|
ccbff00ca4b91b81de365b93c7664e1c77371147
| 407
|
py
|
Python
|
communeapp/settings/prod.py
|
jeff-eng/Commune
|
647674c7817068271e18b61276cbcdc58d8e0996
|
[
"MIT"
] | 1
|
2022-03-21T13:46:33.000Z
|
2022-03-21T13:46:33.000Z
|
communeapp/settings/prod.py
|
jeff-eng/Commune
|
647674c7817068271e18b61276cbcdc58d8e0996
|
[
"MIT"
] | 1
|
2022-03-31T00:38:09.000Z
|
2022-03-31T00:38:09.000Z
|
communeapp/settings/prod.py
|
jeff-eng/Commune
|
647674c7817068271e18b61276cbcdc58d8e0996
|
[
"MIT"
] | null | null | null |
from .base import *
from decouple import config
SECRET_KEY = config('SECRET_KEY')
DEBUG = False
ALLOWED_HOSTS = [config('ALLOWED_HOSTS')]
# SMTP Configuration
EMAIL_BACKEND = config('EMAIL_BACKEND')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
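
# Editor's note (illustrative): the values above are read from environment variables or an
# .env file by python-decouple. A hypothetical .env could look like:
#
#   SECRET_KEY=replace-with-a-long-random-string
#   ALLOWED_HOSTS=example.com
#   EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend
#   EMAIL_HOST=smtp.example.com
#   EMAIL_PORT=587
#   EMAIL_USE_TLS=True
#   EMAIL_HOST_USER=mailer@example.com
#   EMAIL_HOST_PASSWORD=app-specific-password
#
# config() returns strings by default, so EMAIL_PORT and EMAIL_USE_TLS may warrant
# cast=int / cast=bool in a stricter setup.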
| 25.4375
| 51
| 0.783784
|
a29994e9cfdb143cd97bbc50e3efa04757a2aadd
| 8,358
|
py
|
Python
|
core/plugintools.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | null | null | null |
core/plugintools.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | null | null | null |
core/plugintools.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | null | null | null |
import os
import importlib
import yaml
from core import constants
from core.log import log
class HOTSOSDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super().increase_indent(flow, False)
def represent_dict_preserve_order(self, data):
return self.represent_dict(data.items())
def save_part(data, priority=0):
"""
Save part output yaml in temporary location. These are collected and
aggregated at the end of the plugin run.
"""
HOTSOSDumper.add_representer(
dict,
HOTSOSDumper.represent_dict_preserve_order)
out = yaml.dump(data, Dumper=HOTSOSDumper,
default_flow_style=False).rstrip("\n")
parts_index = os.path.join(constants.PLUGIN_TMP_DIR, "index.yaml")
part_path = os.path.join(constants.PLUGIN_TMP_DIR,
"{}.{}.part.yaml".format(constants.PLUGIN_NAME,
constants.PART_NAME))
# don't clobber
if os.path.exists(part_path):
newpath = part_path
i = 0
while os.path.exists(newpath):
i += 1
newpath = "{}.{}".format(part_path, i)
part_path = newpath
with open(part_path, 'w') as fd:
fd.write(out)
index = get_parts_index()
with open(parts_index, 'w') as fd:
if priority in index:
index[priority].append(part_path)
else:
index[priority] = [part_path]
fd.write(yaml.dump(index))
def get_parts_index():
parts_index = os.path.join(constants.PLUGIN_TMP_DIR, "index.yaml")
index = {}
if os.path.exists(parts_index):
with open(parts_index) as fd:
index = yaml.safe_load(fd.read()) or {}
return index
def meld_part_output(data, existing):
"""
Don't allow root level keys to be clobbered, instead just
update them. This assumes that part subkeys will be unique.
"""
remove_keys = []
for key in data:
if key in existing:
if type(existing[key]) == dict:
existing[key].update(data[key])
remove_keys.append(key)
if remove_keys:
for key in remove_keys:
del data[key]
existing.update(data)
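
# Editor's note (illustrative): given existing = {'svc': {'a': 1}} and
# data = {'svc': {'b': 2}, 'other': 3}, calling meld_part_output(data, existing)
# leaves existing == {'svc': {'a': 1, 'b': 2}, 'other': 3}: the shared root key
# 'svc' is updated in place rather than clobbered.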
def collect_all_parts(index):
parts = {}
for priority in sorted(index):
for part in index[priority]:
with open(part) as fd:
part_yaml = yaml.safe_load(fd)
# Don't allow root level keys to be clobbered, instead just
# update them. This assumes that part subkeys will be unique.
meld_part_output(part_yaml, parts)
return parts
def dump_all_parts():
index = get_parts_index()
if not index:
return
parts = collect_all_parts(index)
if not parts:
return
plugin_master = {constants.PLUGIN_NAME: parts}
HOTSOSDumper.add_representer(
dict,
HOTSOSDumper.represent_dict_preserve_order)
out = yaml.dump(plugin_master, Dumper=HOTSOSDumper,
default_flow_style=False).rstrip("\n")
print(out)
def dump(data, stdout=True):
HOTSOSDumper.add_representer(
dict,
HOTSOSDumper.represent_dict_preserve_order)
out = yaml.dump(data, Dumper=HOTSOSDumper,
default_flow_style=False).rstrip("\n")
if stdout:
print(out)
else:
return out
class ApplicationBase(object):
@property
def bind_interfaces(self):
"""Implement this method to return a dict of network interfaces used
by this application.
"""
raise NotImplementedError
class PluginPartBase(ApplicationBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._output = {}
@property
def plugin_runnable(self):
"""
Must be implemented by all plugins to define at runtime whether they
should run.
"""
raise NotImplementedError
@property
def output(self):
if self._output:
return self._output
def __call__(self):
""" This must be implemented.
The plugin runner will call this method by default unless specific
methods are defined in the plugin definition (yaml).
"""
raise NotImplementedError
class PluginRunner(object):
def __call__(self):
"""
Fetch definition for current plugin and execute each of its parts. See
definitions file at plugins.yaml for information on supported
format.
"""
path = os.path.join(constants.HOTSOS_ROOT, "plugins.yaml")
with open(path) as fd:
yaml_defs = yaml.safe_load(fd.read())
if not yaml_defs:
return
failed_parts = []
plugins = yaml_defs.get("plugins", {})
plugin = plugins.get(constants.PLUGIN_NAME, {})
parts = plugin.get("parts", {})
if not parts:
log.debug("plugin %s has no parts to run", constants.PLUGIN_NAME)
        # The following are always executed as part of each plugin run (but not at the end).
ALWAYS_RUN = {'auto_bug_check':
{'core.ycheck.bugs': 'YBugChecker'},
'auto_config_check':
{'core.ycheck.configs': 'YConfigChecker'},
'auto_scenario_check':
{'core.ycheck.scenarios': 'YScenarioChecker'}}
for part, always_parts in ALWAYS_RUN.items():
for obj, cls in always_parts.items():
# update current env to reflect actual part being run
os.environ['PART_NAME'] = part
part_obj = getattr(importlib.import_module(obj), cls)
try:
part_obj()()
except Exception as exc:
failed_parts.append(part)
log.debug("part '%s' raised exception: %s", part, exc)
if constants.DEBUG_MODE:
raise
# NOTE: we don't currently expect these parts to produce any
# output.
for part, obj_names in parts.items():
# update current env to reflect actual part being run
os.environ['PART_NAME'] = part
mod_string = ('plugins.{}.pyparts.{}'.
format(constants.PLUGIN_NAME, part))
# load part
mod = importlib.import_module(mod_string)
# every part should have a yaml priority defined
if hasattr(mod, "YAML_PRIORITY"):
yaml_priority = getattr(mod, "YAML_PRIORITY")
else:
yaml_priority = 0
part_out = {}
for entry in obj_names or []:
part_obj = getattr(mod, entry)()
                # Only run the plugin part if it declares itself runnable.
if not part_obj.plugin_runnable:
log.debug("%s.%s.%s not runnable - skipping",
constants.PLUGIN_NAME, part, entry)
continue
log.debug("running %s.%s.%s",
constants.PLUGIN_NAME, part, entry)
try:
part_obj()
# NOTE: since all parts are expected to be implementations
# of PluginPartBase we expect them to always define an
# output property.
output = part_obj.output
except Exception as exc:
failed_parts.append(part)
log.debug("part '%s' raised exception: %s", part, exc)
output = None
if constants.DEBUG_MODE:
raise
if output:
meld_part_output(output, part_out)
save_part(part_out, priority=yaml_priority)
if failed_parts:
save_part({'failed-parts': failed_parts}, priority=0)
# The following are executed at the end of each plugin run (i.e. after
# all other parts have run).
FINAL_RUN = {'core.plugins.utils.known_bugs_and_issues':
'KnownBugsAndIssuesCollector'}
for obj, cls in FINAL_RUN.items():
getattr(importlib.import_module(obj), cls)()()
| 31.779468
| 79
| 0.57083
|
81c78359cad44b8801c7aab1070a9e6c648d5a82
| 2,084
|
py
|
Python
|
grazyna_rpg/world_manager.py
|
firemark/grazyna-rpg
|
d4174f5ea8e20677d19d5daae4b208b88ccc1ed6
|
[
"MIT"
] | 1
|
2015-07-07T23:11:03.000Z
|
2015-07-07T23:11:03.000Z
|
grazyna_rpg/world_manager.py
|
firemark/grazyna-rpg
|
d4174f5ea8e20677d19d5daae4b208b88ccc1ed6
|
[
"MIT"
] | null | null | null |
grazyna_rpg/world_manager.py
|
firemark/grazyna-rpg
|
d4174f5ea8e20677d19d5daae4b208b88ccc1ed6
|
[
"MIT"
] | null | null | null |
from .monster import Monster
from .level import Level
from .enums import DirectionEnum, LevelType
from .abstract.map import AbstractMap
class PathNotFound(Exception):
pass
class WorldManager(object):
DIRECTIONS_TO_CONNECTIONS = {
DirectionEnum.east: (1, 0),
DirectionEnum.west: (-1, 0),
DirectionEnum.north: (0, 1),
DirectionEnum.south: (0, -1)
}
levels = None
actual_level = None
def __init__(self, map):
if isinstance(map, dict):
self.levels = self.generate_levels_from_raw_data(map)
elif isinstance(map, AbstractMap):
self.levels = map.generate()
else:
raise AttributeError(
                'map object is not a dict '
                'and does not inherit from AbstractMap'
)
@staticmethod
def generate_levels_from_raw_data(map):
return {
tuple(int(s) for s in key.split('-')): Level(
type=data['tile_type'],
name=data['name'],
monster_types=[data['mon_type%d' % i] for i in range(1, 4)],
) for key, data in map.items() if data.get('tile_type')
}
def create_connections_with_levels(self):
levels = self.levels
def move_cord(cord_a, cord_b):
return cord_a[0] + cord_b[0], cord_a[1] + cord_b[1]
for actual_cord, level in levels.items():
level.directions.update({
direction: another_level
for direction, another_level in (
(d, levels.get(move_cord(actual_cord, cord)))
for d, cord in self.DIRECTIONS_TO_CONNECTIONS.items()
) if another_level is not None
})
def seek_respawn(self):
return next((
level for level in self.levels.values()
if level.type is LevelType.respawn
), None)
def move(self, direction):
try:
self.actual_level = self.actual_level.directions[direction]
        except KeyError:
raise PathNotFound(direction)
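

# Editor's note (illustrative): WorldManager accepts either an AbstractMap subclass or a raw
# dict keyed by 'x-y' coordinate strings, e.g.
#   {'0-0': {'tile_type': ..., 'name': ..., 'mon_type1': ..., 'mon_type2': ..., 'mon_type3': ...}}
# Keys such as '3-4' are parsed into the coordinate tuple (3, 4) by
# generate_levels_from_raw_data; valid 'tile_type' values depend on the LevelType enum
# defined elsewhere in the package.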
| 31.104478
| 76
| 0.577735
|
15e67c870e5cd84e131b9fb7077305cbea69fcb7
| 3,717
|
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/num2words/lang_HE.py
|
ibrahmm22/library-management
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/num2words/lang_HE.py
|
ibrahmm22/library-management
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/num2words/lang_HE.py
|
ibrahmm22/library-management
|
b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
# Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
# Copyright (c) 2013, Savoir-faire Linux inc. All Rights Reserved.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
from __future__ import unicode_literals, print_function
ZERO = (u'אפס',)
ONES = {
1: (u'אחד',),
2: (u'שנים',),
3: (u'שלש',),
4: (u'ארבע',),
5: (u'חמש',),
6: (u'שש',),
7: (u'שבע',),
8: (u'שמנה',),
9: (u'תשע',),
}
TENS = {
0: (u'עשר',),
1: (u'אחד עשרה',),
2: (u'שנים עשרה',),
3: (u'שלש עשרה',),
4: (u'ארבע עשרה',),
5: (u'חמש עשרה',),
6: (u'שש עשרה',),
7: (u'שבע עשרה',),
8: (u'שמנה עשרה',),
9: (u'תשע עשרה',),
}
TWENTIES = {
2: (u'עשרים',),
3: (u'שלשים',),
4: (u'ארבעים',),
5: (u'חמישים',),
6: (u'ששים',),
7: (u'שבעים',),
8: (u'שמנים',),
9: (u'תשעים',),
}
HUNDRED = {
1: (u'מאה',),
2: (u'מאתיים',),
3: (u'מאות',)
}
THOUSANDS = {
1: (u'אלף',),
2: (u'אלפיים',),
}
AND = u'ו'
def splitby3(n):
length = len(n)
if length > 3:
start = length % 3
if start > 0:
yield int(n[:start])
for i in range(start, length, 3):
yield int(n[i:i+3])
else:
yield int(n)
def get_digits(n):
return [int(x) for x in reversed(list(('%03d' % n)[-3:]))]
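
# Editor's note (illustrative): splitby3('12345') yields 12, 345 (most significant chunk
# first), and get_digits(345) returns [5, 4, 3], i.e. ones, tens, hundreds.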
def pluralize(n, forms):
# gettext implementation:
# (n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)
form = 0 if (n % 10 == 1 and n % 100 != 11) else 1 if n != 0 else 2
return forms[form]
def int2word(n):
if n > 9999: #doesn't yet work for numbers this big
raise NotImplementedError()
if n == 0:
return ZERO[0]
words = []
chunks = list(splitby3(str(n)))
i = len(chunks)
for x in chunks:
i -= 1
n1, n2, n3 = get_digits(x)
# print str(n3) + str(n2) + str(n1)
if n3 > 0:
if n3 <= 2:
words.append(HUNDRED[n3][0])
else:
words.append(ONES[n3][0])
words.append(HUNDRED[3][0])
if n2 > 1:
words.append(TWENTIES[n2][0])
if n2 == 1:
words.append(TENS[n1][0])
elif n1 > 0 and not (i > 0 and x == 1):
words.append(ONES[n1][0])
if i > 0:
if i <= 2:
words.append(THOUSANDS[i][0])
else:
words.append(ONES[i][0])
words.append(THOUSANDS[1][0])
if len(words) > 1:
words[-1] = AND + words[-1]
return ' '.join(words)
def n2w(n):
return int2word(int(n))
def to_currency(n, currency='EUR', cents=True, seperator=','):
raise NotImplementedError()
class Num2Word_HE(object):
def to_cardinal(self, number):
return n2w(number)
def to_ordinal(self, number):
raise NotImplementedError()
if __name__ == '__main__':
yo = Num2Word_HE()
nums = [1, 11, 21, 24, 99, 100, 101, 200, 211, 345, 1000, 1011]
for num in nums:
print(num, yo.to_cardinal(num))
| 22.803681
| 71
| 0.538337
|
423d05255006b623490e489e84312922780e691d
| 3,573
|
py
|
Python
|
snuba/migrations/snuba_migrations/events/0002_events_onpremise_compatibility.py
|
fpacifici/snuba
|
cf732b71383c948f9387fbe64e9404ca71f8e9c5
|
[
"Apache-2.0"
] | null | null | null |
snuba/migrations/snuba_migrations/events/0002_events_onpremise_compatibility.py
|
fpacifici/snuba
|
cf732b71383c948f9387fbe64e9404ca71f8e9c5
|
[
"Apache-2.0"
] | null | null | null |
snuba/migrations/snuba_migrations/events/0002_events_onpremise_compatibility.py
|
fpacifici/snuba
|
cf732b71383c948f9387fbe64e9404ca71f8e9c5
|
[
"Apache-2.0"
] | null | null | null |
from typing import Sequence
from snuba.clickhouse.columns import Array, Column, DateTime, Nested, String, UInt
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations
from snuba.migrations.columns import MigrationModifiers as Modifiers
class Migration(migration.MultiStepMigration):
"""
This is a one-off migration to support on premise users who are upgrading from
any older version of Snuba that used the old migration system. Since their sentry_local
table might be previously created with slightly different columns, this migration
should bring them back in sync by adding and removing the relevant columns that
have changed over time. It should be a no-op if the table is already up to date.
"""
blocking = False
def forwards_local(self) -> Sequence[operations.Operation]:
return [
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("group_id", UInt(64)),
after="project_id",
),
operations.DropColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column_name="device_model",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("sdk_integrations", Array(String())),
after="exception_frames",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("modules.name", Nested([("name", String())])),
after="sdk_integrations",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("culprit", String(Modifiers(nullable=True))),
after="sdk_integrations",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("search_message", String(Modifiers(nullable=True))),
after="received",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("title", String(Modifiers(nullable=True))),
after="search_message",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("location", String(Modifiers(nullable=True))),
after="title",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("_tags_flattened", String()),
after="tags",
),
operations.AddColumn(
storage_set=StorageSetKey.EVENTS,
table_name="sentry_local",
column=Column("message_timestamp", DateTime()),
after="partition",
),
]
def backwards_local(self) -> Sequence[operations.Operation]:
return []
def forwards_dist(self) -> Sequence[operations.Operation]:
return []
def backwards_dist(self) -> Sequence[operations.Operation]:
return []
| 39.263736
| 91
| 0.584663
|
e0c55859fbc0b02b4fe0ab73651df4a5d1e42d82
| 825
|
py
|
Python
|
xfel/libtbx_refresh.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
xfel/libtbx_refresh.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
xfel/libtbx_refresh.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
import libtbx.load_env, os
cxi_user = libtbx.env.find_in_repositories(
relative_path="cxi_user",
test=os.path.isdir)
if cxi_user is None or not os.path.exists(cxi_user):
print(" Creating cxi_user directory")
sources_root = libtbx.env.find_in_repositories(
relative_path=".",
test=os.path.isdir)
cxi_user = os.path.join(sources_root, "cxi_user")
os.mkdir(cxi_user)
init = os.path.join(cxi_user, "__init__.py")
if not os.path.exists(init):
print(" Creating cxi_user/__init__.py")
with open(init, "w") as f:
f.write("from xfel.mono_simulation.mono_treatment import post_outlier_rejection\n")
f.write("from xfel.mono_simulation.mono_treatment import pre_get_predictions\n")
| 35.869565
| 87
| 0.710303
|
82fb345adc800d4db3c56668ea63f197d6e8201a
| 1,119
|
py
|
Python
|
Validation/SiTrackerPhase2V/python/Phase2TrackerValidationFirstStep_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Validation/SiTrackerPhase2V/python/Phase2TrackerValidationFirstStep_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Validation/SiTrackerPhase2V/python/Phase2TrackerValidationFirstStep_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from Validation.SiTrackerPhase2V.Phase2TrackerValidateDigi_cff import *
from Validation.SiTrackerPhase2V.Phase2ITValidateRecHit_cff import *
from Validation.SiTrackerPhase2V.Phase2ITValidateTrackingRecHit_cff import *
from Validation.SiTrackerPhase2V.Phase2ITValidateCluster_cff import *
from Validation.SiTrackerPhase2V.Phase2OTValidateCluster_cff import *
from Validation.SiTrackerPhase2V.Phase2OTValidateTrackingRecHit_cff import *
trackerphase2ValidationSource = cms.Sequence(pixDigiValid
+ otDigiValid
+ rechitValidIT
+ trackingRechitValidIT
+ clusterValidIT
+ clusterValidOT
+ trackingRechitValidOT
)
from Configuration.ProcessModifiers.vectorHits_cff import vectorHits
vectorHits.toReplaceWith(trackerphase2ValidationSource, trackerphase2ValidationSource.copyAndExclude([trackingRechitValidOT]))
| 55.95
| 126
| 0.672922
|
63fa2230d3d261353ff1f24782c8ad593b303171
| 10,489
|
py
|
Python
|
src/utils/data_augmentation.py
|
Sridhar53/Facial-Expression-Recognition-with-Convolution-Neural-Network
|
e48a60f2e4166ae999772fe82d24023a20f23ca3
|
[
"MIT"
] | null | null | null |
src/utils/data_augmentation.py
|
Sridhar53/Facial-Expression-Recognition-with-Convolution-Neural-Network
|
e48a60f2e4166ae999772fe82d24023a20f23ca3
|
[
"MIT"
] | null | null | null |
src/utils/data_augmentation.py
|
Sridhar53/Facial-Expression-Recognition-with-Convolution-Neural-Network
|
e48a60f2e4166ae999772fe82d24023a20f23ca3
|
[
"MIT"
] | 1
|
2018-07-24T02:12:00.000Z
|
2018-07-24T02:12:00.000Z
|
import numpy as np
from random import shuffle
from .preprocessor import preprocess_input
from .preprocessor import _imread as imread
from .preprocessor import _imresize as imresize
from .preprocessor import to_categorical
import scipy.ndimage as ndi
import cv2
class ImageGenerator(object):
""" Image generator with saturation, brightness, lighting, contrast,
horizontal flip and vertical flip transformations. It supports
    bounding box coordinates.
TODO:
- Finish support for not using bounding_boxes
- Random crop
- Test other transformations
"""
def __init__(self, ground_truth_data, batch_size, image_size,
train_keys, validation_keys,
ground_truth_transformer=None,
path_prefix=None,
saturation_var=0.5,
brightness_var=0.5,
contrast_var=0.5,
lighting_std=0.5,
horizontal_flip_probability=0.5,
vertical_flip_probability=0.5,
do_random_crop=False,
grayscale=False,
zoom_range=[0.75, 1.25],
translation_factor=.3):
self.ground_truth_data = ground_truth_data
self.ground_truth_transformer = ground_truth_transformer
self.batch_size = batch_size
self.path_prefix = path_prefix
self.train_keys = train_keys
self.validation_keys = validation_keys
self.image_size = image_size
self.grayscale = grayscale
self.color_jitter = []
if saturation_var:
self.saturation_var = saturation_var
self.color_jitter.append(self.saturation)
if brightness_var:
self.brightness_var = brightness_var
self.color_jitter.append(self.brightness)
if contrast_var:
self.contrast_var = contrast_var
self.color_jitter.append(self.contrast)
self.lighting_std = lighting_std
self.horizontal_flip_probability = horizontal_flip_probability
self.vertical_flip_probability = vertical_flip_probability
self.do_random_crop = do_random_crop
self.zoom_range = zoom_range
self.translation_factor = translation_factor
def _do_random_crop(self, image_array):
"""IMPORTANT: random crop only works for classification since the
        current implementation does not transform bounding boxes"""
height = image_array.shape[0]
width = image_array.shape[1]
x_offset = np.random.uniform(0, self.translation_factor * width)
y_offset = np.random.uniform(0, self.translation_factor * height)
offset = np.array([x_offset, y_offset])
scale_factor = np.random.uniform(self.zoom_range[0],
self.zoom_range[1])
crop_matrix = np.array([[scale_factor, 0],
[0, scale_factor]])
image_array = np.rollaxis(image_array, axis=-1, start=0)
image_channel = [ndi.interpolation.affine_transform(image_channel,
crop_matrix, offset=offset, order=0, mode='nearest',
cval=0.0) for image_channel in image_array]
image_array = np.stack(image_channel, axis=0)
image_array = np.rollaxis(image_array, 0, 3)
return image_array
def do_random_rotation(self, image_array):
"""IMPORTANT: random rotation only works for classification since the
current implementation does no transform bounding boxes"""
height = image_array.shape[0]
width = image_array.shape[1]
x_offset = np.random.uniform(0, self.translation_factor * width)
y_offset = np.random.uniform(0, self.translation_factor * height)
offset = np.array([x_offset, y_offset])
scale_factor = np.random.uniform(self.zoom_range[0],
self.zoom_range[1])
crop_matrix = np.array([[scale_factor, 0],
[0, scale_factor]])
image_array = np.rollaxis(image_array, axis=-1, start=0)
image_channel = [ndi.interpolation.affine_transform(image_channel,
crop_matrix, offset=offset, order=0, mode='nearest',
cval=0.0) for image_channel in image_array]
image_array = np.stack(image_channel, axis=0)
image_array = np.rollaxis(image_array, 0, 3)
return image_array
def _gray_scale(self, image_array):
return image_array.dot([0.299, 0.587, 0.114])
def saturation(self, image_array):
gray_scale = self._gray_scale(image_array)
        alpha = 2.0 * np.random.random() * self.saturation_var
alpha = alpha + 1 - self.saturation_var
image_array = alpha * image_array + (1 - alpha) * gray_scale[:, :, None]
return np.clip(image_array, 0, 255)
def brightness(self, image_array):
alpha = 2 * np.random.random() * self.brightness_var
        alpha = alpha + 1 - self.brightness_var
image_array = alpha * image_array
return np.clip(image_array, 0, 255)
def contrast(self, image_array):
gray_scale = (self._gray_scale(image_array).mean() *
np.ones_like(image_array))
alpha = 2 * np.random.random() * self.contrast_var
alpha = alpha + 1 - self.contrast_var
image_array = image_array * alpha + (1 - alpha) * gray_scale
return np.clip(image_array, 0, 255)
def lighting(self, image_array):
covariance_matrix = np.cov(image_array.reshape(-1,3) /
255.0, rowvar=False)
eigen_values, eigen_vectors = np.linalg.eigh(covariance_matrix)
noise = np.random.randn(3) * self.lighting_std
noise = eigen_vectors.dot(eigen_values * noise) * 255
image_array = image_array + noise
return np.clip(image_array, 0 ,255)
def horizontal_flip(self, image_array, box_corners=None):
if np.random.random() < self.horizontal_flip_probability:
image_array = image_array[:, ::-1]
            if box_corners is not None:
box_corners[:, [0, 2]] = 1 - box_corners[:, [2, 0]]
return image_array, box_corners
def vertical_flip(self, image_array, box_corners=None):
if (np.random.random() < self.vertical_flip_probability):
image_array = image_array[::-1]
            if box_corners is not None:
box_corners[:, [1, 3]] = 1 - box_corners[:, [3, 1]]
return image_array, box_corners
def transform(self, image_array, box_corners=None):
shuffle(self.color_jitter)
for jitter in self.color_jitter:
image_array = jitter(image_array)
if self.lighting_std:
image_array = self.lighting(image_array)
if self.horizontal_flip_probability > 0:
image_array, box_corners = self.horizontal_flip(image_array,
box_corners)
if self.vertical_flip_probability > 0:
image_array, box_corners = self.vertical_flip(image_array,
box_corners)
return image_array, box_corners
def preprocess_images(self, image_array):
return preprocess_input(image_array)
def flow(self, mode='train'):
while True:
if mode =='train':
shuffle(self.train_keys)
keys = self.train_keys
elif mode == 'val' or mode == 'demo':
shuffle(self.validation_keys)
keys = self.validation_keys
else:
raise Exception('invalid mode: %s' % mode)
inputs = []
targets = []
for key in keys:
image_path = self.path_prefix + key
image_array = imread(image_path)
image_array = imresize(image_array, self.image_size)
num_image_channels = len(image_array.shape)
if num_image_channels != 3:
continue
ground_truth = self.ground_truth_data[key]
if self.do_random_crop:
image_array = self._do_random_crop(image_array)
image_array = image_array.astype('float32')
if mode == 'train' or mode == 'demo':
if self.ground_truth_transformer != None:
image_array, ground_truth = self.transform(
image_array,
ground_truth)
ground_truth = (
self.ground_truth_transformer.assign_boxes(
ground_truth))
else:
image_array = self.transform(image_array)[0]
if self.grayscale:
image_array = cv2.cvtColor(image_array.astype('uint8'),
cv2.COLOR_RGB2GRAY).astype('float32')
image_array = np.expand_dims(image_array, -1)
inputs.append(image_array)
targets.append(ground_truth)
if len(targets) == self.batch_size:
inputs = np.asarray(inputs)
targets = np.asarray(targets)
# this will not work for boxes
targets = to_categorical(targets)
if mode == 'train' or mode == 'val':
inputs = self.preprocess_images(inputs)
yield self._wrap_in_dictionary(inputs, targets)
if mode == 'demo':
yield self._wrap_in_dictionary(inputs, targets)
inputs = []
targets = []
def _wrap_in_dictionary(self, image_array, targets):
return [{'input_1':image_array},
{'predictions':targets}]
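

# --- Editor's addition: an illustrative (hypothetical) wiring sketch, not part of the module. ---
# Paths, key lists and sizes below are made up; `ground_truth` maps relative image paths to
# integer class labels, which flow() converts with to_categorical().
#
#   generator = ImageGenerator(ground_truth, batch_size=32, image_size=(48, 48),
#                              train_keys=train_keys, validation_keys=val_keys,
#                              path_prefix='../datasets/imgs/', grayscale=True)
#   model.fit_generator(generator.flow(mode='train'),
#                       steps_per_epoch=len(train_keys) // 32,
#                       validation_data=generator.flow(mode='val'),
#                       validation_steps=len(val_keys) // 32)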
| 45.017167
| 81
| 0.558395
|
b99be21ef10553651cda3e804b6d2fd6574295cc
| 9,981
|
py
|
Python
|
Hardware/Networks/NiN/nin.py
|
gkrish19/SIAM
|
1e530d4c070054045fc2e8e7fe4ce82a54755132
|
[
"MIT"
] | 4
|
2021-02-02T06:50:43.000Z
|
2022-01-29T12:25:32.000Z
|
Hardware/Networks/NiN/nin.py
|
gkrish19/SIAM
|
1e530d4c070054045fc2e8e7fe4ce82a54755132
|
[
"MIT"
] | null | null | null |
Hardware/Networks/NiN/nin.py
|
gkrish19/SIAM
|
1e530d4c070054045fc2e8e7fe4ce82a54755132
|
[
"MIT"
] | 2
|
2021-07-07T19:58:40.000Z
|
2022-01-27T22:51:20.000Z
|
# ===================================================================== #
# File name: Network_in_Network.py
# Author: BIGBALLON
# Date update: 07/28/2017
# Python Version: 3.5.2
# Tensorflow Version: 1.2.1
# Description:
# Implement Network in Network (using only TensorFlow)
# Paper Link: (Network In Network) https://arxiv.org/abs/1312.4400
# Trick Used:
# Data augmentation parameters
# Color normalization
# Weight Decay
# Weight initialization
# Use Nesterov momentum
# Dataset: Cifar-10
# Testing accuracy: 91.18% - 91.25%
# ===================================================================== #
import tensorflow as tf
from data_utility import *
import timeit
from hardware_estimation import hardware_estimation
_input_array = []
_weights = []
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('log_save_path', './nin_logs', 'Directory where to save tensorboard log')
tf.app.flags.DEFINE_string('model_save_path', './model/', 'Directory where to save model weights')
tf.app.flags.DEFINE_integer('batch_size', 128, 'batch size')
tf.app.flags.DEFINE_integer('iteration', 391, 'iteration')
tf.app.flags.DEFINE_float('weight_decay', 0.0001, 'weight decay')
tf.app.flags.DEFINE_float('dropout', 0.5, 'dropout')
tf.app.flags.DEFINE_integer('epochs', 164, 'epochs')
tf.app.flags.DEFINE_float('momentum', 0.9, 'momentum')
tf.app.flags.DEFINE_bool('train', False, 'To train or not')
tf.app.flags.DEFINE_bool('test', True, 'To test or not')
# ========================================================== #
# ├─ conv()
# ├─ activation(x)
# ├─ max_pool()
# └─ global_avg_pool()
# ========================================================== #
def conv(x, shape, use_bias=True, std=0.05):
random_initializer = tf.random_normal_initializer(stddev=std)
W = tf.get_variable('weights', shape=shape, initializer=random_initializer)
b = tf.get_variable('bias', shape=[shape[3]], initializer=tf.zeros_initializer)
_input_array.append(tf.transpose(x, (0,3,1,2)))
_weights.append(W)
x = tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
if use_bias:
x = tf.nn.bias_add(x,b)
return x
def activation(x):
return tf.nn.relu(x)
def max_pool(input, k_size=3, stride=2):
return tf.nn.max_pool(input, ksize=[1, k_size, k_size, 1], strides=[1, stride, stride, 1], padding='SAME')
def global_avg_pool(input, k_size=1, stride=1):
return tf.nn.avg_pool(input, ksize=[1,k_size,k_size,1], strides=[1,stride,stride,1], padding='VALID')
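
# Editor's note (illustrative): with k_size=8, stride=1 and 'VALID' padding, an input of
# shape (N, 8, 8, 10) is reduced to (N, 1, 1, 10); the softmax block in main() then
# reshapes it to (N, 10).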
def learning_rate_schedule(epoch_num):
if epoch_num < 81:
return 0.05
elif epoch_num < 121:
return 0.01
else:
return 0.001
def main(_):
train_x, train_y, test_x, test_y = prepare_data()
train_x, test_x = color_preprocessing(train_x, test_x)
# define placeholder x, y_ , keep_prob, learning_rate
with tf.name_scope('input'):
x = tf.placeholder(tf.float32,[None, image_size, image_size, 3], name='input_x')
y_ = tf.placeholder(tf.float32, [None, class_num], name='input_y')
with tf.name_scope('keep_prob'):
keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('learning_rate'):
learning_rate = tf.placeholder(tf.float32)
# build_network
with tf.variable_scope('conv1'):
print("Shape of Layer is", x.shape)
output = conv(x,[5, 5, 3, 192],std=0.01)
output = activation(output)
with tf.variable_scope('mlp1-1'):
print("Shape of Layer is", output.shape)
output = conv(output,[1, 1, 192, 160])
output = activation(output)
with tf.variable_scope('mlp1-2'):
print("Shape of Layer is", output.shape)
output = conv(output,[1, 1, 160, 96])
#print("Shape of Layer is", output.shape)
output = activation(output)
with tf.name_scope('max_pool-1'):
output = max_pool(output, 3, 2)
with tf.name_scope('dropout-1'):
#print("Shape of Layer is", output.shape)
output = tf.nn.dropout(output,keep_prob)
#print("Shape of Layer is", output.shape)
with tf.variable_scope('conv2'):
print("Shape of Layer is", output.shape)
output = conv(output,[5, 5, 96, 192])
#print("Shape of Layer is", output.shape)
output = activation(output)
with tf.variable_scope('mlp2-1'):
print("Shape of Layer is", output.shape)
output = conv(output,[1, 1, 192, 192])
output = activation(output)
with tf.variable_scope('mlp2-2'):
print("Shape of Layer is", output.shape)
output = conv(output,[1, 1, 192, 192])
output = activation(output)
with tf.name_scope('max_pool-2'):
output = max_pool(output, 3, 2)
with tf.name_scope('dropout-2'):
output = tf.nn.dropout(output,keep_prob)
with tf.variable_scope('conv3'):
print("Shape of Layer is", output.shape)
output = conv(output,[3, 3, 192, 192])
output = activation(output)
with tf.variable_scope('mlp3-1'):
print("Shape of Layer is", output.shape)
output = conv(output,[1, 1, 192, 192])
output = activation(output)
with tf.variable_scope('mlp3-2'):
print("Shape of Layer is", output.shape)
output = conv(output,[1, 1, 192, 10])
output = activation(output)
with tf.name_scope('global_avg_pool'):
#print("Shape of Layer is", output.shape)
output = global_avg_pool(output, 8, 1)
    with tf.name_scope('softmax'):
output = tf.reshape(output,[-1,10])
# loss function: cross_entropy
# weight decay: l2 * WEIGHT_DECAY
# train_step: training operation
with tf.name_scope('cross_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output))
with tf.name_scope('l2_loss'):
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
with tf.name_scope('train_step'):
train_step = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum,use_nesterov=True).minimize(cross_entropy + l2 * FLAGS.weight_decay)
with tf.name_scope('prediction'):
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
# initial an saver to save model
saver = tf.train.Saver()
# for testing
def run_testing(sess):
acc = 0.0
loss = 0.0
pre_index = 0
add = 1000
for it in range(10):
batch_x = test_x[pre_index:pre_index+add]
batch_y = test_y[pre_index:pre_index+add]
pre_index = pre_index + add
loss_, acc_ = sess.run([cross_entropy,accuracy],feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0})
loss += loss_ / 10.0
acc += acc_ / 10.0
summary = tf.Summary(value=[tf.Summary.Value(tag="test_loss", simple_value=loss),
tf.Summary.Value(tag="test_accuracy", simple_value=acc)])
return acc, loss, summary
if(FLAGS.train):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(FLAGS.log_save_path,sess.graph)
# epoch = 164
# batch size = 128
# iteration = 391
            # we should make sure [batch_size * iteration = data_set_number]
for ep in range(1,FLAGS.epochs+1):
lr = learning_rate_schedule(ep)
pre_index = 0
train_acc = 0.0
train_loss = 0.0
start_time = time.time()
print("\nepoch %d/%d:" %(ep,FLAGS.epochs))
for it in range(1,FLAGS.iteration+1):
if pre_index+FLAGS.batch_size < 50000:
batch_x = train_x[pre_index:pre_index+FLAGS.batch_size]
batch_y = train_y[pre_index:pre_index+FLAGS.batch_size]
else:
batch_x = train_x[pre_index:]
batch_y = train_y[pre_index:]
batch_x = data_augmentation(batch_x)
_, batch_loss = sess.run([train_step, cross_entropy],feed_dict={x:batch_x, y_:batch_y, keep_prob: FLAGS.dropout, learning_rate: lr})
batch_acc = accuracy.eval(feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0})
train_loss += batch_loss
train_acc += batch_acc
pre_index += FLAGS.batch_size
if it == FLAGS.iteration:
train_loss /= FLAGS.iteration
train_acc /= FLAGS.iteration
train_summary = tf.Summary(value=[tf.Summary.Value(tag="train_loss", simple_value=train_loss),
tf.Summary.Value(tag="train_accuracy", simple_value=train_acc)])
val_acc, val_loss, test_summary = run_testing(sess)
summary_writer.add_summary(train_summary, ep)
summary_writer.add_summary(test_summary, ep)
summary_writer.flush()
print("iteration: %d/%d, cost_time: %ds, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f" %(it, FLAGS.iteration, int(time.time()-start_time), train_loss, train_acc, val_loss, val_acc))
else:
print("iteration: %d/%d, train_loss: %.4f, train_acc: %.4f" %(it, FLAGS.iteration, train_loss / it, train_acc / it) , end='\r')
save_path = saver.save(sess, FLAGS.model_save_path)
print("Model saved in file: %s" % save_path)
elif (FLAGS.test):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(FLAGS.log_save_path,sess.graph)
# epoch = 164
# batch size = 128
# iteration = 391
            # we should make sure [batch_size * iteration = data_set_number]
#save_path = saver.save(sess, FLAGS.model_save_path)
# saver.restore(sess, FLAGS.model_save_path)
# print("Model Restored properly.")
pre_index = 0
batch_x = train_x[pre_index:pre_index+FLAGS.batch_size]
batch_y = train_y[pre_index:pre_index+FLAGS.batch_size]
batch_x = data_augmentation(batch_x)
feed_dict={x:batch_x, y_:batch_y, keep_prob: 1.0}
H, W = sess.run([_input_array, _weights], feed_dict=feed_dict)
start = timeit.default_timer()
hardware_estimation(H,W,8,8)
stop = timeit.default_timer()
print("The sim time is:", stop-start)
print("***********************Completed SIAM***********************")
else:
print("Need to choose one them")
if __name__ == '__main__':
tf.app.run()
| 34.65625
| 210
| 0.653542
|
af28f6b1c2a9cbdf0bce47c5ce829e74d86b7608
| 3,086
|
py
|
Python
|
src/test_pi/test_classifier.py
|
msei/insectcounter
|
1e9356e5639789b70c614da6c74307ec2b8c8304
|
[
"MIT"
] | null | null | null |
src/test_pi/test_classifier.py
|
msei/insectcounter
|
1e9356e5639789b70c614da6c74307ec2b8c8304
|
[
"MIT"
] | null | null | null |
src/test_pi/test_classifier.py
|
msei/insectcounter
|
1e9356e5639789b70c614da6c74307ec2b8c8304
|
[
"MIT"
] | 2
|
2020-02-05T05:19:18.000Z
|
2020-02-05T15:04:32.000Z
|
import unittest
import soundfile as sf
import os
from classifier import classifier
import json
import glob
import re
class TestClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cm = classifier.ClassifierModule("../savedfiles", "../model/", False)
def testWrongPath(self):
with self.assertRaises(SystemExit):
self.cm = classifier.ClassifierModule("./IDontExist","../model/", True)
def testWrongModelPath(self):
with self.assertRaises(SystemExit):
self.cm = classifier.ClassifierModule(".","./IDontExist", True)
def testModelNotInPath(self):
with self.assertRaises(SystemExit):
self.cm = classifier.ClassifierModule(".","..", True)
def testGetMaxLabelValid(self):
validvec = [0.2,0.5,0.3]
file_names = glob.glob(self.cm.modelpath + '/species*.json')
match = re.match(r".*/species(\w+).json", file_names[0])
with open(file_names[0]) as json_file:
species_dict = json.load(json_file)
assert self.cm.get_max_label_with_score(validvec)[0] == species_dict["1"]["name"], "Returned wrong label for valid input"
def testGetMaxLabelInvalid(self):
invalidvec = [0.1, 0.1, 0.1, 0.1, 0.6]
assert self.cm.get_max_label_with_score(invalidvec)[0] == "unknown", "Returned wrong label for invalid input, should be unknown"
def testClassify(self):
classification = self.cm.classify("testfile.wav")
assert not classification == "unknown", "No classification returned for valid sample"
def testRemoveFromDisk(self):
testname = "../savedfiles/dummy"
with open(testname+'.txt', "w") as f:
f.write("dummy")
with open(testname+'.json', "w") as f:
f.write("dummy")
self.cm.remove_from_disk(testname+".txt")
assert not os.path.isfile(testname+".txt"), "File was not deleted"
assert not os.path.isfile(testname+".json"), "Json corresponding to file was not deleted"
def testRemoveFromDiskInvalidFile(self):
self.cm.remove_from_disk("dummy")
def testRemoveFromDiskInvalidFileNameType(self):
with self.assertRaises(SystemExit):
self.cm.remove_from_disk(42)
def testGetAdditionalInfoNoFile(self):
timestamp = "dummy"
data = self.cm.get_additional_info(timestamp)
assert len(data) == 1, "Returned data but no file."
def testGetAdditionalInfo(self):
timestamp = "dummy"
dummydict = dict(otherinfo = "dummy")
with open("../savedfiles/"+timestamp+".json", "w") as f:
json.dump(dummydict,f)
data = self.cm.get_additional_info(timestamp)
os.remove("../savedfiles/"+timestamp+".json")
assert data["otherinfo"] == "dummy", "CSV data (other info) not read properly"
assert "model_id" in data, "CSV data (model id) not read properly"
@classmethod
def tearDownClass(cls):
del cls.cm
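

# Editor's addition: allow running this test module directly; unittest/pytest discovery
# also picks these tests up without this block.
if __name__ == '__main__':
    unittest.main()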
| 34.288889
| 137
| 0.627997
|
1c02b0e7db7330dd50573d212dc9b26fc20c3bb1
| 5,593
|
py
|
Python
|
infer.py
|
LauJames/QuestionMatching
|
380e32ad4d884ecbf314fbb69b009f34a1f8f6a9
|
[
"Apache-2.0"
] | 4
|
2019-02-22T00:30:44.000Z
|
2021-03-29T09:15:18.000Z
|
infer.py
|
LauJames/QuestionMatching
|
380e32ad4d884ecbf314fbb69b009f34a1f8f6a9
|
[
"Apache-2.0"
] | null | null | null |
infer.py
|
LauJames/QuestionMatching
|
380e32ad4d884ecbf314fbb69b009f34a1f8f6a9
|
[
"Apache-2.0"
] | 4
|
2019-03-03T06:39:41.000Z
|
2019-10-09T09:00:49.000Z
|
#! /user/bin/evn python
# -*- coding:utf8 -*-
"""
@Author : Lau James
@Contact : LauJames2017@whu.edu.cn
@Project : MVLSTM
@File : infer.py
@Time     : 18-11-27 9:52 PM
@Software : PyCharm
@Copyright: "Copyright (c) 2018 Lau James. All Rights Reserved"
"""
import os
import sys
import time
import datetime
import argparse
import numpy as np
import tensorflow as tf
import tensorflow.contrib as tc
import logging
import jieba
import pickle
from models.MVLSTM import MVLSTM
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(curdir))
def parse_args():
parser = argparse.ArgumentParser('Question to Question matching for QA task')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
train_settings = parser.add_argument_group('train settings')
train_settings.add_argument('--learning_rate', type=float, default=0.001, help='optimizer type')
model_settings = parser.add_argument_group('model settings')
model_settings.add_argument('--algo', choices=['MVLSTM'], default='MVLSTM',
help='choose the algorithm to use')
model_settings.add_argument('--embedding_dim', type=int, default=300,
help='size of the embeddings')
model_settings.add_argument('--hidden_size', type=int, default=128,
help='size of LSTM hidden units')
model_settings.add_argument('--max_q_len', type=int, default=18,
help='max length of question')
model_settings.add_argument('--num_classes', type=int, default=2,
help='num of classes')
path_settings = parser.add_argument_group('path settings')
path_settings.add_argument('--tensorboard_dir', default='tensorboard_dir/MVLSTM',
help='saving path of tensorboard')
path_settings.add_argument('--save_dir', default='checkpoints/MVLSTM',
help='save base dir')
path_settings.add_argument('--log_path',
help='path of the log file. If not set, logs are printed to console')
misc_setting = parser.add_argument_group('misc settings')
misc_setting.add_argument('--allow_soft_placement', type=bool, default=True,
help='allow device soft device placement')
misc_setting.add_argument('--log_device_placement', type=bool, default=False,
help='log placement of ops on devices')
return parser.parse_args()
def get_time_dif(start_time):
"""获取已使用时间"""
end_time = time.time()
time_dif = end_time - start_time
return datetime.timedelta(seconds=int(round(time_dif)))
def chinese_tokenizer(documents):
"""
    Tokenize Chinese text into word sequences with jieba (also required when restoring the saved VocabularyProcessor, so it must be kept in this module).
:param documents:
:return:
"""
for document in documents:
yield list(jieba.cut(document))
def prepare():
args = parse_args()
start_time = time.time()
# absolute path
save_path = os.path.join(curdir, os.path.join(args.save_dir, 'best_validation'))
vocab_path = os.path.join(curdir, os.path.join(args.save_dir, 'vocab'))
vocab_processor = tc.learn.preprocessing.VocabularyProcessor.restore(vocab_path)
model = MVLSTM(
sequence_length=args.max_q_len,
num_classes=args.num_classes,
embedding_dim=args.embedding_dim,
vocab_size=len(vocab_processor.vocabulary_),
max_length=args.max_q_len,
hidden_dim=args.hidden_size,
learning_rate=args.learning_rate
)
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(session, save_path=save_path)
time_dif = get_time_dif(start_time)
print('Time usage:', time_dif)
return vocab_processor, model, session
def inference(q1, q2, vocab_processor, model, session):
# args = parse_args()
# vocab_path = os.path.join(curdir, os.path.join(args.save_dir, 'vocab'))
# vocab_processor = tc.learn.preprocessing.VocabularyProcessor.restore(vocab_path)
q1_pad = np.array(list(vocab_processor.transform(q1)))
q2_pad = np.array(list(vocab_processor.transform(q2)))
prediction = session.run(model.y_pred,
feed_dict={
model.input_q1: q1_pad,
model.input_q2: q2_pad,
model.dropout_keep_prob: 1.0
})
return prediction
def infer_prob(q1, q2, vocab_processor, model, session):
q1_pad = np.array(list(vocab_processor.transform(q1)))
q2_pad = np.array(list(vocab_processor.transform(q2)))
predict_prob, prediction = session.run([model.probs, model.y_pred],
feed_dict={
model.input_q1: q1_pad,
model.input_q2: q2_pad,
model.dropout_keep_prob: 1.0
})
return predict_prob, prediction
if __name__ == '__main__':
q1 = ['如何买保险', '如何买保险', '如何买保险']
q2 = ['保险怎么买', '怎么买保险', '糖尿病能不能保']
vocab_processor, model, session = prepare()
probs, predict = infer_prob(q1, q2, vocab_processor, model, session)
print(probs)
print(predict)
# q1 = ['如何买保险']
# q2 = ['保险怎么买']
# vocab_process, model, session = prepare()
# prediction = inference(q1, q2, vocab_process, model, session)
# print(prediction)
| 35.852564
| 100
| 0.626855
|
d05ae1ae87b5fa4506c6410cdba6c8868679a953
| 1,023
|
py
|
Python
|
frontends/python/tests/analysis/lambda.py
|
aardwolf-sfl/aardwolf
|
33bfe3e0649a73aec7efa0fa80bff8077b550bd0
|
[
"MIT"
] | 2
|
2020-08-15T08:55:39.000Z
|
2020-11-09T17:31:16.000Z
|
frontends/python/tests/analysis/lambda.py
|
aardwolf-sfl/aardwolf
|
33bfe3e0649a73aec7efa0fa80bff8077b550bd0
|
[
"MIT"
] | null | null | null |
frontends/python/tests/analysis/lambda.py
|
aardwolf-sfl/aardwolf
|
33bfe3e0649a73aec7efa0fa80bff8077b550bd0
|
[
"MIT"
] | null | null | null |
# AARD: function: foo
# AARD: #1:1 -> #1:2 :: defs: %1 / uses: [@1 4:9-4:10] { arg }
def foo(a):
# AARD: #1:2 -> #1:3 :: defs: %2 / uses: [@1 6:5-6:27]
test = lambda x: x > a
# AARD: #1:3 -> #1:4 :: defs: %3 / uses: [@1 8:5-8:51]
value = lambda n, m: (lambda x: n * x + m * x)
# AARD: #1:4 -> :: defs: / uses: %2, %3 [@1 11:5-11:23] { ret }
return test, value
# AARD: function: foo::lambda:6:11
# AARD: #1:5 -> #1:6 :: defs: %4 / uses: [@1 6:19-6:20] { arg }
# AARD: #1:6 -> :: defs: / uses: %1, %4 [@1 6:22-6:27] { ret }
# AARD: function: foo::lambda:8:12
# AARD: #1:7 -> #1:8 :: defs: %5 / uses: [@1 8:20-8:21] { arg }
# AARD: #1:8 -> #1:9 :: defs: %6 / uses: [@1 8:23-8:24] { arg }
# AARD: #1:9 -> :: defs: / uses: [@1 8:27-8:50] { ret }
# AARD: function: foo::lambda:8:12::lambda:8:26
# AARD: #1:10 -> #1:11 :: defs: %7 / uses: [@1 8:34-8:35] { arg }
# AARD: #1:11 -> :: defs: / uses: %5, %6, %7 [@1 8:37-8:50] { ret }
# AARD: @1 = lambda.py
| 37.888889
| 72
| 0.44477
|
cb8c28bb9fd6a04a43b840f315d6dbfed7227e0f
| 442
|
py
|
Python
|
module/loading.py
|
indmind/Jomblo-Story
|
cbe69be1d9f0d8e592dcf84e3f3764d1eae22b3c
|
[
"MIT"
] | null | null | null |
module/loading.py
|
indmind/Jomblo-Story
|
cbe69be1d9f0d8e592dcf84e3f3764d1eae22b3c
|
[
"MIT"
] | 1
|
2017-08-07T12:12:18.000Z
|
2017-08-08T04:45:41.000Z
|
module/loading.py
|
indmind/Jomblo-Story
|
cbe69be1d9f0d8e592dcf84e3f3764d1eae22b3c
|
[
"MIT"
] | null | null | null |
import time
import sys
def createDots(length, delay):
for i in range(length):
print('.', end='')
sys.stdout.flush()
time.sleep(delay)
def createHash(length, delay):
for i in range(length):
print('#', end='')
sys.stdout.flush()
time.sleep(delay)
def createVrDots(length, delay):
for i in range(length):
print('.')
time.sleep(delay)
def deGa():
time.sleep(.3)
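

# Editor's addition: a tiny usage demo (prints ten dots over roughly one second).
if __name__ == '__main__':
    createDots(10, 0.1)
    print()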
| 19.217391
| 32
| 0.570136
|
a09cf2e9fa4169aad6427b601bbb04bade0314a6
| 8,208
|
py
|
Python
|
scripts/tilestache-compose.py
|
shoeberto/TileStache
|
4526076e9326512a0542adaae86a946e08df8547
|
[
"BSD-3-Clause"
] | 414
|
2015-01-05T19:29:22.000Z
|
2022-03-26T03:39:42.000Z
|
scripts/tilestache-compose.py
|
shoeberto/TileStache
|
4526076e9326512a0542adaae86a946e08df8547
|
[
"BSD-3-Clause"
] | 134
|
2015-01-15T08:25:55.000Z
|
2021-09-02T16:06:00.000Z
|
scripts/tilestache-compose.py
|
shoeberto/TileStache
|
4526076e9326512a0542adaae86a946e08df8547
|
[
"BSD-3-Clause"
] | 176
|
2015-01-09T14:43:25.000Z
|
2022-03-04T16:53:27.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from sys import stderr, path
from tempfile import mkstemp
from os import close, write, unlink
from optparse import OptionParser
from os.path import abspath
try:
from _thread import allocate_lock
except ImportError:
from thread import allocate_lock
import ModestMaps
mmaps_version = tuple(map(int, getattr(ModestMaps, '__version__', '0.0.0').split('.')))
if mmaps_version < (1, 3, 0):
raise ImportError('tilestache-compose.py requires ModestMaps 1.3.0 or newer.')
#
# More imports can be found below, after the --include-path option is known.
#
class Provider (ModestMaps.Providers.IMapProvider):
""" Wrapper for TileStache Layer objects that makes them behave like ModestMaps Provider objects.
Requires ModestMaps 1.3.0 or better to support "file://" URLs.
"""
def __init__(self, layer, verbose=False, ignore_cached=None):
self.projection = layer.projection
self.layer = layer
self.files = []
self.verbose = bool(verbose)
self.ignore_cached = bool(ignore_cached)
self.lock = allocate_lock()
#
# It's possible that Mapnik is not thread-safe, best to be cautious.
#
self.threadsafe = self.layer.provider is not TileStache.Providers.Mapnik
def tileWidth(self):
return 256
def tileHeight(self):
return 256
def getTileUrls(self, coord):
""" Return tile URLs that start with file://, by first retrieving them.
"""
if self.threadsafe or self.lock.acquire():
mime_type, tile_data = TileStache.getTile(self.layer, coord, 'png', self.ignore_cached)
handle, filename = mkstemp(prefix='tilestache-compose-', suffix='.png')
write(handle, tile_data)
close(handle)
self.files.append(filename)
if not self.threadsafe:
# must be locked, right?
self.lock.release()
if self.verbose:
size = len(tile_data) / 1024.
printlocked(self.lock, self.layer.name() + '/%(zoom)d/%(column)d/%(row)d.png' % coord.__dict__, '(%dKB)' % size)
return ('file://' + abspath(filename), )
def __del__(self):
""" Delete any tile that was saved in self.getTileUrls().
"""
for filename in self.files:
unlink(filename)
class BadComposure(Exception):
pass
def printlocked(lock, *stuff):
"""
"""
if lock.acquire():
print(' '.join([str(thing) for thing in stuff]))
lock.release()
parser = OptionParser(usage="""tilestache-compose.py [options] file
There are three ways to set a map coverage area.
1) Center, zoom, and dimensions: create a map of the specified size,
centered on a given geographical point at a given zoom level:
tilestache-compose.py -c config.json -l layer-name -d 800 800 -n 37.8 -122.3 -z 11 out.jpg
2) Extent and dimensions: create a map of the specified size that
adequately covers the given geographical extent:
tilestache-compose.py -c config.json -l layer-name -d 800 800 -e 36.9 -123.5 38.9 -121.2 out.png
3) Extent and zoom: create a map at the given zoom level that covers
the precise geographical extent, at whatever pixel size is necessary:
tilestache-compose.py -c config.json -l layer-name -e 36.9 -123.5 38.9 -121.2 -z 9 out.jpg""")
defaults = dict(center=(37.8044, -122.2712), zoom=14, dimensions=(900, 600), verbose=True)
parser.set_defaults(**defaults)
parser.add_option('-c', '--config', dest='config',
help='Path to configuration file.')
parser.add_option('-l', '--layer', dest='layer',
help='Layer name from configuration.')
parser.add_option('-n', '--center', dest='center', nargs=2,
help='Geographic center of map. Default %.4f, %.4f.' % defaults['center'], type='float',
action='store')
parser.add_option('-e', '--extent', dest='extent', nargs=4,
help='Geographic extent of map. Two lat, lon pairs', type='float',
action='store')
parser.add_option('-z', '--zoom', dest='zoom',
help='Zoom level. Default %(zoom)d.' % defaults, type='int',
action='store')
parser.add_option('-d', '--dimensions', dest='dimensions', nargs=2,
help='Pixel width, height of output image. Default %d, %d.' % defaults['dimensions'], type='int',
action='store')
parser.add_option('-v', '--verbose', dest='verbose',
help='Make a bunch of noise.',
action='store_true')
parser.add_option('-i', '--include-path', dest='include_paths',
help="Add the following colon-separated list of paths to Python's include path (aka sys.path)")
parser.add_option('-x', '--ignore-cached', action='store_true', dest='ignore_cached',
help='Re-render every tile, whether it is in the cache already or not.')
if __name__ == '__main__':
(options, args) = parser.parse_args()
if options.include_paths:
for p in options.include_paths.split(':'):
path.insert(0, p)
import TileStache
try:
if options.config is None:
raise TileStache.Core.KnownUnknown('Missing required configuration (--config) parameter.')
if options.layer is None:
raise TileStache.Core.KnownUnknown('Missing required layer (--layer) parameter.')
config = TileStache.parseConfig(options.config)
if options.layer not in config.layers:
raise TileStache.Core.KnownUnknown('"%s" is not a layer I know about. Here are some that I do know about: %s.' % (options.layer, ', '.join(sorted(config.layers.keys()))))
provider = Provider(config.layers[options.layer], options.verbose, options.ignore_cached)
try:
outfile = args[0]
except IndexError:
raise BadComposure('Error: Missing output file.')
if options.center and options.extent:
raise BadComposure("Error: bad map coverage, center and extent can't both be set.")
elif options.extent and options.dimensions and options.zoom:
raise BadComposure("Error: bad map coverage, dimensions and zoom can't be set together with extent.")
elif options.center and options.zoom and options.dimensions:
lat, lon = options.center[0], options.center[1]
width, height = options.dimensions[0], options.dimensions[1]
dimensions = ModestMaps.Core.Point(width, height)
center = ModestMaps.Geo.Location(lat, lon)
zoom = options.zoom
map = ModestMaps.mapByCenterZoom(provider, center, zoom, dimensions)
elif options.extent and options.dimensions:
latA, lonA = options.extent[0], options.extent[1]
latB, lonB = options.extent[2], options.extent[3]
width, height = options.dimensions[0], options.dimensions[1]
dimensions = ModestMaps.Core.Point(width, height)
locationA = ModestMaps.Geo.Location(latA, lonA)
locationB = ModestMaps.Geo.Location(latB, lonB)
map = ModestMaps.mapByExtent(provider, locationA, locationB, dimensions)
elif options.extent and options.zoom:
latA, lonA = options.extent[0], options.extent[1]
latB, lonB = options.extent[2], options.extent[3]
locationA = ModestMaps.Geo.Location(latA, lonA)
locationB = ModestMaps.Geo.Location(latB, lonB)
zoom = options.zoom
map = ModestMaps.mapByExtentZoom(provider, locationA, locationB, zoom)
else:
raise BadComposure("Error: not really sure what's going on.")
except BadComposure as e:
print(parser.usage, file=stderr)
print('', file=stderr)
print('%s --help for possible options.' % __file__, file=stderr)
print('', file=stderr)
print(e, file=stderr)
exit(1)
if options.verbose:
print(map.coordinate, map.offset, '->', outfile, (map.dimensions.x, map.dimensions.y))
map.draw(False).save(outfile)
| 36.48
| 182
| 0.633163
|
a98340e346bcf639a11a6b9f8c69de55343f1a64
| 5,882
|
py
|
Python
|
deps/lib/python3.5/site-packages/Crypto/Cipher/CAST.py
|
jfarmer08/hassio
|
792a6071a97bb33857c14c9937946233c620035c
|
[
"MIT"
] | 1
|
2018-10-30T07:19:27.000Z
|
2018-10-30T07:19:27.000Z
|
deps/lib/python3.5/site-packages/Crypto/Cipher/CAST.py
|
jfarmer08/hassio
|
792a6071a97bb33857c14c9937946233c620035c
|
[
"MIT"
] | 1
|
2018-04-04T12:13:40.000Z
|
2018-05-03T07:57:52.000Z
|
deps/lib/python3.5/site-packages/Crypto/Cipher/CAST.py
|
jfarmer08/hassio
|
792a6071a97bb33857c14c9937946233c620035c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Cipher/CAST.py : CAST
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""
Module's constants for the modes of operation supported with CAST:
:var MODE_ECB: Electronic Code Book (ECB)
:var MODE_CBC: Cipher-Block Chaining (CBC)
:var MODE_CFB: Cipher FeedBack (CFB)
:var MODE_OFB: Output FeedBack (OFB)
:var MODE_CTR: CounTer Mode (CTR)
:var MODE_OPENPGP: OpenPGP Mode
:var MODE_EAX: EAX Mode
"""
import sys
from Crypto.Cipher import _create_cipher
from Crypto.Util.py3compat import byte_string
from Crypto.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
c_size_t, c_uint8_ptr)
_raw_cast_lib = load_pycryptodome_raw_lib(
"Crypto.Cipher._raw_cast",
"""
int CAST_start_operation(const uint8_t key[],
size_t key_len,
void **pResult);
int CAST_encrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CAST_decrypt(const void *state,
const uint8_t *in,
uint8_t *out,
size_t data_len);
int CAST_stop_operation(void *state);
""")
def _create_base_cipher(dict_parameters):
"""This method instantiates and returns a handle to a low-level
base cipher. It will absorb named parameters in the process."""
try:
key = dict_parameters.pop("key")
except KeyError:
raise TypeError("Missing 'key' parameter")
if len(key) not in key_size:
raise ValueError("Incorrect CAST key length (%d bytes)" % len(key))
start_operation = _raw_cast_lib.CAST_start_operation
stop_operation = _raw_cast_lib.CAST_stop_operation
cipher = VoidPointer()
result = start_operation(c_uint8_ptr(key),
c_size_t(len(key)),
cipher.address_of())
if result:
raise ValueError("Error %X while instantiating the CAST cipher"
% result)
return SmartPointer(cipher.get(), stop_operation)
def new(key, mode, *args, **kwargs):
"""Create a new CAST cipher
:param key:
The secret key to use in the symmetric cipher.
Its length can vary from 5 to 16 bytes.
:type key: byte string
:param mode:
The chaining mode to use for encryption or decryption.
:type mode: One of the supported ``MODE_*`` constants
:Keyword Arguments:
* *iv* (``byte string``) --
(Only applicable for ``MODE_CBC``, ``MODE_CFB``, ``MODE_OFB``,
and ``MODE_OPENPGP`` modes).
The initialization vector to use for encryption or decryption.
For ``MODE_CBC``, ``MODE_CFB``, and ``MODE_OFB`` it must be 8 bytes long.
For ``MODE_OPENPGP`` mode only,
it must be 8 bytes long for encryption
and 10 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
If not provided, a random byte string is generated (you must then
read its value with the :attr:`iv` attribute).
* *nonce* (``byte string``) --
(Only applicable for ``MODE_EAX`` and ``MODE_CTR``).
A value that must never be reused for any other encryption done
with this key.
For ``MODE_EAX`` there are no
restrictions on its length (recommended: **16** bytes).
For ``MODE_CTR``, its length must be in the range **[0..7]**.
If not provided for ``MODE_EAX``, a random byte string is generated (you
can read it back via the ``nonce`` attribute).
* *segment_size* (``integer``) --
        (Only ``MODE_CFB``). The number of **bits** the plaintext and ciphertext
are segmented in. It must be a multiple of 8.
If not specified, it will be assumed to be 8.
* *mac_len* : (``integer``) --
(Only ``MODE_EAX``)
Length of the authentication tag, in bytes.
It must be no longer than 8 (default).
* *initial_value* : (``integer``) --
(Only ``MODE_CTR``). The initial value for the counter within
the counter block. By default it is **0**.
:Return: a CAST object, of the applicable mode.
"""
return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
MODE_ECB = 1
MODE_CBC = 2
MODE_CFB = 3
MODE_OFB = 5
MODE_CTR = 6
MODE_OPENPGP = 7
MODE_EAX = 9
# Size of a data block (in bytes)
block_size = 8
# Size of a key (in bytes)
key_size = range(5, 16 + 1)
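# Hedged usage sketch (not part of the original module): authenticated
# encryption with MODE_EAX and a random 16-byte key. The plaintext and the use
# of Crypto.Random here are illustrative assumptions only.
if __name__ == '__main__':
    from Crypto.Random import get_random_bytes
    key = get_random_bytes(16)
    cipher = new(key, MODE_EAX)
    ciphertext, tag = cipher.encrypt_and_digest(b'attack at dawn')
    # Decryption needs the same key plus the nonce generated during encryption.
    decipher = new(key, MODE_EAX, nonce=cipher.nonce)
    assert decipher.decrypt_and_verify(ciphertext, tag) == b'attack at dawn'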
| 36.7625
| 85
| 0.585515
|
97c0e97eb8f6bd8fd578cad33b0f6d33801de8f1
| 541
|
py
|
Python
|
regexlib/2021-5-15/python_re2_test_file/regexlib_3226.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | 1
|
2022-01-24T14:43:23.000Z
|
2022-01-24T14:43:23.000Z
|
regexlib/python_re2_test_file/regexlib_3226.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
regexlib/python_re2_test_file/regexlib_3226.py
|
yetingli/ReDoS-Benchmarks
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
[
"MIT"
] | null | null | null |
# 3226
# ^([1-9]{1}[0-9]{0,7})+((,[1-9]{1}[0-9]{0,7}){0,1})+$
# EXPONENT
# nums:5
# EXPONENT AttackString:""+"1"*32+"!1 __EOA(iii)"
import re2 as re
from time import perf_counter
regex = """^([1-9]{1}[0-9]{0,7})+((,[1-9]{1}[0-9]{0,7}){0,1})+$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "1" * i * 1 + "!1 __EOA(iii)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *1}: took {DURATION} seconds!")
| 28.473684
| 66
| 0.547135
|
8f0603c87814b5f190fa1e688846aadb6a1a7ae5
| 568
|
py
|
Python
|
kryptobot/ta/pyti_volume_oscillator.py
|
eristoddle/Kryptobot
|
d0c3050a1c924125810946530670c19b2de72d3f
|
[
"Apache-2.0"
] | 24
|
2018-05-29T13:44:36.000Z
|
2022-03-12T20:41:45.000Z
|
kryptobot/ta/pyti_volume_oscillator.py
|
eristoddle/Kryptobot
|
d0c3050a1c924125810946530670c19b2de72d3f
|
[
"Apache-2.0"
] | 23
|
2018-07-08T02:31:18.000Z
|
2020-06-02T04:07:49.000Z
|
kryptobot/ta/pyti_volume_oscillator.py
|
eristoddle/Kryptobot
|
d0c3050a1c924125810946530670c19b2de72d3f
|
[
"Apache-2.0"
] | 14
|
2018-08-10T15:44:27.000Z
|
2021-06-14T07:14:52.000Z
|
from .generic_indicator import GenericIndicator
from pyti.volume_oscillator import volume_oscillator as indicator
# params: period
# https://github.com/kylejusticemagnuson/pyti/blob/master/pyti/volume_oscillator.py
class PytiVolumeOscillator(GenericIndicator):
def __init__(self, market, interval, periods, params=None):
super().__init__(market, interval, periods, None, None, params)
def next_calculation(self, candle):
if self.get_datawindow() is not None:
self.value = indicator(self.get_close(), self.params['period'])[-1]
| 37.866667
| 84
| 0.75
|
639faa18a307fe61c3e1afbad4ba856d51c7336b
| 6,349
|
py
|
Python
|
Simple Text classifiers/Text Classification on Yelp dataset/Y_classifier-Parallel str.py
|
tejasurya/Text_Classification_using_Neural_Networks
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
[
"MIT"
] | 1
|
2020-04-30T16:15:42.000Z
|
2020-04-30T16:15:42.000Z
|
Simple Text classifiers/Yelp dataset based basic DNN Classifiers/Y_classifier-Parallel str.py
|
tejasurya/Text_Classification_using_Neural_Networks
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
[
"MIT"
] | null | null | null |
Simple Text classifiers/Yelp dataset based basic DNN Classifiers/Y_classifier-Parallel str.py
|
tejasurya/Text_Classification_using_Neural_Networks
|
d4852780e6c86843aee768d306d19428c8cb9c7f
|
[
"MIT"
] | null | null | null |
from numpy import asarray
from numpy import zeros
import pandas as pd
import os
from keras.datasets import reuters
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
from keras.models import Sequential,Model
from keras.layers import Dense
from keras.layers import Flatten,Input
from keras.layers import Dropout
from keras.layers import GRU,CuDNNGRU,Reshape,maximum
from keras.layers import Bidirectional,Concatenate
from keras.layers import Conv1D
from keras.layers.convolutional import Conv2D
from keras.layers import MaxPooling1D
from keras.layers import MaxPool2D
from keras.layers import Embedding
from keras.layers.merge import concatenate
from collections import defaultdict
from nltk.corpus import brown,stopwords
import random
import nltk
import numpy as np
from sklearn.datasets import fetch_20newsgroups
#Custom Activation function
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
import math as m
# fix random seed for reproducibility
np.random.seed(7)
# load the dataset but only keep the top n words, zero the rest
top_words = 10000
batch_size=30
embedding_size=128
nclass=5
# Convolution
kernel_size = 5
filters1 = 64
filters2 =128
filters3=256
filters4=512
filters5=1024
pool_size = 4
sequence_length=500
filter_sizes = [3,4,5]
# GRU
gru_output_size = 64
#LSTM
lstm_output_size = 70
def newacti( x,alpha=m.exp(-1) ):
return K.elu(x,alpha)
trim_len=200
sample_cnt=500
os.chdir("G:/NLP/Dataset/Yelp");
df=pd.read_csv('yelp_review.csv');
#input
ip=df['text'].values.tolist()
ip=ip[0:sample_cnt]
for ty in range(len(ip)):
ip[ty]=ip[ty][0:trim_len]
len_finder=[]
for dat in ip:
len_finder.append(len(dat))
#output
op=df['stars'].values.tolist()
op=op[0:sample_cnt]
labels=[]
for zen in op:
if zen not in labels:
labels.append(zen)
label_class=[]
for ix in op:
label_class.append(labels.index(ix))
#Splitting train and test
input_train=[]
input_test=[]
input_valid=[]
j=0;
for zz in ip:
j=j+1
    if (j%5 == 0):
        input_test.append(zz)
    elif(j%5 == 1):
input_valid.append(zz)
else:
input_train.append(zz)
label_train=[]
label_test=[]
label_valid=[]
j=0;
for zz in label_class:
j=j+1
    if (j%5 == 0):
        label_test.append(zz)
    elif(j%5 == 1):
label_valid.append(zz)
else:
label_train.append(zz)
#one hot encoding
i=0
y=np.zeros((len(label_class),max(label_class)+1))
for x in label_class:
y[i][x]=1
i=i+1
i=0
y_train=np.zeros((len(label_train),max(label_train)+1))
for x in label_train:
y_train[i][x]=1
i=i+1
i=0
y_test=np.zeros((len(label_test),max(label_test)+1))
for x in label_test:
y_test[i][x]=1
i=i+1
i=0
y_valid=np.zeros((len(label_valid),max(label_valid)+1))
for x in label_valid:
y_valid[i][x]=1
i=i+1
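# Hedged aside (not part of the original script): keras.utils.to_categorical
# builds the same one-hot matrix as the manual loops above; the assert is only
# a sanity check and can be removed.
from keras.utils import to_categorical
assert (to_categorical(label_train, num_classes=max(label_train) + 1) == y_train).all()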
t = Tokenizer()
t.fit_on_texts(input_train)
vocab_size = len(t.word_index) + 1
# integer encode the documents
encoded_docs = t.texts_to_sequences(input_train)
#print(encoded_docs)
# pad documents to a max length of 4 words
max_length = max(len_finder)
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
# load the whole embedding into memory
embeddings_index = dict()
f = open("G:\\NLP\\Dataset\\GloVe\\glove.6B.100d.txt", encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
#print('Loaded %s word vectors.' % len(embeddings_index))
# create a weight matrix for words in training docs
embedding_matrix = zeros((vocab_size, 100))
for word, i in t.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
#Validating the model
vt = Tokenizer()
vt.fit_on_texts(input_valid)
vvocab_size = len(vt.word_index) + 1
# integer encode the documents
vencoded_docs = vt.texts_to_sequences(input_valid)
#print(encoded_docs)
# pad documents to a max length of 4 words
vpadded_docs = pad_sequences(vencoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
#Testing the model
tt = Tokenizer()
tt.fit_on_texts(input_test)
tvocab_size = len(tt.word_index) + 1
# integer encode the documents
tencoded_docs = tt.texts_to_sequences(input_test)
#print(encoded_docs)
# pad documents to a max length of 4 words
tpadded_docs = pad_sequences(tencoded_docs, maxlen=max_length, padding='post')
#print(padded_docs)
sequence_length=max_length
# create the model
embedding_vecor_length = 100
visible = Input(shape=(sequence_length,), dtype='int32')
# first feature extractor
embedding = Embedding(vocab_size,embedding_vecor_length, input_length=sequence_length, trainable=True)(visible)
e=Reshape((sequence_length,embedding_vecor_length,1))(embedding)
print(e.shape)
conv_0 = Conv2D(filters1, kernel_size=(filter_sizes[0], 100), padding='valid', kernel_initializer='normal', activation=newacti)(e)
maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] +1, 1), strides=(1,1), padding='valid')(conv_0)
#conv_1 = Conv2D(filters1, kernel_size=(filter_sizes[0], 100), padding='valid', kernel_initializer='normal', activation=newacti)(maxpool_0)
#maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] +1, 1), strides=(1,1), padding='valid')(conv_1)
gru=Reshape((sequence_length,embedding_vecor_length))(e)
gru=GRU(gru_output_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)(gru)
gru=GRU(gru_output_size)(gru)
gru=Reshape((1,1,gru_output_size))(gru)
merge = maximum([maxpool_0, gru])
flatten = Flatten()(merge)
dropout = Dropout(0.5)(flatten)
output = Dense(nclass, activation='softmax')(dropout)
model = Model(inputs=visible, outputs=output)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(padded_docs, y_train, epochs=3, verbose=0, batch_size=64, validation_data=(vpadded_docs, y_valid))
print('Model built successfully...Please wait.....Evaluating......')
# Final evaluation of the model
scores = model.evaluate(tpadded_docs, y_test, verbose=0)
print("Loss: %.2f%%" % (scores[0]*100))
print("Accuracy: %.2f%%" % (scores[1]*100))
| 26.676471
| 139
| 0.750669
|
05fb672ecb45eb00505d15bb6fdac468799f7e87
| 255
|
py
|
Python
|
leetcode.com/python/268_Missing_Number.py
|
XSoyOscar/Algorithms
|
6e1626d4b0f7804494f0a651698966ad6fd0fe18
|
[
"MIT"
] | 80
|
2020-07-02T20:47:21.000Z
|
2022-03-22T06:52:59.000Z
|
leetcode.com/python/268_Missing_Number.py
|
XSoyOscar/Algorithms
|
6e1626d4b0f7804494f0a651698966ad6fd0fe18
|
[
"MIT"
] | 1
|
2020-10-05T19:22:10.000Z
|
2020-10-05T19:22:10.000Z
|
leetcode.com/python/268_Missing_Number.py
|
XSoyOscar/Algorithms
|
6e1626d4b0f7804494f0a651698966ad6fd0fe18
|
[
"MIT"
] | 73
|
2020-04-09T22:28:01.000Z
|
2022-02-26T19:22:25.000Z
|
class Solution(object):
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
missing = len(nums)
for i, num in enumerate(nums):
missing ^= i ^ num
return missing
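# Hedged note (not part of the original solution): the loop relies on XOR
# cancellation (a ^ a == 0), so every matched index/value pair cancels out and
# only the missing number survives. Quick check with a hypothetical input:
if __name__ == '__main__':
    print(Solution().missingNumber([3, 0, 1]))  # 2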
| 21.25
| 38
| 0.494118
|
d89db994ccdf9f7495628258d0329a67880cd0bb
| 78
|
py
|
Python
|
akebono/__init__.py
|
OTA2000/akebono
|
11f88f3605a66989ac5cf11cb6af7b93987bcf59
|
[
"MIT"
] | 3
|
2018-09-28T01:35:41.000Z
|
2020-06-22T07:09:14.000Z
|
akebono/__init__.py
|
OTA2000/akebono
|
11f88f3605a66989ac5cf11cb6af7b93987bcf59
|
[
"MIT"
] | 1
|
2020-01-06T08:15:10.000Z
|
2020-01-06T08:15:10.000Z
|
akebono/__init__.py
|
OTA2000/akebono
|
11f88f3605a66989ac5cf11cb6af7b93987bcf59
|
[
"MIT"
] | 6
|
2018-08-10T03:04:28.000Z
|
2020-02-03T02:28:08.000Z
|
""" this is akebono package, and modified normally."""
__version__ = "0.0.1"
| 19.5
| 54
| 0.679487
|
9454ee141966f35ac24a4f8ad6d588a8103e6441
| 5,291
|
py
|
Python
|
scripts/emotion_module/objective/emocl/nn/modules.py
|
EnricaIMS/LostInBackTranslation
|
d6921e408197c60de6d4247f412ca1ae86f19b58
|
[
"MIT"
] | 1
|
2021-04-23T08:54:24.000Z
|
2021-04-23T08:54:24.000Z
|
scripts/emotion_module/objective/emocl/nn/modules.py
|
EnricaIMS/LostInBackTranslation
|
d6921e408197c60de6d4247f412ca1ae86f19b58
|
[
"MIT"
] | null | null | null |
scripts/emotion_module/objective/emocl/nn/modules.py
|
EnricaIMS/LostInBackTranslation
|
d6921e408197c60de6d4247f412ca1ae86f19b58
|
[
"MIT"
] | null | null | null |
from torch import nn, torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from objective.emocl.nn.regularization import GaussianNoise
class RNNEncoder(nn.Module):
def __init__(self, input_size, rnn_size, num_layers,
bidirectional, dropout):
"""
A simple RNN Encoder.
Args:
input_size (int): the size of the input features
rnn_size (int):
num_layers (int):
bidirectional (bool):
dropout (float):
Returns: outputs, last_outputs
- **outputs** of shape `(batch, seq_len, hidden_size)`:
tensor containing the output features `(h_t)`
from the last layer of the LSTM, for each t.
- **last_outputs** of shape `(batch, hidden_size)`:
tensor containing the last output features
from the last layer of the LSTM, for each t=seq_len.
"""
super(RNNEncoder, self).__init__()
self.rnn = nn.LSTM(input_size=input_size,
hidden_size=rnn_size,
num_layers=num_layers,
bidirectional=bidirectional,
dropout=dropout,
batch_first=True)
# the dropout "layer" for the output of the RNN
self.drop_rnn = nn.Dropout(dropout)
# define output feature size
self.feature_size = rnn_size
if bidirectional:
self.feature_size *= 2
@staticmethod
def last_by_index(outputs, lengths):
# Index of the last output for each sequence.
idx = (lengths - 1).view(-1, 1).expand(outputs.size(0),
outputs.size(2)).unsqueeze(1)
return outputs.gather(1, idx).squeeze(1)
@staticmethod
def split_directions(outputs):
direction_size = int(outputs.size(-1) / 2)
forward = outputs[:, :, :direction_size]
backward = outputs[:, :, direction_size:]
return forward, backward
def last_timestep(self, outputs, lengths, bi=False):
if bi:
forward, backward = self.split_directions(outputs)
last_forward = self.last_by_index(forward, lengths)
last_backward = backward[:, 0, :]
return torch.cat((last_forward, last_backward), dim=-1)
else:
return self.last_by_index(outputs, lengths)
def forward(self, embs, lengths):
"""
        This is the heart of the model. This function defines how the data
passes through the network.
Args:
embs (): word embeddings
lengths (): the lengths of each sentence
Returns: the logits for each class
"""
# pack the batch
packed = pack_padded_sequence(embs, lengths,
batch_first=True)
out_packed, _ = self.rnn(packed)
# unpack output - no need if we are going to use only the last outputs
outputs, _ = pad_packed_sequence(out_packed, batch_first=True)
# get the outputs from the last *non-masked* timestep for each sentence
last_outputs = self.last_timestep(outputs, lengths,
self.rnn.bidirectional)
# apply dropout to the outputs of the RNN
last_outputs = self.drop_rnn(last_outputs)
return outputs, last_outputs
class Embed(nn.Module):
def __init__(self,
num_embeddings,
embedding_dim,
embeddings=None,
noise=.0,
dropout=.0,
trainable=False):
"""
Define the layer of the model and perform the initializations
of the layers (wherever it is necessary)
Args:
embeddings (numpy.ndarray): the 2D ndarray with the word vectors
noise (float):
dropout (float):
trainable (bool):
"""
super(Embed, self).__init__()
# define the embedding layer, with the corresponding dimensions
self.embedding = nn.Embedding(num_embeddings=num_embeddings,
embedding_dim=embedding_dim)
if embeddings is not None:
print("Initializing Embedding layer with pre-trained weights!")
self.init_embeddings(embeddings, trainable)
# the dropout "layer" for the word embeddings
self.dropout = nn.Dropout(dropout)
# the gaussian noise "layer" for the word embeddings
self.noise = GaussianNoise(noise)
def init_embeddings(self, weights, trainable):
self.embedding.weight = nn.Parameter(weights,
requires_grad=trainable)
def forward(self, x):
"""
        This is the heart of the model. This function defines how the data
passes through the network.
Args:
x (): the input data (the sentences)
Returns: the logits for each class
"""
embeddings = self.embedding(x)
if self.noise.stddev > 0:
embeddings = self.noise(embeddings)
if self.dropout.p > 0:
embeddings = self.dropout(embeddings)
return embeddings
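# Hedged usage sketch (not part of the original module): composing Embed and
# RNNEncoder on random data; every size below is an illustrative assumption.
if __name__ == '__main__':
    batch, seq_len, vocab, emb_dim = 4, 10, 100, 32
    embed = Embed(num_embeddings=vocab, embedding_dim=emb_dim, dropout=0.1)
    encoder = RNNEncoder(input_size=emb_dim, rnn_size=64, num_layers=1,
                         bidirectional=True, dropout=0.0)
    tokens = torch.randint(0, vocab, (batch, seq_len))
    # lengths must be sorted in descending order for pack_padded_sequence
    lengths = torch.tensor([10, 8, 6, 3])
    outputs, last_outputs = encoder(embed(tokens), lengths)
    print(outputs.shape, last_outputs.shape)  # (4, 10, 128) and (4, 128)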
| 33.27673
| 79
| 0.578341
|
12615e52a77a509fe066397aecdd07e7aabd340f
| 7,246
|
py
|
Python
|
sdk/python/kfp/components/_data_passing.py
|
kamalmemon/pipelines
|
7e68991a2a7bfa767f893facfe58190690ca89ed
|
[
"Apache-2.0"
] | 1
|
2020-10-13T13:28:42.000Z
|
2020-10-13T13:28:42.000Z
|
sdk/python/kfp/components/_data_passing.py
|
kamalmemon/pipelines
|
7e68991a2a7bfa767f893facfe58190690ca89ed
|
[
"Apache-2.0"
] | 4
|
2022-02-14T21:39:59.000Z
|
2022-03-08T23:38:00.000Z
|
sdk/python/kfp/components/_data_passing.py
|
kamalmemon/pipelines
|
7e68991a2a7bfa767f893facfe58190690ca89ed
|
[
"Apache-2.0"
] | 2
|
2019-10-15T03:06:15.000Z
|
2019-10-15T03:10:39.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'get_canonical_type_struct_for_type',
'get_canonical_type_for_type_struct',
'get_deserializer_code_for_type',
'get_deserializer_code_for_type_struct',
'get_serializer_func_for_type_struct',
]
import inspect
from typing import Any, Callable, NamedTuple, Sequence
import warnings
Converter = NamedTuple('Converter', [
('types', Sequence[str]),
('type_names', Sequence[str]),
('serializer', Callable[[Any], str]),
('deserializer_code', str),
('definitions', str),
])
def _serialize_str(str_value: str) -> str:
if not isinstance(str_value, str):
raise TypeError('Value "{}" has type "{}" instead of str.'.format(str(str_value), str(type(str_value))))
return str_value
def _serialize_int(int_value: int) -> str:
if isinstance(int_value, str):
return int_value
if not isinstance(int_value, int):
raise TypeError('Value "{}" has type "{}" instead of int.'.format(str(int_value), str(type(int_value))))
return str(int_value)
def _serialize_float(float_value: float) -> str:
if isinstance(float_value, str):
return float_value
if not isinstance(float_value, (float, int)):
raise TypeError('Value "{}" has type "{}" instead of float.'.format(str(float_value), str(type(float_value))))
return str(float_value)
def _serialize_bool(bool_value: bool) -> str:
if isinstance(bool_value, str):
return bool_value
if not isinstance(bool_value, bool):
raise TypeError('Value "{}" has type "{}" instead of bool.'.format(str(bool_value), str(type(bool_value))))
return str(bool_value)
def _deserialize_bool(s) -> bool:
from distutils.util import strtobool
return strtobool(s) == 1
_bool_deserializer_definitions = inspect.getsource(_deserialize_bool)
_bool_deserializer_code = _deserialize_bool.__name__
def _serialize_json(obj) -> str:
if isinstance(obj, str):
return obj
import json
def default_serializer(obj):
if hasattr(obj, 'to_struct'):
return obj.to_struct()
else:
raise TypeError("Object of type '%s' is not JSON serializable and does not have .to_struct() method." % obj.__class__.__name__)
return json.dumps(obj, default=default_serializer, sort_keys=True)
def _serialize_base64_pickle(obj) -> str:
if isinstance(obj, str):
return obj
import base64
import pickle
return base64.b64encode(pickle.dumps(obj)).decode('ascii')
def _deserialize_base64_pickle(s):
import base64
import pickle
return pickle.loads(base64.b64decode(s))
_deserialize_base64_pickle_definitions = inspect.getsource(_deserialize_base64_pickle)
_deserialize_base64_pickle_code = _deserialize_base64_pickle.__name__
_converters = [
Converter([str], ['String', 'str'], _serialize_str, 'str', None),
Converter([int], ['Integer', 'int'], _serialize_int, 'int', None),
Converter([float], ['Float', 'float'], _serialize_float, 'float', None),
Converter([bool], ['Boolean', 'bool'], _serialize_bool, _bool_deserializer_code, _bool_deserializer_definitions),
Converter([list], ['JsonArray', 'List', 'list'], _serialize_json, 'json.loads', 'import json'), # ! JSON map keys are always strings. Python converts all keys to strings without warnings
Converter([dict], ['JsonObject', 'Dictionary', 'Dict', 'dict'], _serialize_json, 'json.loads', 'import json'), # ! JSON map keys are always strings. Python converts all keys to strings without warnings
Converter([], ['Json'], _serialize_json, 'json.loads', 'import json'),
Converter([], ['Base64Pickle'], _serialize_base64_pickle, _deserialize_base64_pickle_code, _deserialize_base64_pickle_definitions),
]
type_to_type_name = {typ: converter.type_names[0] for converter in _converters for typ in converter.types}
type_name_to_type = {type_name: converter.types[0] for converter in _converters for type_name in converter.type_names if converter.types}
type_to_deserializer = {typ: (converter.deserializer_code, converter.definitions) for converter in _converters for typ in converter.types}
type_name_to_deserializer = {type_name: (converter.deserializer_code, converter.definitions) for converter in _converters for type_name in converter.type_names}
type_name_to_serializer = {type_name: converter.serializer for converter in _converters for type_name in converter.type_names}
def get_canonical_type_struct_for_type(typ) -> str:
try:
return type_to_type_name.get(typ, None)
except:
return None
def get_canonical_type_for_type_struct(type_struct) -> str:
try:
return type_name_to_type.get(type_struct, None)
except:
return None
def get_deserializer_code_for_type(typ) -> str:
try:
        return type_name_to_deserializer.get(get_canonical_type_struct_for_type(typ), None)
except:
return None
def get_deserializer_code_for_type_struct(type_struct) -> str:
try:
return type_name_to_deserializer.get(type_struct, None)
except:
return None
def get_serializer_func_for_type_struct(type_struct) -> str:
try:
return type_name_to_serializer.get(type_struct, None)
except:
return None
def serialize_value(value, type_name: str) -> str:
'''serialize_value converts the passed value to string based on the serializer associated with the passed type_name'''
if isinstance(value, str):
return value # The value is supposedly already serialized
if type_name is None:
type_name = type_to_type_name.get(type(value), type(value).__name__)
warnings.warn('Missing type name was inferred as "{}" based on the value "{}".'.format(type_name, str(value)))
serializer = type_name_to_serializer.get(type_name, None)
if serializer:
try:
serialized_value = serializer(value)
if not isinstance(serialized_value, str):
raise TypeError('Serializer {} returned result of type "{}" instead of string.'.format(serializer, type(serialized_value)))
return serialized_value
except Exception as e:
raise ValueError('Failed to serialize the value "{}" of type "{}" to type "{}". Exception: {}'.format(
str(value),
str(type(value).__name__),
str(type_name),
str(e),
))
serialized_value = str(value)
    warnings.warn('There are no registered serializers from type "{}" to type "{}", so the value will be serialized as string "{}".'.format(
str(type(value).__name__),
str(type_name),
serialized_value),
)
return serialized_value
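# Hedged usage sketch (not part of the module): the registered converters in
# action for a couple of built-in types.
if __name__ == '__main__':
    print(get_canonical_type_struct_for_type(int))        # Integer
    print(get_canonical_type_for_type_struct('Boolean'))  # <class 'bool'>
    print(serialize_value([3, 1, 2], 'JsonArray'))        # [3, 1, 2]
    print(serialize_value(True, 'Boolean'))               # True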
| 37.739583
| 205
| 0.707839
|
dc592cbb4e72037820fc278d3c86b8c860691d67
| 2,827
|
py
|
Python
|
src/HABApp/openhab/definitions/values.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 44
|
2018-12-13T08:46:44.000Z
|
2022-03-07T03:23:21.000Z
|
src/HABApp/openhab/definitions/values.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 156
|
2019-03-02T20:53:31.000Z
|
2022-03-23T13:13:58.000Z
|
src/HABApp/openhab/definitions/values.py
|
DerOetzi/HABApp
|
a123fbfa9928ebb3cda9a84f6984dcba593c8236
|
[
"Apache-2.0"
] | 18
|
2019-03-08T07:13:21.000Z
|
2022-03-22T19:52:31.000Z
|
import base64
import typing
from HABApp.core.events import ComplexEventValue
class OnOffValue(ComplexEventValue):
ON = 'ON'
OFF = 'OFF'
def __init__(self, value):
super().__init__(value)
assert value == OnOffValue.ON or value == OnOffValue.OFF, f'{value} ({type(value)})'
self.on = value == 'ON'
def __str__(self):
return self.value
class PercentValue(ComplexEventValue):
def __init__(self, value: str):
percent = float(value)
assert 0 <= percent <= 100, f'{percent} ({type(percent)})'
super().__init__(percent)
def __str__(self):
return f'{self.value}%'
class OpenClosedValue(ComplexEventValue):
OPEN = 'OPEN'
CLOSED = 'CLOSED'
def __init__(self, value):
super().__init__(value)
assert value == OpenClosedValue.OPEN or value == OpenClosedValue.CLOSED, f'{value} ({type(value)})'
self.open = value == OpenClosedValue.OPEN
def __str__(self):
return self.value
class UpDownValue(ComplexEventValue):
UP = 'UP'
DOWN = 'DOWN'
def __init__(self, value):
super().__init__(value)
assert value == UpDownValue.UP or value == UpDownValue.DOWN, f'{value} ({type(value)})'
self.up = value == UpDownValue.UP
def __str__(self):
return self.value
class HSBValue(ComplexEventValue):
def __init__(self, value: str):
super().__init__(tuple(float(k) for k in value.split(',')))
def __str__(self):
return f'{self.value[0]}°,{self.value[1]}%,{self.value[2]}%'
class QuantityValue(ComplexEventValue):
@staticmethod
def split_unit(value: str) -> typing.Tuple[str, str]:
p = value.rfind(' ')
# dimensionless has no unit
if p < 0:
return value, ''
val = value[0:p]
unit = value[p + 1:]
return val, unit
def __init__(self, value: str):
value, unit = QuantityValue.split_unit(value)
try:
val: typing.Union[int, float] = int(value)
except ValueError:
val = float(value)
super().__init__(val)
self.unit = unit
def __str__(self):
return f'{self.value} {self.unit}'
class RawValue(ComplexEventValue):
def __init__(self, value: str):
# The data is in this format
# data:image/png;base64,iVBORw0KGgo....
# extract the contents from "data:"
sep_type = value.find(';')
self.type = value[5: sep_type]
# this is our encoded payload
sep_enc = value.find(',', sep_type)
encoding = value[sep_type + 1: sep_enc]
assert encoding == 'base64', f'"{encoding}"'
# set the bytes as value
super().__init__(base64.b64decode(value[sep_enc + 1:]))
def __str__(self):
return f'{self.type}'
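# Hedged usage sketch (not part of the module): how a few of these wrapper
# values parse raw openHAB state strings; the inputs are illustrative only.
if __name__ == '__main__':
    print(OnOffValue('ON').on)        # True
    print(PercentValue('55.0'))       # 55.0%
    q = QuantityValue('20.5 °C')
    print(q.value, q.unit)            # 20.5 °C
    print(HSBValue('120,100,50'))     # 120.0°,100.0%,50.0%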
| 26.175926
| 107
| 0.598868
|
963bbf8b04e804b11ab9fbb06973e73321ce11db
| 17,242
|
py
|
Python
|
gapic/generator/generator.py
|
googleapis/client-generator-python
|
db9ed9177e65aff07a0c1addf73c32da4dabcaf9
|
[
"Apache-2.0"
] | null | null | null |
gapic/generator/generator.py
|
googleapis/client-generator-python
|
db9ed9177e65aff07a0c1addf73c32da4dabcaf9
|
[
"Apache-2.0"
] | null | null | null |
gapic/generator/generator.py
|
googleapis/client-generator-python
|
db9ed9177e65aff07a0c1addf73c32da4dabcaf9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
import yaml
import itertools
import re
import os
import pathlib
import typing
from typing import Any, DefaultDict, Dict, Mapping, Tuple
from hashlib import sha256
from collections import OrderedDict, defaultdict
from gapic.samplegen_utils.utils import coerce_response_name, is_valid_sample_cfg, render_format_string
from gapic.samplegen_utils.types import DuplicateSample
from gapic.samplegen_utils import snippet_index, snippet_metadata_pb2
from gapic.samplegen import manifest, samplegen
from gapic.generator import formatter
from gapic.schema import api
from gapic import utils
from gapic.utils import Options
from google.protobuf.compiler.plugin_pb2 import CodeGeneratorResponse
class Generator:
"""A protoc code generator for client libraries.
This class provides an interface for getting a
:class:`~.plugin_pb2.CodeGeneratorResponse` for an :class:`~api.API`
schema object (which it does through rendering templates).
Args:
opts (~.options.Options): An options instance.
templates (str): Optional. Path to the templates to be
rendered. If this is not provided, the templates included with
this application are used.
"""
def __init__(self, opts: Options) -> None:
# Create the jinja environment with which to render templates.
self._env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=opts.templates),
undefined=jinja2.StrictUndefined,
extensions=["jinja2.ext.do"],
trim_blocks=True,
lstrip_blocks=True,
)
# Add filters which templates require.
self._env.filters["rst"] = utils.rst
self._env.filters["snake_case"] = utils.to_snake_case
self._env.filters["camel_case"] = utils.to_camel_case
self._env.filters["sort_lines"] = utils.sort_lines
self._env.filters["wrap"] = utils.wrap
self._env.filters["coerce_response_name"] = coerce_response_name
self._env.filters["render_format_string"] = render_format_string
# Add tests to determine type of expressions stored in strings
self._env.tests["str_field_pb"] = utils.is_str_field_pb
self._env.tests["msg_field_pb"] = utils.is_msg_field_pb
self._sample_configs = opts.sample_configs
def get_response(
self, api_schema: api.API, opts: Options
) -> CodeGeneratorResponse:
"""Return a :class:`~.CodeGeneratorResponse` for this library.
This is a complete response to be written to (usually) stdout, and
thus read by ``protoc``.
Args:
api_schema (~api.API): An API schema object.
opts (~.options.Options): An options instance.
Returns:
~.CodeGeneratorResponse: A response describing appropriate
files and contents. See ``plugin.proto``.
"""
output_files: Dict[str, CodeGeneratorResponse.File] = OrderedDict()
sample_templates, client_templates = utils.partition(
lambda fname: os.path.basename(
fname) == samplegen.DEFAULT_TEMPLATE_NAME,
self._env.loader.list_templates(), # type: ignore
)
# We generate code snippets *before* the library code so snippets
# can be inserted into method docstrings.
snippet_idx = snippet_index.SnippetIndex(api_schema)
if sample_templates:
sample_output, snippet_idx = self._generate_samples_and_manifest(
api_schema, snippet_idx, self._env.get_template(
sample_templates[0]),
opts=opts,
)
output_files.update(sample_output)
# Iterate over each template and add the appropriate output files
# based on that template.
# Sample templates work differently: there's (usually) only one,
# and instead of iterating over it/them, we iterate over samples
# and plug those into the template.
for template_name in client_templates:
# Quick check: Skip "private" templates.
filename = template_name.split("/")[-1]
if filename.startswith("_") and filename != "__init__.py.j2":
continue
# Append to the output files dictionary.
output_files.update(
self._render_template(
template_name, api_schema=api_schema, opts=opts, snippet_index=snippet_idx)
)
# Return the CodeGeneratorResponse output.
res = CodeGeneratorResponse(
file=[i for i in output_files.values()]) # type: ignore
res.supported_features |= CodeGeneratorResponse.Feature.FEATURE_PROTO3_OPTIONAL # type: ignore
return res
def _generate_samples_and_manifest(
self, api_schema: api.API, index: snippet_index.SnippetIndex, sample_template: jinja2.Template, *, opts: Options) -> Tuple[Dict, snippet_index.SnippetIndex]:
"""Generate samples and samplegen manifest for the API.
Arguments:
api_schema (api.API): The schema for the API to which the samples belong.
sample_template (jinja2.Template): The template to use to generate samples.
opts (Options): Additional generator options.
Returns:
Tuple[Dict[str, CodeGeneratorResponse.File], snippet_index.SnippetIndex] : A dict mapping filepath to rendered file.
"""
# The two-layer data structure lets us do two things:
# * detect duplicate samples, which is an error
# * detect distinct samples with the same ID, which are disambiguated
id_to_hash_to_spec: DefaultDict[str,
Dict[str, Any]] = defaultdict(dict)
# Autogenerated sample specs
autogen_specs: typing.List[typing.Dict[str, Any]] = []
if opts.autogen_snippets:
autogen_specs = list(
samplegen.generate_sample_specs(api_schema, opts=opts))
# Also process any handwritten sample specs
handwritten_specs = samplegen.parse_handwritten_specs(
self._sample_configs)
sample_specs = autogen_specs + list(handwritten_specs)
for spec in sample_specs:
# Every sample requires an ID. This may be provided
# by a samplegen config author.
# If no ID is provided, fall back to the region tag.
#
# Ideally the sample author should pick a descriptive, unique ID,
# but this may be impractical and can be error-prone.
spec_hash = sha256(str(spec).encode("utf8")).hexdigest()[:8]
sample_id = spec.get("id") or spec.get("region_tag") or spec_hash
spec["id"] = sample_id
hash_to_spec = id_to_hash_to_spec[sample_id]
if spec_hash in hash_to_spec:
raise DuplicateSample(
f"Duplicate samplegen spec found: {spec}")
hash_to_spec[spec_hash] = spec
out_dir = "samples/generated_samples"
fpath_to_spec_and_rendered = {}
for hash_to_spec in id_to_hash_to_spec.values():
for spec_hash, spec in hash_to_spec.items():
id_is_unique = len(hash_to_spec) == 1
# The ID is used to generate the file name. It must be globally unique.
if not id_is_unique:
spec["id"] += f"_{spec_hash}"
sample, snippet_metadata = samplegen.generate_sample(
spec, api_schema, sample_template,)
fpath = utils.to_snake_case(spec["id"]) + ".py"
fpath_to_spec_and_rendered[os.path.join(out_dir, fpath)] = (
spec,
sample,
)
snippet_metadata.file = fpath
snippet_metadata.title = fpath
index.add_snippet(
snippet_index.Snippet(sample, snippet_metadata))
output_files = {
fname: CodeGeneratorResponse.File(
content=formatter.fix_whitespace(sample), name=fname
)
for fname, (_, sample) in fpath_to_spec_and_rendered.items()
}
if index.metadata_index.snippets:
# NOTE(busunkim): Not all fields are yet populated in the snippet metadata.
# Expected filename: snippet_metadata_{apishortname}_{apiversion}.json
snippet_metadata_path = str(pathlib.Path(
out_dir) / f"snippet_metadata_{api_schema.naming.name}_{api_schema.naming.version}.json").lower()
output_files[snippet_metadata_path] = CodeGeneratorResponse.File(
content=formatter.fix_whitespace(index.get_metadata_json()), name=snippet_metadata_path)
return output_files, index
def _render_template(
self, template_name: str, *, api_schema: api.API, opts: Options, snippet_index: snippet_index.SnippetIndex,
) -> Dict[str, CodeGeneratorResponse.File]:
"""Render the requested templates.
Args:
template_name (str): The template to be rendered.
It is expected that these come from
:class:`jinja2.FileSystemLoader`, and they should be
able to be sent to the :meth:`jinja2.Environment.get_template`
method.
api_schema (~.api.API): An API schema object.
Returns:
Sequence[~.CodeGeneratorResponse.File]: A sequence of File
objects for inclusion in the final response.
"""
answer: Dict[str, CodeGeneratorResponse.File] = OrderedDict()
skip_subpackages = False
# Very, very special case. This flag exists to gate this one file.
if not opts.metadata and template_name.endswith("gapic_metadata.json.j2"):
return answer
# Quick check: Rendering per service and per proto would be a
# combinatorial explosion and is almost certainly not what anyone
# ever wants. Error colorfully on it.
if "%service" in template_name and "%proto" in template_name:
raise ValueError(
"Template files may live under a %proto or "
"%service directory, but not both."
)
# If this template should be rendered for subpackages, process it
# for all subpackages and set the strict flag (restricting what
# services and protos we pull from for the remainder of the method).
if "%sub" in template_name:
for subpackage in api_schema.subpackages.values():
answer.update(
self._render_template(
template_name, api_schema=subpackage, opts=opts, snippet_index=snippet_index
)
)
skip_subpackages = True
# If this template should be rendered once per proto, iterate over
# all protos to be rendered
if "%proto" in template_name:
for proto in api_schema.protos.values():
if (
skip_subpackages
and proto.meta.address.subpackage != api_schema.subpackage_view
):
continue
answer.update(
self._get_file(
template_name, api_schema=api_schema, proto=proto, opts=opts, snippet_index=snippet_index
)
)
return answer
# If this template should be rendered once per service, iterate
# over all services to be rendered.
if "%service" in template_name:
for service in api_schema.services.values():
if (
(skip_subpackages
and service.meta.address.subpackage != api_schema.subpackage_view)
or
('transport' in template_name
and not self._is_desired_transport(template_name, opts))
or
# TODO(yon-mg) - remove when rest async implementation resolved
                    # temporarily stop async client gen while rest async is unknown
('async' in template_name and 'grpc' not in opts.transport)
):
continue
answer.update(
self._get_file(
template_name,
api_schema=api_schema,
service=service,
opts=opts,
snippet_index=snippet_index,
)
)
return answer
# This file is not iterating over anything else; return back
# the one applicable file.
answer.update(self._get_file(
template_name, api_schema=api_schema, opts=opts, snippet_index=snippet_index))
return answer
def _is_desired_transport(self, template_name: str, opts: Options) -> bool:
"""Returns true if template name contains a desired transport"""
desired_transports = ['__init__', 'base'] + opts.transport
return any(transport in template_name for transport in desired_transports)
def _get_file(
self,
template_name: str,
*,
opts: Options,
api_schema: api.API,
**context,
):
"""Render a template to a protobuf plugin File object."""
# Determine the target filename.
fn = self._get_filename(
template_name, api_schema=api_schema, context=context,)
# Render the file contents.
cgr_file = CodeGeneratorResponse.File(
content=formatter.fix_whitespace(
self._env.get_template(template_name).render(
api=api_schema, opts=opts, **context
),
),
name=fn,
)
# Quick check: Do not render empty files.
if utils.empty(cgr_file.content) and not fn.endswith(
("py.typed", "__init__.py")
):
return {}
# Return the filename and content in a length-1 dictionary
# (because we track output files overall in a dictionary).
return {fn: cgr_file}
def _get_filename(
self, template_name: str, *, api_schema: api.API, context: dict = None,
) -> str:
"""Return the appropriate output filename for this template.
This entails running the template name through a series of
replacements to replace the "filename variables" (``%name``,
``%service``, etc.).
Additionally, any of these variables may be substituted with an
empty value, and we should do the right thing in this case.
(The exception to this is ``%service``, which is guaranteed to be
set if it is needed.)
Args:
template_name (str): The filename of the template, from the
filesystem, relative to ``templates/``.
api_schema (~.api.API): An API schema object.
context (Mapping): Additional context being sent to the template.
Returns:
str: The appropriate output filename.
"""
filename = template_name[: -len(".j2")]
# Replace the %namespace variable.
filename = filename.replace(
"%namespace",
os.path.sep.join(i.lower() for i in api_schema.naming.namespace),
).lstrip(os.path.sep)
# Replace the %name, %version, and %sub variables.
filename = filename.replace(
"%name_%version", api_schema.naming.versioned_module_name
)
filename = filename.replace("%version", api_schema.naming.version)
filename = filename.replace("%name", api_schema.naming.module_name)
filename = filename.replace(
"%sub", "/".join(api_schema.subpackage_view))
# Replace the %service variable if applicable.
if context and "service" in context:
filename = filename.replace(
"%service", context["service"].module_name,)
        # Replace the %proto variable if applicable.
# In the cases of protos, we also honor subpackages.
if context and "proto" in context:
filename = filename.replace(
"%proto", context["proto"].module_name,)
# Paths may have empty path segments if components are empty
# (e.g. no %version); handle this.
filename = re.sub(r"/+", "/", filename)
# Done, return the filename.
return filename
__all__ = ("Generator",)
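# Hedged illustration (not part of the generator): the same chain of string
# replacements that _get_filename applies, run on a hypothetical template path
# for a schema with namespace ("google", "cloud"), module "vision", version
# "v1" and service module "image_annotator". Reuses the module-level os/re.
if __name__ == "__main__":
    filename = "%namespace/%name_%version/services/%service/client.py.j2"[: -len(".j2")]
    filename = filename.replace(
        "%namespace", os.path.sep.join(("google", "cloud"))).lstrip(os.path.sep)
    filename = filename.replace("%name_%version", "vision_v1")
    filename = filename.replace("%service", "image_annotator")
    filename = re.sub(r"/+", "/", filename)
    print(filename)  # google/cloud/vision_v1/services/image_annotator/client.py (POSIX)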
| 41.347722
| 169
| 0.616808
|
97f0c3d0888b1d505ef119f4ae78b66cbe88e4b9
| 517
|
py
|
Python
|
TEP/atcoder242/C.py
|
GuilhermeBraz/unb-workflow
|
37d680a675a87cea2ff936badf94d757393870c3
|
[
"MIT"
] | null | null | null |
TEP/atcoder242/C.py
|
GuilhermeBraz/unb-workflow
|
37d680a675a87cea2ff936badf94d757393870c3
|
[
"MIT"
] | null | null | null |
TEP/atcoder242/C.py
|
GuilhermeBraz/unb-workflow
|
37d680a675a87cea2ff936badf94d757393870c3
|
[
"MIT"
] | null | null | null |
# Given an integer N, find the number of integers X that satisfy all of the following conditions, modulo 998244353.
'''X is an N-digit positive integer.
Let X1,X2,…,XN be the digits of X from top to bottom. They satisfy all of the following:
1≤Xi≤9 for all integers 1≤i≤N;
∣Xi−Xi+1∣≤1 for all integers 1≤i≤N−1.
'''
N = int(input())
#total numbers with N between 1 and 9 that Xi - Xi+1 <= 1
T = 3**(N-1) * 8 + 1
print(T% 998244353)
11
12
21
22
23
32
33
34
43
44
45
54
55
56
65
66
67
76
77
78
87
88
89
98
99
| 12.609756
| 115
| 0.673114
|
409850ad6655d89b72301f089399f55de7ce808f
| 19,378
|
py
|
Python
|
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetModelProperties.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | null | null | null |
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetModelProperties.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | 1
|
2021-07-08T10:26:06.000Z
|
2021-07-08T10:31:11.000Z
|
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetModelProperties.py
|
Dieptranivsr/Ros_Diep
|
d790e75e6f5da916701b11a2fdf3e03b6a47086b
|
[
"MIT"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/GetModelPropertiesRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetModelPropertiesRequest(genpy.Message):
_md5sum = "ea31c8eab6fc401383cf528a7c0984ba"
_type = "gazebo_msgs/GetModelPropertiesRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string model_name # name of Gazebo Model
"""
__slots__ = ['model_name']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
model_name
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetModelPropertiesRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.model_name is None:
self.model_name = ''
else:
self.model_name = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.model_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.model_name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.model_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.model_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.model_name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
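# Hedged usage sketch (not part of the generated code): round-trip a request
# through serialize()/deserialize() with an in-memory buffer. Assumes a genpy
# version that registers the "rosmsg" codecs error handler on import.
def _example_request_roundtrip():
    from io import BytesIO
    req = GetModelPropertiesRequest(model_name='pr2')
    buff = BytesIO()
    req.serialize(buff)
    copy = GetModelPropertiesRequest().deserialize(buff.getvalue())
    return copy.model_name  # 'pr2'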
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/GetModelPropertiesResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetModelPropertiesResponse(genpy.Message):
_md5sum = "b7f370938ef77b464b95f1bab3ec5028"
_type = "gazebo_msgs/GetModelPropertiesResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string parent_model_name # parent model
string canonical_body_name # name of canonical body, body names are prefixed by model name, e.g. pr2::base_link
string[] body_names # list of bodies, body names are prefixed by model name, e.g. pr2::base_link
string[] geom_names # list of geoms
string[] joint_names # list of joints attached to the model
string[] child_model_names # list of child models
bool is_static # returns true if model is static
bool success # return true if get successful
string status_message # comments if available
"""
__slots__ = ['parent_model_name','canonical_body_name','body_names','geom_names','joint_names','child_model_names','is_static','success','status_message']
_slot_types = ['string','string','string[]','string[]','string[]','string[]','bool','bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
parent_model_name,canonical_body_name,body_names,geom_names,joint_names,child_model_names,is_static,success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetModelPropertiesResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.parent_model_name is None:
self.parent_model_name = ''
if self.canonical_body_name is None:
self.canonical_body_name = ''
if self.body_names is None:
self.body_names = []
if self.geom_names is None:
self.geom_names = []
if self.joint_names is None:
self.joint_names = []
if self.child_model_names is None:
self.child_model_names = []
if self.is_static is None:
self.is_static = False
if self.success is None:
self.success = False
if self.status_message is None:
self.status_message = ''
else:
self.parent_model_name = ''
self.canonical_body_name = ''
self.body_names = []
self.geom_names = []
self.joint_names = []
self.child_model_names = []
self.is_static = False
self.success = False
self.status_message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.parent_model_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.canonical_body_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
length = len(self.body_names)
buff.write(_struct_I.pack(length))
for val1 in self.body_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
length = len(self.geom_names)
buff.write(_struct_I.pack(length))
for val1 in self.geom_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
length = len(self.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
length = len(self.child_model_names)
buff.write(_struct_I.pack(length))
for val1 in self.child_model_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
_x = self
buff.write(_get_struct_2B().pack(_x.is_static, _x.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.parent_model_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.parent_model_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.canonical_body_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.canonical_body_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.body_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.body_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.geom_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.geom_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.child_model_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.child_model_names.append(val1)
_x = self
start = end
end += 2
(_x.is_static, _x.success,) = _get_struct_2B().unpack(str[start:end])
self.is_static = bool(self.is_static)
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.parent_model_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.canonical_body_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
length = len(self.body_names)
buff.write(_struct_I.pack(length))
for val1 in self.body_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
length = len(self.geom_names)
buff.write(_struct_I.pack(length))
for val1 in self.geom_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
length = len(self.joint_names)
buff.write(_struct_I.pack(length))
for val1 in self.joint_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
length = len(self.child_model_names)
buff.write(_struct_I.pack(length))
for val1 in self.child_model_names:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.Struct('<I%ss'%length).pack(length, val1))
_x = self
buff.write(_get_struct_2B().pack(_x.is_static, _x.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.parent_model_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.parent_model_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.canonical_body_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.canonical_body_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.body_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.body_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.geom_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.geom_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.joint_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.joint_names.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.child_model_names = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8', 'rosmsg')
else:
val1 = str[start:end]
self.child_model_names.append(val1)
_x = self
start = end
end += 2
(_x.is_static, _x.success,) = _get_struct_2B().unpack(str[start:end])
self.is_static = bool(self.is_static)
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2B = None
def _get_struct_2B():
global _struct_2B
if _struct_2B is None:
_struct_2B = struct.Struct("<2B")
return _struct_2B
class GetModelProperties(object):
_type = 'gazebo_msgs/GetModelProperties'
_md5sum = '5717f7bd34ed990fa80e28b3015027a3'
_request_class = GetModelPropertiesRequest
_response_class = GetModelPropertiesResponse
| 34.915315
| 156
| 0.608628
|
775b1cc06b36f045711f7dc5db91e96e8e944bc7
| 2,869
|
py
|
Python
|
examples/plot_05_calculate_DNP_enhancements-i.py
|
DNPLab/DNPLab
|
78999a4e8320b6476a5aa55d9884c49d74149edc
|
[
"MIT"
] | 4
|
2020-09-23T08:09:33.000Z
|
2022-02-10T22:02:11.000Z
|
examples/plot_05_calculate_DNP_enhancements-i.py
|
DNPLab/DNPLab
|
78999a4e8320b6476a5aa55d9884c49d74149edc
|
[
"MIT"
] | 126
|
2020-09-16T22:25:59.000Z
|
2022-03-29T17:15:27.000Z
|
examples/plot_05_calculate_DNP_enhancements-i.py
|
DNPLab/DNPLab
|
78999a4e8320b6476a5aa55d9884c49d74149edc
|
[
"MIT"
] | 5
|
2020-09-24T20:57:31.000Z
|
2021-08-19T01:52:16.000Z
|
# %%
"""
.. _05-calculate-dnp-enhancements-i:
===================================
05 - Calculate DNP Enhancements (I)
===================================
This example demonstrates how to import DNP-NMR data from an hdf5 file, calculate the DNP enhancements, and plot them. If you don't know how to create the workspace yet, please take a look at this tutorial first: :ref:`04-create-a-2d-dnpdata-object-from-individual-spectra`.
"""
# %%
# Load NMR Spectra
# ----------------
# In this example, we will calculate the enhancement for each DNP spectrum and create a figure of the DNP enhancement versus the microwave power. We will import the 2D dnpdata object created in the previous example. If you are not yet familiar with how to concatenate individual spectra into the 2D dnpdata object, check out this tutorial: :ref:`04-create-a-2d-dnpdata-object-from-individual-spectra`
#
# First, load the 2D dnplab data object and assign it to a workspace (here ws). Once the data is loaded, the workspace will have 2 dnpdata objects, the raw data ("raw") and the processed NMR spectra ("proc").
import dnplab as dnp
file_name_path = "../data/h5/PowerBuildUp.h5"
ws = dnp.dnpImport.load(file_name_path)
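# %%
# As a quick check (a minimal sketch that is not part of the original example,
# and which assumes the workspace behaves like a Python dictionary, as the
# ``ws["enhancements"]`` access further below suggests), the two loaded
# objects can be listed by their keys:
print(list(ws.keys()))  # expected to include "raw" and "proc"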
# %%
# Calculate DNP Enhancement Factors
# ---------------------------------
# DNPLab provides a convenient way to calculate the DNP enhancement factors using the function ``calculate_enhancement``. Enhancement factors are calculated from integrals. Integrals can be calculated over the entire spectrum, over multiple regions, or at just a single point. However, without calculating integrals first, the ``calculate_enhancement`` function will return an error.
dnp.dnpTools.integrate(ws)
dnp.dnpNMR.calculate_enhancement(ws)
# %%
# In this case, the integral is calculated over the entire spectrum, and the enhancement factors are then calculated from these integrals.
# %%
# .. note::
#    The default behavior of the ``calculate_enhancement`` function is to use the first spectrum as the Off signal. If this is the case, the argument ``off_spectrum`` is not necessary unless you want to specify the slice that contains the off spectrum.
#    The ``calculate_enhancement`` function can also calculate the enhancement for specific regions of the spectrum. This behavior will be discussed in the next example (:ref:`07_align_nmr_spectra`).
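#
#    For instance, to point explicitly at the slice holding the off spectrum, the call could look like the following sketch (the slice index shown here is only a placeholder and is not part of the original example):
#
#    .. code-block:: python
#
#        dnp.dnpNMR.calculate_enhancement(ws, off_spectrum = 1)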
# %%
# If needed, access your array of enhancements as:
enhancements = ws["enhancements"].values
# %%
# Plot Enhancement Data
# ---------------------
# Finally, we can plot the enhancement data versus the microwave power.
dnp.dnpResults.figure()
dnp.dnpResults.plot(ws["enhancements"],linestyle = '-', marker = 'o', fillstyle = 'none')
dnp.dnpResults.plt.xlabel("Microwave Power (dBm)")
dnp.dnpResults.plt.ylabel("ODNP Enhancement Factor")
dnp.dnpResults.plt.title("10 mM TEMPO in Toluene")
dnp.dnpResults.plt.grid(True)
dnp.dnpResults.show()
| 52.163636
| 401
| 0.729174
|
3264c0c1430bfec9e4577cfaea8dc1926b25630d
| 329
|
py
|
Python
|
custom/icds_reports/migrations/0070_aww_name_in_agg_ccs_view.py
|
dannyroberts/commcare-hq
|
4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/migrations/0070_aww_name_in_agg_ccs_view.py
|
dannyroberts/commcare-hq
|
4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/migrations/0070_aww_name_in_agg_ccs_view.py
|
dannyroberts/commcare-hq
|
4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-10-11 14:13
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0069_valid_visits'),
]
operations = [
]
| 19.352941
| 49
| 0.696049
|
6652514cfe20c3c629ee2aa5ffa5662a657be752
| 911
|
py
|
Python
|
librespot/core/ApResolver.py
|
JeffmeisterJ/librespot-python
|
0e0e1db65aa40262bd13479b97f81ae8c29ae049
|
[
"Apache-2.0"
] | 1
|
2021-12-15T22:44:46.000Z
|
2021-12-15T22:44:46.000Z
|
librespot/core/ApResolver.py
|
JeffmeisterJ/librespot-python
|
0e0e1db65aa40262bd13479b97f81ae8c29ae049
|
[
"Apache-2.0"
] | 12
|
2021-10-06T02:18:44.000Z
|
2022-02-07T02:16:47.000Z
|
librespot/core/ApResolver.py
|
JeffmeisterJ/librespot-python
|
0e0e1db65aa40262bd13479b97f81ae8c29ae049
|
[
"Apache-2.0"
] | null | null | null |
import random
import requests
class ApResolver:
base_url = "http://apresolve.spotify.com/"
@staticmethod
def request(service_type: str):
response = requests.get("{}?type={}".format(ApResolver.base_url,
service_type))
return response.json()
@staticmethod
def get_random_of(service_type: str):
pool = ApResolver.request(service_type)
urls = pool.get(service_type)
if urls is None or len(urls) == 0:
raise RuntimeError()
return random.choice(urls)
@staticmethod
def get_random_dealer() -> str:
return ApResolver.get_random_of("dealer")
@staticmethod
def get_random_spclient() -> str:
return ApResolver.get_random_of("spclient")
@staticmethod
def get_random_accesspoint() -> str:
return ApResolver.get_random_of("accesspoint")
| 26.794118
| 72
| 0.625686
|
e2d65d05940737de97567805b0a98f97c780edbc
| 340
|
py
|
Python
|
mindhome_alpha/erpnext/patches/v5_0/project_costing.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
mindhome_alpha/erpnext/patches/v5_0/project_costing.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | null | null | null |
mindhome_alpha/erpnext/patches/v5_0/project_costing.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("Project")
frappe.db.sql("update `tabProject` set expected_start_date = project_start_date, \
expected_end_date = completion_date, actual_end_date = act_completion_date, \
estimated_costing = project_value, gross_margin = gross_margin_value")
| 42.5
| 83
| 0.817647
|
aa4de105e24cd4b14af02401e2969eb95c3c9664
| 8,870
|
py
|
Python
|
infer_detectron2_tridentnet_process.py
|
Ikomia-dev/infer_detectron2_tridentnet
|
7c167b12d6fcee9f5acc3dae57a4a05372ddff99
|
[
"Apache-2.0"
] | 3
|
2021-02-16T08:36:43.000Z
|
2021-02-17T08:11:57.000Z
|
infer_detectron2_tridentnet_process.py
|
Ikomia-dev/Detectron2_TridentNet
|
7c167b12d6fcee9f5acc3dae57a4a05372ddff99
|
[
"Apache-2.0"
] | null | null | null |
infer_detectron2_tridentnet_process.py
|
Ikomia-dev/Detectron2_TridentNet
|
7c167b12d6fcee9f5acc3dae57a4a05372ddff99
|
[
"Apache-2.0"
] | null | null | null |
from infer_detectron2_tridentnet import update_path
from ikomia import core, dataprocess
import copy
import os
import random
from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.config import get_cfg
from infer_detectron2_tridentnet.TridentNet_git.tridentnet import add_tridentnet_config
# --------------------
# - Class to handle the process parameters
# - Inherits core.CProtocolTaskParam from Ikomia API
# --------------------
class TridentnetParam(core.CWorkflowTaskParam):
def __init__(self):
core.CWorkflowTaskParam.__init__(self)
self.cuda = True
self.proba = 0.8
def setParamMap(self, param_map):
        # param_map stores values as strings (see getParamMap), so parse the
        # boolean explicitly and keep the probability threshold as a float
        self.cuda = param_map["cuda"] == "True"
        self.proba = float(param_map["proba"])
def getParamMap(self):
param_map = core.ParamMap()
param_map["cuda"] = str(self.cuda)
param_map["proba"] = str(self.proba)
return param_map
# --------------------
# - Class which implements the process
# - Inherits core.CProtocolTask or derived from Ikomia API
# --------------------
class Tridentnet(dataprocess.C2dImageTask):
def __init__(self, name, param):
dataprocess.C2dImageTask.__init__(self, name)
if param is None:
self.setParam(TridentnetParam())
else:
self.setParam(copy.deepcopy(param))
# get and set config model
self.folder = os.path.dirname(os.path.realpath(__file__))
self.MODEL_NAME_CONFIG = "tridentnet_fast_R_101_C4_3x"
self.MODEL_NAME = "model_final_164568"
self.cfg = get_cfg()
add_tridentnet_config(self.cfg)
self.cfg.merge_from_file(self.folder + "/TridentNet_git/configs/"+self.MODEL_NAME_CONFIG+".yaml")
self.cfg.MODEL.WEIGHTS = self.folder + "/models/"+self.MODEL_NAME+".pkl"
self.loaded = False
self.deviceFrom = ""
# add output
self.addOutput(dataprocess.CGraphicsOutput())
def getProgressSteps(self, eltCount=1):
# Function returning the number of progress steps for this process
# This is handled by the main progress bar of Ikomia application
return 2
def run(self):
self.beginTaskRun()
# we use seed to keep the same color for our masks + boxes + labels (same random each time)
random.seed(30)
# Get input :
input = self.getInput(0)
srcImage = input.getImage()
# Get output :
output_image = self.getOutput(0)
output_graph = self.getOutput(1)
output_graph.setNewLayer("TridentNet")
# Get parameters :
param = self.getParam()
# predictor
if not self.loaded:
print("Chargement du modèle")
if param.cuda == False:
self.cfg.MODEL.DEVICE = "cpu"
self.deviceFrom = "cpu"
else:
self.deviceFrom = "gpu"
self.loaded = True
self.predictor = DefaultPredictor(self.cfg)
# reload model if CUDA check and load without CUDA
elif self.deviceFrom == "cpu" and param.cuda == True:
print("Chargement du modèle")
self.cfg = get_cfg()
add_tridentnet_config(self.cfg)
self.cfg.merge_from_file(self.folder + "/TridentNet_git/configs/"+self.MODEL_NAME_CONFIG+".yaml")
self.cfg.MODEL.WEIGHTS = self.folder + "/models/"+self.MODEL_NAME+".pkl"
self.deviceFrom = "gpu"
self.predictor = DefaultPredictor(self.cfg)
# reload model if CUDA not check and load with CUDA
elif self.deviceFrom == "gpu" and param.cuda == False:
print("Chargement du modèle")
self.cfg = get_cfg()
self.cfg.MODEL.DEVICE = "cpu"
add_tridentnet_config(self.cfg)
self.cfg.merge_from_file(self.folder + "/TridentNet_git/configs/"+self.MODEL_NAME_CONFIG+".yaml")
self.cfg.MODEL.WEIGHTS = self.folder + "/models/"+self.MODEL_NAME+".pkl"
self.deviceFrom = "cpu"
self.predictor = DefaultPredictor(self.cfg)
outputs = self.predictor(srcImage)
# get outputs instances
output_image.setImage(srcImage)
boxes = outputs["instances"].pred_boxes
scores = outputs["instances"].scores
classes = outputs["instances"].pred_classes
# to numpy
if param.cuda :
boxes_np = boxes.tensor.cpu().numpy()
scores_np = scores.cpu().numpy()
classes_np = classes.cpu().numpy()
else :
boxes_np = boxes.tensor.numpy()
scores_np = scores.numpy()
classes_np = classes.numpy()
self.emitStepProgress()
# keep only the results with proba > threshold
scores_np_tresh = list()
for s in scores_np:
if float(s) > param.proba:
scores_np_tresh.append(s)
self.emitStepProgress()
if len(scores_np_tresh) > 0:
# text label with score
labels = None
class_names = MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]).get("thing_classes")
if classes is not None and class_names is not None and len(class_names) > 1:
labels = [class_names[i] for i in classes]
if scores_np_tresh is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores_np_tresh]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores_np_tresh)]
# Show Boxes + labels
for i in range(len(scores_np_tresh)):
color = [random.randint(0,255), random.randint(0,255), random.randint(0,255), 255]
prop_text = core.GraphicsTextProperty()
prop_text.color = color
prop_text.font_size = 7
output_graph.addText(labels[i], float(boxes_np[i][0]), float(boxes_np[i][1]), prop_text)
prop_rect = core.GraphicsRectProperty()
prop_rect.pen_color = color
prop_rect.category = labels[i]
output_graph.addRectangle(float(boxes_np[i][0]), float(boxes_np[i][1]), float(boxes_np[i][2] - boxes_np[i][0]), float(boxes_np[i][3] - boxes_np[i][1]), prop_rect)
# Step progress bar:
self.emitStepProgress()
# Call endTaskRun to finalize process
self.endTaskRun()
# --------------------
# - Factory class to build process object
# - Inherits dataprocess.CProcessFactory from Ikomia API
# --------------------
class TridentnetFactory(dataprocess.CTaskFactory):
def __init__(self):
dataprocess.CTaskFactory.__init__(self)
# Set process information as string here
self.info.name = "infer_detectron2_tridentnet"
self.info.shortDescription = "TridentNet inference model of Detectron2 for object detection."
self.info.description = "TridentNet inference model for object detection trained on COCO. " \
"Implementation from Detectron2 (Facebook Research). " \
"Trident Network (TridentNet) aims to generate scale-specific feature maps " \
"with a uniform representational power. We construct a parallel multi-branch " \
"architecture in which each branch shares the same transformation parameters " \
"but with different receptive fields. TridentNet-Fast is a fast approximation " \
"version of TridentNet that could achieve significant improvements without " \
"any additional parameters and computational cost." \
"This Ikomia plugin can make inference of pre-trained model " \
"with ResNet101 backbone + C4 head."
self.info.authors = "Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang"
self.info.article = "Scale-Aware Trident Networks for Object Detection"
self.info.journal = "IEEE International Conference on Computer Vision (ICCV)"
self.info.year = 2019
self.info.license = "Apache-2.0 License"
self.info.documentationLink = "https://detectron2.readthedocs.io/index.html"
self.info.repo = "https://github.com/facebookresearch/detectron2/tree/master/projects/TridentNet"
self.info.path = "Plugins/Python/Detectron2"
self.info.iconPath = "icons/detectron2.png"
self.info.version = "1.0.1"
self.info.keywords = "object,facebook,detectron2,detection,multi,scale"
def create(self, param=None):
# Create process object
return Tridentnet(self.info.name, param)
| 42.644231
| 178
| 0.608455
|
a03065bf397975ebb8fd54f2b817241f6c63d39d
| 28,823
|
py
|
Python
|
nova/tests/unit/network/test_api.py
|
bopopescu/nested_quota_final
|
7c3454883de9f5368fa943924540eebe157a319d
|
[
"Apache-2.0"
] | 5
|
2017-06-23T07:37:39.000Z
|
2020-10-21T07:07:50.000Z
|
nova/tests/unit/network/test_api.py
|
bopopescu/nested_quota_final
|
7c3454883de9f5368fa943924540eebe157a319d
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/network/test_api.py
|
bopopescu/nested_quota_final
|
7c3454883de9f5368fa943924540eebe157a319d
|
[
"Apache-2.0"
] | 4
|
2017-06-23T07:37:43.000Z
|
2020-12-28T09:57:22.000Z
|
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import contextlib
import itertools
import uuid
import mock
from mox3 import mox
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import fields
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_virtual_interface
from nova import utils
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
policy.reset()
def test_check_policy(self):
self.mox.StubOutWithMock(policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
api.check_policy(self.context, 'get_all')
def test_skip_policy(self):
policy.reset()
rules = {'network:get_all': common_policy.parse_rule('!')}
policy.set_rules(common_policy.Rules(rules))
api = network.API()
self.assertRaises(exception.PolicyNotAuthorized,
api.get_all, self.context)
api = network.API(skip_policy_check=True)
api.get_all(self.context)
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.network_api = network.API()
self.context = context.RequestContext('fake-user',
'fake-project')
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all(self, mock_get_all):
mock_get_all.return_value = mock.sentinel.get_all
self.assertEqual(mock.sentinel.get_all,
self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only=True)
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all_liberal(self, mock_get_all):
self.flags(network_manager='nova.network.manager.FlatDHCPManaager')
mock_get_all.return_value = mock.sentinel.get_all
self.assertEqual(mock.sentinel.get_all,
self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only="allow_none")
@mock.patch('nova.objects.NetworkList.get_all')
def test_get_all_no_networks(self, mock_get_all):
mock_get_all.side_effect = exception.NoNetworksFound
self.assertEqual([], self.network_api.get_all(self.context))
mock_get_all.assert_called_once_with(self.context,
project_only=True)
@mock.patch('nova.objects.Network.get_by_uuid')
def test_get(self, mock_get):
mock_get.return_value = mock.sentinel.get_by_uuid
with mock.patch.object(self.context, 'elevated') as elevated:
elevated.return_value = mock.sentinel.elevated_context
self.assertEqual(mock.sentinel.get_by_uuid,
self.network_api.get(self.context, 'fake-uuid'))
mock_get.assert_called_once_with(mock.sentinel.elevated_context,
'fake-uuid')
@mock.patch('nova.objects.Network.get_by_id')
@mock.patch('nova.db.virtual_interface_get_by_instance')
def test_get_vifs_by_instance(self, mock_get_by_instance,
mock_get_by_id):
mock_get_by_instance.return_value = [
dict(test_virtual_interface.fake_vif,
network_id=123)]
mock_get_by_id.return_value = objects.Network()
mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
vifs = self.network_api.get_vifs_by_instance(self.context,
instance)
self.assertEqual(1, len(vifs))
self.assertEqual(123, vifs[0].network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
mock_get_by_instance.assert_called_once_with(
self.context, str(mock.sentinel.inst_uuid), use_slave=False)
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
@mock.patch('nova.objects.Network.get_by_id')
@mock.patch('nova.db.virtual_interface_get_by_address')
def test_get_vif_by_mac_address(self, mock_get_by_address,
mock_get_by_id):
mock_get_by_address.return_value = dict(
test_virtual_interface.fake_vif, network_id=123)
mock_get_by_id.return_value = objects.Network(
uuid=mock.sentinel.network_uuid)
vif = self.network_api.get_vif_by_mac_address(self.context,
mock.sentinel.mac)
self.assertEqual(123, vif.network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
mock_get_by_address.assert_called_once_with(self.context,
mock.sentinel.mac)
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
def test_allocate_for_instance_handles_macs_passed(self):
# If a macs argument is supplied to the 'nova-network' API, it is just
# ignored. This test checks that the call down to the rpcapi layer
# doesn't pass macs down: nova-network doesn't support hypervisor
# mac address limits (today anyhow).
macs = set(['ab:cd:ef:01:23:34'])
self.mox.StubOutWithMock(
self.network_api.network_rpcapi, "allocate_for_instance")
kwargs = dict(zip(['host', 'instance_id', 'project_id',
'requested_networks', 'rxtx_factor', 'vpn', 'macs',
'dhcp_options'],
itertools.repeat(mox.IgnoreArg())))
self.network_api.network_rpcapi.allocate_for_instance(
mox.IgnoreArg(), **kwargs).AndReturn([])
self.mox.ReplayAll()
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 0
sys_meta = flavors.save_flavor_info({}, flavor)
instance = dict(id=1, uuid='uuid', project_id='project_id',
host='host', system_metadata=utils.dict_to_metadata(sys_meta))
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'], **instance)
self.network_api.allocate_for_instance(
self.context, instance, 'vpn', 'requested_networks', macs=macs)
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
new_instance = {'uuid': 'new-uuid'}
def fake_associate(*args, **kwargs):
return orig_instance_uuid
self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
fake_associate)
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None,
use_slave=None):
return fake_instance.fake_db_instance(uuid=instance_uuid)
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_get_nw_info(ctxt, instance):
class FakeNWInfo(object):
def json(self):
pass
return FakeNWInfo()
self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
expected_updated_instances = [new_instance['uuid'],
orig_instance_uuid]
else:
expected_updated_instances = [new_instance['uuid']]
def fake_instance_info_cache_update(context, instance_uuid, cache):
self.assertEqual(instance_uuid,
expected_updated_instances.pop())
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
def fake_update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
update_cells=True):
return
self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
fake_update_instance_cache_with_nw_info)
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
'10.0.0.2')
def test_associate_preassociated_floating_ip(self):
self._do_test_associate_floating_ip('orig-uuid')
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
def test_get_floating_ip_invalid_id(self):
self.assertRaises(exception.InvalidID,
self.network_api.get_floating_ip,
self.context, '123zzz')
@mock.patch('nova.objects.FloatingIP.get_by_id')
def test_get_floating_ip(self, mock_get):
floating = mock.sentinel.floating
mock_get.return_value = floating
self.assertEqual(floating,
self.network_api.get_floating_ip(self.context, 123))
mock_get.assert_called_once_with(self.context, 123)
@mock.patch('nova.objects.FloatingIP.get_pool_names')
def test_get_floating_ip_pools(self, mock_get):
pools = ['foo', 'bar']
mock_get.return_value = pools
self.assertEqual(pools,
self.network_api.get_floating_ip_pools(
self.context))
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
floating = mock.sentinel.floating
mock_get.return_value = floating
self.assertEqual(floating,
self.network_api.get_floating_ip_by_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context,
mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
floatings = mock.sentinel.floating_ips
mock_get.return_value = floatings
self.assertEqual(floatings,
self.network_api.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context,
self.context.project_id)
def _stub_migrate_instance_calls(self, method, multi_host, info):
fake_flavor = flavors.get_default_flavor()
fake_flavor['rxtx_factor'] = 1.21
sys_meta = flavors.save_flavor_info({}, fake_flavor)
fake_instance = objects.Instance(
uuid=uuid.uuid4().hex,
project_id='fake_project_id',
instance_type_id=fake_flavor['id'],
system_metadata=sys_meta)
fake_migration = {'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest'}
def fake_mig_inst_method(*args, **kwargs):
info['kwargs'] = kwargs
def fake_get_multi_addresses(*args, **kwargs):
return multi_host, ['fake_float1', 'fake_float2']
self.stubs.Set(network_rpcapi.NetworkAPI, method,
fake_mig_inst_method)
self.stubs.Set(self.network_api, '_get_multi_addresses',
fake_get_multi_addresses)
expected = {'instance_uuid': fake_instance.uuid,
'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest',
'rxtx_factor': 1.21,
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
def test_migrate_instance_start_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_start_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', False, info)
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', False, info)
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_is_multi_host_instance_has_no_fixed_ip(self):
def fake_fixed_ip_get_by_instance(ctxt, uuid):
raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertFalse(result)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
fip_get):
network = objects.Network(
id=123, project_id=None,
multi_host=is_multi_host)
fip_get.return_value = [
objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
floating_ips=objects.FloatingIPList())]
instance = {'uuid': FAKE_UUID}
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_no_project_id_multi(self):
self._test_is_multi_host_network_has_no_project_id(True)
def test_is_multi_host_network_has_no_project_id_non_multi(self):
self._test_is_multi_host_network_has_no_project_id(False)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def _test_is_multi_host_network_has_project_id(self, is_multi_host,
fip_get):
network = objects.Network(
id=123, project_id=self.context.project_id,
multi_host=is_multi_host)
fip_get.return_value = [
objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
floating_ips=objects.FloatingIPList())]
instance = {'uuid': FAKE_UUID}
result, floats = self.network_api._get_multi_addresses(self.context,
instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_project_id_multi(self):
self._test_is_multi_host_network_has_project_id(True)
def test_is_multi_host_network_has_project_id_non_multi(self):
self._test_is_multi_host_network_has_project_id(False)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate_project(self, mock_disassociate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, project=None)
mock_disassociate.assert_called_once_with(self.context, net_obj.id,
host=False, project=True)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate_host(self, mock_disassociate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, host=None)
mock_disassociate.assert_called_once_with(self.context, net_obj.id,
host=True, project=False)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.associate')
def test_network_associate_project(self, mock_associate, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
project = mock.sentinel.project
self.network_api.associate(self.context, FAKE_UUID, project=project)
mock_associate.assert_called_once_with(self.context, project,
network_id=net_obj.id,
force=True)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.save')
def test_network_associate_host(self, mock_save, mock_get):
net_obj = objects.Network(context=self.context, id=1)
mock_get.return_value = net_obj
host = str(mock.sentinel.host)
self.network_api.associate(self.context, FAKE_UUID, host=host)
mock_save.assert_called_once_with()
self.assertEqual(host, net_obj.host)
@mock.patch('nova.objects.Network.get_by_uuid')
@mock.patch('nova.objects.Network.disassociate')
def test_network_disassociate(self, mock_disassociate, mock_get):
mock_get.return_value = objects.Network(context=self.context, id=123)
self.network_api.disassociate(self.context, FAKE_UUID)
mock_disassociate.assert_called_once_with(self.context, 123,
project=True, host=True)
def _test_refresh_cache(self, method, *args, **kwargs):
# This test verifies that no call to get_instance_nw_info() is made
# from the @refresh_cache decorator for the tested method.
with contextlib.nested(
mock.patch.object(self.network_api.network_rpcapi, method),
mock.patch.object(self.network_api.network_rpcapi,
'get_instance_nw_info'),
mock.patch.object(network_model.NetworkInfo, 'hydrate'),
) as (
method_mock, nwinfo_mock, hydrate_mock
):
nw_info = network_model.NetworkInfo([])
method_mock.return_value = nw_info
hydrate_mock.return_value = nw_info
getattr(self.network_api, method)(*args, **kwargs)
hydrate_mock.assert_called_once_with(nw_info)
self.assertFalse(nwinfo_mock.called)
def test_allocate_for_instance_refresh_cache(self):
sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'],
system_metadata=sys_meta)
vpn = 'fake-vpn'
requested_networks = 'fake-networks'
self._test_refresh_cache('allocate_for_instance', self.context,
instance, vpn, requested_networks)
def test_add_fixed_ip_to_instance_refresh_cache(self):
sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'],
system_metadata=sys_meta)
network_id = 'fake-network-id'
self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
instance, network_id)
def test_remove_fixed_ip_from_instance_refresh_cache(self):
sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'],
system_metadata=sys_meta)
address = 'fake-address'
self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
instance, address)
@mock.patch('nova.db.fixed_ip_get_by_address')
def test_get_fixed_ip_by_address(self, fip_get):
fip_get.return_value = test_fixed_ip.fake_fixed_ip
fip = self.network_api.get_fixed_ip_by_address(self.context,
'fake-addr')
self.assertIsInstance(fip, objects.FixedIP)
@mock.patch('nova.objects.FixedIP.get_by_id')
def test_get_fixed_ip(self, mock_get_by_id):
mock_get_by_id.return_value = mock.sentinel.fixed_ip
self.assertEqual(mock.sentinel.fixed_ip,
self.network_api.get_fixed_ip(self.context,
mock.sentinel.id))
mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
@mock.patch('nova.objects.FixedIP.get_by_floating_address')
def test_get_instance_by_floating_address(self, mock_get_by_floating):
mock_get_by_floating.return_value = objects.FixedIP(
instance_uuid = mock.sentinel.instance_uuid)
self.assertEqual(str(mock.sentinel.instance_uuid),
self.network_api.get_instance_id_by_floating_address(
self.context, mock.sentinel.floating))
mock_get_by_floating.assert_called_once_with(self.context,
mock.sentinel.floating)
@mock.patch('nova.objects.FixedIP.get_by_floating_address')
def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
mock_get_by_floating.return_value = None
self.assertIsNone(
self.network_api.get_instance_id_by_floating_address(
self.context, mock.sentinel.floating))
mock_get_by_floating.assert_called_once_with(self.context,
mock.sentinel.floating)
@mock.patch('nova.network.api.API.migrate_instance_start')
def test_cleanup_instance_network_on_host(self, fake_migrate_start):
instance = fake_instance.fake_instance_obj(self.context)
self.network_api.cleanup_instance_network_on_host(
self.context, instance, 'fake_compute_source')
fake_migrate_start.assert_called_once_with(
self.context, instance,
{'source_compute': 'fake_compute_source', 'dest_compute': None})
@mock.patch('nova.network.api.API.migrate_instance_finish')
def test_setup_instance_network_on_host(self, fake_migrate_finish):
instance = fake_instance.fake_instance_obj(self.context)
self.network_api.setup_instance_network_on_host(
self.context, instance, 'fake_compute_source')
fake_migrate_finish.assert_called_once_with(
self.context, instance,
{'source_compute': None, 'dest_compute': 'fake_compute_source'})
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.instance_info_cache_update')
class TestUpdateInstanceCache(test.TestCase):
def setUp(self):
super(TestUpdateInstanceCache, self).setUp()
self.context = context.get_admin_context()
self.instance = {'uuid': FAKE_UUID}
vifs = [network_model.VIF(id='super_vif')]
self.nw_info = network_model.NetworkInfo(vifs)
self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
self.nw_info)
def test_update_nw_info_none(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance, None)
api_mock._get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
db_mock.assert_called_once_with(self.context, self.instance['uuid'],
{'network_info': self.nw_json})
def test_update_nw_info_one_network(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance, self.nw_info)
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance['uuid'],
{'network_info': self.nw_json})
def test_update_nw_info_empty_list(self, db_mock, api_mock):
api_mock._get_instance_nw_info.return_value = self.nw_info
base_api.update_instance_cache_with_nw_info(api_mock, self.context,
self.instance,
network_model.NetworkInfo([]))
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance['uuid'],
{'network_info': '[]'})
def test_decorator_return_object(self, db_mock, api_mock):
@base_api.refresh_cache
def func(self, context, instance):
return network_model.NetworkInfo([])
func(api_mock, self.context, self.instance)
self.assertFalse(api_mock._get_instance_nw_info.called)
db_mock.assert_called_once_with(self.context, self.instance['uuid'],
{'network_info': '[]'})
def test_decorator_return_none(self, db_mock, api_mock):
@base_api.refresh_cache
def func(self, context, instance):
pass
api_mock._get_instance_nw_info.return_value = self.nw_info
func(api_mock, self.context, self.instance)
api_mock._get_instance_nw_info.assert_called_once_with(self.context,
self.instance)
db_mock.assert_called_once_with(self.context, self.instance['uuid'],
{'network_info': self.nw_json})
class NetworkHooksTestCase(test.BaseHookTestCase):
def test_instance_network_info_hook(self):
info_func = base_api.update_instance_cache_with_nw_info
self.assert_has_hook('instance_network_info', info_func)
| 47.328407
| 79
| 0.641016
|
2b2bb0c71a0556da039769c2d26d23492c4e3b03
| 566
|
py
|
Python
|
shortner/views.py
|
omkumar01/Url-Shortner
|
e86ba3ed1bc5799ef0b38e567fb972209aa05e7c
|
[
"MIT"
] | null | null | null |
shortner/views.py
|
omkumar01/Url-Shortner
|
e86ba3ed1bc5799ef0b38e567fb972209aa05e7c
|
[
"MIT"
] | null | null | null |
shortner/views.py
|
omkumar01/Url-Shortner
|
e86ba3ed1bc5799ef0b38e567fb972209aa05e7c
|
[
"MIT"
] | 3
|
2021-11-28T05:10:36.000Z
|
2021-11-28T05:11:17.000Z
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
import uuid
from .models import InputUrl
# Create your views here.
def index(request):
return render(request, "index.html")
def create(request):
if request.method == "POST":
link = request.POST['link']
uid = str(uuid.uuid4())[:5]
new_url = InputUrl(link=link, uuid=uid)
new_url.save()
return HttpResponse(uid)
def go(request, pk):
url_details = InputUrl.objects.get(uuid=pk)
return redirect(f"http://{url_details.link}")
| 23.583333
| 49
| 0.674912
|
87b30017b199ecbcbadbc20bd6c9da9f1304879f
| 1,397
|
py
|
Python
|
Courses/Udacity/CS101/Lesson_9_Problem_Set/04-Producing_a_WebCorpus/supplied/studentMain.py
|
leparrav/Playground
|
dcb90a2dd2bc1867511cfe621eb21248a60e357f
|
[
"Unlicense"
] | 1
|
2019-02-13T12:02:26.000Z
|
2019-02-13T12:02:26.000Z
|
Courses/Udacity/CS101/Lesson_9_Problem_Set/04-Producing_a_WebCorpus/supplied/studentMain.py
|
leparrav/Playground
|
dcb90a2dd2bc1867511cfe621eb21248a60e357f
|
[
"Unlicense"
] | 1
|
2018-08-13T15:58:33.000Z
|
2018-08-13T15:58:33.000Z
|
Courses/Udacity/CS101/Lesson_9_Problem_Set/04-Producing_a_WebCorpus/supplied/studentMain.py
|
leparrav/Playground
|
dcb90a2dd2bc1867511cfe621eb21248a60e357f
|
[
"Unlicense"
] | 2
|
2017-08-10T20:01:29.000Z
|
2021-07-01T08:39:13.000Z
|
### Modify the crawler code to return a WebCorpus object.
### You will need to add an import and modify the crawl_web function.
### After your changes, the provided test code below should work.
### You should do your modifications to the crawler.py file
from crawler import crawl_web, compute_ranks
from search import lucky_search, ordered_search
from webcorpus import WebCorpus
def test_engine():
print "Testing..."
kathleen = 'http://udacity.com/cs101x/urank/kathleen.html'
nickel = 'http://udacity.com/cs101x/urank/nickel.html'
arsenic = 'http://udacity.com/cs101x/urank/arsenic.html'
hummus = 'http://udacity.com/cs101x/urank/hummus.html'
indexurl = 'http://udacity.com/cs101x/urank/index.html'
wcorpus = crawl_web('http://udacity.com/cs101x/urank/index.html')
assert isinstance(wcorpus, WebCorpus)
ranks = compute_ranks(wcorpus.graph)
assert lucky_search(wcorpus.index, ranks, 'Hummus') == kathleen
assert ordered_search(wcorpus.index, ranks, 'Hummus') == [kathleen, nickel, arsenic, hummus, indexurl]
assert lucky_search(wcorpus.index, ranks, 'the') == nickel
assert ordered_search(wcorpus.index, ranks, 'the') == [nickel, arsenic, hummus, indexurl]
assert lucky_search(wcorpus.index, ranks, 'babaganoush') == None
assert ordered_search(wcorpus.index, ranks, 'babaganoush') == None
print "Finished tests."
test_engine()
| 46.566667
| 107
| 0.730852
|
c1e6a4bffa2a1e0d2bfd14d4ef66b09028c450ea
| 1,097
|
py
|
Python
|
greeting/servidor.py
|
javalisson/Sockets
|
90068c0b5a4b2f21ca789177c3c445c671732a86
|
[
"MIT"
] | 2
|
2017-04-26T11:17:56.000Z
|
2017-12-05T01:55:20.000Z
|
greeting/servidor.py
|
javalisson/Sockets
|
90068c0b5a4b2f21ca789177c3c445c671732a86
|
[
"MIT"
] | 2
|
2017-02-22T12:35:13.000Z
|
2017-03-29T12:44:22.000Z
|
greeting/servidor.py
|
javalisson/Sockets
|
90068c0b5a4b2f21ca789177c3c445c671732a86
|
[
"MIT"
] | 24
|
2017-02-22T12:26:04.000Z
|
2020-10-13T05:19:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# adapted from https://wiki.python.org/moin/TcpCommunication
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
resposta = None
print ("[SERVIDOR] Iniciando")
print ("[SERVIDOR] Abrindo a porta " + str(TCP_PORT) + " e ouvindo")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print ("[SERVIDOR] Aguardando conexao")
conn, addr = s.accept()
print ('[SERVER] Connection with the client established. Connection address:', addr)
while 1:
print ("[SERVIDOR] Aguardando dados do cliente")
nome = conn.recv(BUFFER_SIZE)
if not nome: break
print ("[SERVIDOR] Dados recebidos do cliente com sucesso: \"" + nome.decode('utf-8') + "\"")
resposta = "Ola, " + nome.decode('utf-8')
print ("[SERVIDOR] Enviando resposta para o cliente")
conn.send(resposta.encode()) # echo
print ("[SERVIDOR] Resposta enviada: \"" + resposta + "\"")
print ("[SERVIDOR] Fechando a porta " + str(TCP_PORT))
conn.close()
print ("[SERVIDOR] Fim")
| 31.342857
| 97
| 0.675479
|
d231f43d7c614b5f512612e4f681b650f69190ce
| 371
|
py
|
Python
|
tests/libtest/test_pod_exec.py
|
phlogistonjohn/ocs-ci
|
38223e18ca2e1db7a24cc9bdb76e38d2ba4e6f12
|
[
"MIT"
] | null | null | null |
tests/libtest/test_pod_exec.py
|
phlogistonjohn/ocs-ci
|
38223e18ca2e1db7a24cc9bdb76e38d2ba4e6f12
|
[
"MIT"
] | null | null | null |
tests/libtest/test_pod_exec.py
|
phlogistonjohn/ocs-ci
|
38223e18ca2e1db7a24cc9bdb76e38d2ba4e6f12
|
[
"MIT"
] | 1
|
2020-07-28T07:32:09.000Z
|
2020-07-28T07:32:09.000Z
|
import os
os.sys.path.append(os.path.dirname(os.getcwd()))
from ocs_ci.framework.testlib import libtest
from ocs_ci.ocs.resources import pod
@libtest
def test_main():
tools_pod = pod.get_ceph_tools_pod()
cmd = "ceph osd df"
out, err, ret = tools_pod.exec_ceph_cmd(ceph_cmd=cmd)
if out:
print(out)
if err:
print(err)
print(ret)
| 19.526316
| 57
| 0.67655
|
4fb9731576c55f30fdb45e190ee2a3a79a2b52c6
| 11,452
|
py
|
Python
|
jobs/transforms/skill_validation_test.py
|
sajalasati/oppia
|
b0c6ffb917663fb6482022d0f607377f7e1ee3d0
|
[
"Apache-2.0"
] | 1
|
2021-08-30T06:53:15.000Z
|
2021-08-30T06:53:15.000Z
|
jobs/transforms/skill_validation_test.py
|
abhyareddy/oppia
|
4c07dd16e5503f6ee70f1774e9754b6db266aff4
|
[
"Apache-2.0"
] | 11
|
2021-03-03T07:21:27.000Z
|
2022-03-12T01:03:44.000Z
|
jobs/transforms/skill_validation_test.py
|
sajalasati/oppia
|
b0c6ffb917663fb6482022d0f607377f7e1ee3d0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.transforms.skill_validation."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.platform import models
from jobs import job_test_utils
from jobs.transforms import skill_validation
from jobs.types import base_validation_errors
import apache_beam as beam
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import skill_models
(base_models, skill_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.skill])
class ValidateSkillSnapshotMetadataModelTests(job_test_utils.PipelinedTestBase):
def test_validate_change_domain_implemented(self) -> None:
valid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='delete',
commit_cmds=[{
'cmd': base_models.VersionedModel.CMD_DELETE_COMMIT}])
output = (
self.pipeline
| beam.Create([valid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(output, [])
def test_skill_change_object_with_missing_cmd(self) -> None:
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='delete',
commit_cmds=[{'invalid': 'data'}])
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'invalid': 'data'},
'Missing cmd key in change dict')
])
def test_skill_change_object_with_invalid_cmd(self) -> None:
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='delete',
commit_cmds=[{'cmd': 'invalid'}])
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
{'cmd': 'invalid'},
'Command invalid is not allowed')
])
def test_skill_change_object_with_missing_attribute_in_cmd(self) -> None:
commit_dict = {
'cmd': 'update_skill_property',
'property_name': 'name',
}
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='edit',
commit_cmds=[commit_dict])
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'The following required attributes are missing: '
'new_value, old_value')
])
def test_skill_change_object_with_extra_attribute_in_cmd(self) -> None:
commit_dict = {
'cmd': 'add_skill_misconception',
            # Key new_misconception_dict stores a string because dicts
            # keep rearranging their keys, which made the tests fail.
'new_misconception_dict': '{u\'id\': 0, u\'notes\': '
'u\'<p>notes</p>\', u\'feedback\': '
'u\'<p>default_feedback</p>\', '
'u\'name\': u\'name\'}',
'invalid': 'invalid'
}
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='create',
commit_cmds=[commit_dict]
)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'The following extra attributes are present: invalid')
])
def test_skill_change_object_with_invalid_skill_property(self) -> None:
commit_dict = {
'cmd': 'update_skill_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
}
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='edit',
commit_cmds=[commit_dict])
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'Value for property_name in cmd update_skill_property: '
'invalid is not allowed')
])
def test_skill_change_object_with_invalid_skill_misconceptions(
self
) -> None:
commit_dict = {
'cmd': 'update_skill_misconceptions_property',
'misconception_id': 'id',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
}
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='create',
commit_cmds_user_ids=[
'commit_cmds_user_1_id', 'commit_cmds_user_2_id'],
content_user_ids=['content_user_1_id', 'content_user_2_id'],
commit_cmds=[commit_dict])
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'Value for property_name in cmd '
'update_skill_misconceptions_property: invalid is not '
'allowed')
])
def test_skill_change_object_with_invalid_skill_contents_property(
self
) -> None:
commit_dict = {
'cmd': 'update_skill_contents_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
}
invalid_commit_cmd_model = skill_models.SkillSnapshotMetadataModel(
id='123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
committer_id='committer-id',
commit_type='create',
commit_cmds_user_ids=[
'commit_cmds_user_1_id', 'commit_cmds_user_2_id'],
content_user_ids=['content_user_1_id', 'content_user_2_id'],
commit_cmds=[commit_dict])
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillSnapshotMetadataModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsValidateError(
invalid_commit_cmd_model,
commit_dict,
'Value for property_name in cmd '
'update_skill_contents_property: invalid is not allowed')
])
class ValidateSkillCommitLogEntryModelTests(job_test_utils.PipelinedTestBase):
def test_validate_skill_model(self) -> None:
valid_commit_cmd_model = skill_models.SkillCommitLogEntryModel(
id='skill_id123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
skill_id='skill-id',
user_id='user-id',
commit_type='test-type',
post_commit_status='private',
commit_cmds=[{'cmd': 'create_new'}])
output = (
self.pipeline
| beam.Create([valid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillCommitLogEntryModel())
)
self.assert_pcoll_equal(output, [])
def test_raises_commit_cmd_none_error(self) -> None:
invalid_commit_cmd_model = skill_models.SkillCommitLogEntryModel(
id='model_id123',
created_on=self.YEAR_AGO,
last_updated=self.NOW,
skill_id='skill-id',
user_id='user-id',
commit_type='test-type',
post_commit_status='private',
            commit_cmds=None)
output = (
self.pipeline
| beam.Create([invalid_commit_cmd_model])
| beam.ParDo(
skill_validation.ValidateSkillCommitLogEntryModel())
)
self.assert_pcoll_equal(
output, [
base_validation_errors.CommitCmdsNoneError(
invalid_commit_cmd_model)
])
| 35.565217
| 80
| 0.585313
|
b7e29b3ba028b36db32acc4729bb7ae9701891f2
| 315
|
py
|
Python
|
Sqlite/dbmanager.py
|
Gambl3r08/ejercicios-Python
|
ddf13b40c611f892112ebbe7bc907f5765998ea0
|
[
"MIT"
] | null | null | null |
Sqlite/dbmanager.py
|
Gambl3r08/ejercicios-Python
|
ddf13b40c611f892112ebbe7bc907f5765998ea0
|
[
"MIT"
] | null | null | null |
Sqlite/dbmanager.py
|
Gambl3r08/ejercicios-Python
|
ddf13b40c611f892112ebbe7bc907f5765998ea0
|
[
"MIT"
] | null | null | null |
import sqlite3
def createDB(name: str):
    """Create (or open) an SQLite database file and return the connection."""
    try:
        db = sqlite3.connect(name)
        return db
    except sqlite3.Error:
        print("Error creating the database:", name)
def createCursor(db: sqlite3.Connection):
    """Return a cursor for the given connection."""
    try:
        cursor = db.cursor()
        return cursor
    except sqlite3.Error:
        print("Error creating the cursor")
| 19.6875
| 56
| 0.584127
|
67e0b6df0ed7fc49ba0e561d404228b003fbd70e
| 8,576
|
py
|
Python
|
ballet/templating.py
|
HDI-Project/fhub_core
|
9667a47fbd8b4caf2e92118dc5357f34aae2098b
|
[
"MIT"
] | 19
|
2021-04-06T18:56:39.000Z
|
2022-03-15T00:23:00.000Z
|
ballet/templating.py
|
HDI-Project/ballet
|
9667a47fbd8b4caf2e92118dc5357f34aae2098b
|
[
"MIT"
] | 52
|
2018-09-27T01:11:58.000Z
|
2021-03-24T19:11:18.000Z
|
ballet/templating.py
|
HDI-Project/ballet
|
9667a47fbd8b4caf2e92118dc5357f34aae2098b
|
[
"MIT"
] | 3
|
2019-12-07T17:55:34.000Z
|
2021-02-02T17:58:39.000Z
|
import pathlib
import tempfile
from typing import List, Optional, Tuple
import funcy as fy
from cookiecutter.main import cookiecutter as _cookiecutter
from github import Github, GithubException
import ballet.util.git
from ballet.compat import PathLike
from ballet.exc import BalletError, ConfigurationError
from ballet.project import Project, detect_github_username
from ballet.util.fs import pwalk, synctree
from ballet.util.git import (
DEFAULT_BRANCH, push_branches_to_remote, switch_to_new_branch,)
from ballet.util.log import logger
from ballet.util.typing import Pathy
from ballet.validation.project_structure.checks import (
FEATURE_MODULE_NAME_REGEX, SUBPACKAGE_NAME_REGEX,)
TEMPLATES_PATH = pathlib.Path(__file__).resolve().parent.joinpath('templates')
FEATURE_TEMPLATE_PATH = TEMPLATES_PATH.joinpath('feature_template')
PROJECT_TEMPLATE_PATH = TEMPLATES_PATH.joinpath('project_template')
def _stringify_path(obj) -> str:
return str(obj) if isinstance(obj, PathLike) else obj
@fy.wraps(_cookiecutter)
def cookiecutter(*args, **kwargs) -> str:
"""Call cookiecutter.main.cookiecutter after stringifying paths
Return:
project directory path
"""
args = fy.walk(_stringify_path, args)
kwargs = fy.walk_values(_stringify_path, kwargs)
return _cookiecutter(*args, **kwargs)
def render_project_template(
project_template_path: Optional[Pathy] = None,
create_github_repo: bool = False,
github_token: Optional[str] = None,
**cc_kwargs
) -> str:
"""Generate a ballet project according to the project template
If creating the GitHub repo is requested and the process fails for any
reason, quickstart will complete successfully and users are instructed
to read the corresponding section of the Maintainer's Guide to continue
manually.
Args:
project_template_path: path to specific project template
create_github_repo: whether to act to create the desired repo on
GitHub after rendering the project. The repo will be owned by
either the user or an org that the user has relevant permissions
for, depending on what is entered during the quickstart prompts.
If True, then a valid github token must also be provided.
github_token: valid github token with appropriate permissions
**cc_kwargs: options for the cookiecutter template
"""
if project_template_path is None:
project_template_path = PROJECT_TEMPLATE_PATH
project_path = cookiecutter(project_template_path, **cc_kwargs)
if create_github_repo:
if github_token is None:
raise ValueError('Need to provide github token')
g = Github(github_token)
# need to get params from new project config
project = Project.from_path(project_path)
owner = project.config.get('github.github_owner')
name = project.config.get('project.project_slug')
# create repo on github
try:
github_repo = ballet.util.git.create_github_repo(g, owner, name)
logger.info(f'Created repo on GitHub at {github_repo.html_url}')
except GithubException:
logger.exception('Failed to create GitHub repo for this project')
logger.warning(
'Failed to create GitHub repo for this project...\n'
'did you specify the intended repo owner, and do you have'
' permissions to create a repo under that owner?\n'
'Try manually creating the repo: https://ballet.github.io/ballet/maintainer_guide.html#manual-repository-creation' # noqa E501
)
return project_path
# now push to remote
        # we don't need to set up the remote, as it has already been set up in
        # post_gen_hook.py
local_repo = project.repo
remote_name = project.config.get('github.remote')
branches = [DEFAULT_BRANCH]
try:
push_branches_to_remote(local_repo, remote_name, branches)
except BalletError:
logger.exception('Failed to push branches to GitHub repo')
logger.warning(
'Failed to push branches to GitHub repo...\n'
'Try manually pushing the branches: https://ballet.github.io/ballet/maintainer_guide.html#manual-repository-creation' # noqa E501
)
return project_path
return project_path
def render_feature_template(**cc_kwargs) -> str:
"""Create a stub for a new feature
Args:
**cc_kwargs: options for the cookiecutter template
"""
feature_template_path = FEATURE_TEMPLATE_PATH
return cookiecutter(feature_template_path, **cc_kwargs)
def _fail_if_feature_exists(dst: pathlib.Path) -> None:
subpackage_name, feature_name = str(dst.parent), str(dst.name)
if (
dst.is_file()
and fy.re_test(SUBPACKAGE_NAME_REGEX, subpackage_name)
and fy.re_test(FEATURE_MODULE_NAME_REGEX, feature_name)
):
raise FileExistsError(f'The feature already exists here: {dst}')
def start_new_feature(
contrib_dir: Pathy = None,
branching: bool = True,
**cc_kwargs
) -> List[Tuple[pathlib.Path, str]]:
"""Start a new feature within a ballet project
    If run from the default branch, this will by default attempt to switch to a
    new branch for this feature, named `<username>/feature-<featurename>`. By
    default, it will prompt the user for input using cookiecutter's input
    interface.
Renders the feature template into a temporary directory, then copies the
feature files into the proper path within the contrib directory.
Args:
contrib_dir: directory under which to place contributed features
branching: whether to attempt to manage branching
**cc_kwargs: options for the cookiecutter template
Raises:
ballet.exc.BalletError: the new feature has the same name as an
existing one
"""
if contrib_dir is not None:
try:
project = Project.from_path(contrib_dir, ascend=True)
default_username = detect_github_username(project)
except ConfigurationError:
default_username = 'username'
else:
project = Project.from_cwd()
contrib_dir = project.config.get('contrib.module_path')
default_username = detect_github_username(project)
# inject default username into context
cc_kwargs.setdefault('extra_context', {})
cc_kwargs['extra_context'].update({'_default_username': default_username})
with tempfile.TemporaryDirectory() as tempdir:
# render feature template
output_dir = tempdir
cc_kwargs['output_dir'] = output_dir
rendered_dir = render_feature_template(**cc_kwargs)
# clean pyc files from rendered dir
for path in pwalk(rendered_dir, topdown=False):
if path.suffix == '.pyc':
path.unlink()
if path.name == '__pycache__':
with fy.suppress(OSError):
path.rmdir()
# copy into contrib dir
src = rendered_dir
dst = contrib_dir
result = synctree(src, dst, onexist=_fail_if_feature_exists)
target_branch = None
if branching and project.on_master:
# try to set the target branch name
paths = [path for path, kind in result if kind == 'file']
for path in paths:
parts = pathlib.Path(path).parts
subpackage, module = parts[-2], parts[-1]
user_match = fy.re_find(SUBPACKAGE_NAME_REGEX, subpackage)
feature_match = fy.re_find(FEATURE_MODULE_NAME_REGEX, module)
if feature_match:
username = user_match['username']
featurename = feature_match['featurename'].replace('_', '-')
target_branch = f'{username}/feature-{featurename}'
if target_branch is not None:
switch_to_new_branch(project.repo, target_branch)
_log_start_new_feature_success(result)
_log_switch_to_new_branch(target_branch)
return result
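# Illustrative example (hypothetical values): assuming the usual ballet naming
# convention contrib/user_<username>/feature_<featurename>.py, a rendered file
# such as contrib/user_jane/feature_mean_ratio.py produces the target branch
# 'jane/feature-mean-ratio' (underscores in the feature name become dashes).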
def _log_start_new_feature_success(result: List[Tuple[pathlib.Path, str]]):
logger.info('Start new feature successful')
for (name, kind) in result:
if kind == 'file' and '__init__' not in str(name):
relname = pathlib.Path(name).relative_to(pathlib.Path.cwd())
logger.info(f'Created {relname}')
def _log_switch_to_new_branch(branch: Optional[str]):
if branch is not None:
logger.info(f'Switched to branch {branch}')
| 37.946903
| 146
| 0.684818
|
d95bcc0b165e225e7a53304a0a180fb1b4e9e179
| 1,239
|
py
|
Python
|
appengine/findit/model/flake/analysis/triggering_sources.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
appengine/findit/model/flake/analysis/triggering_sources.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | 21
|
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
appengine/findit/model/flake/analysis/triggering_sources.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sources for from where a flake analysis was triggered."""
# An analysis was triggered directly through Findit's UI.
FINDIT_UI = 1
# An analysis was triggered using Findit's API.
FINDIT_API = 2
# An analysis was triggered using Findit's normal analysis pipeline.
FINDIT_PIPELINE = 3
SOURCES_TO_DESCRIPTIONS = {
FINDIT_UI: 'Findit UI',
FINDIT_API: 'Findit API',
FINDIT_PIPELINE: 'Findit pipeline'
}
def GetDescriptionForTriggeringSource(triggering_source, manually_triggered):
"""Returns a human-readable description for where a request came from."""
template = 'The analysis was triggered %s through %s'
def _GetTriggeringSourceDescription(triggering_source):
return SOURCES_TO_DESCRIPTIONS.get(triggering_source,
'other/unknown source')
def _GetTriggeringUserDescription(manually_triggered):
return 'manually' if manually_triggered else 'automatically'
return template % (_GetTriggeringUserDescription(manually_triggered),
_GetTriggeringSourceDescription(triggering_source))
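# Example (illustrative):
#   GetDescriptionForTriggeringSource(FINDIT_UI, manually_triggered=True)
#   returns 'The analysis was triggered manually through Findit UI'.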
| 35.4
| 77
| 0.749798
|
76f04580b076f2d0fd5869e105c3271108e7afa5
| 1,642
|
py
|
Python
|
pytest-fixture-config/tests/unit/test_fixture_config.py
|
domdfcoding/pytest-plugins
|
01fc12644db92e83ad7646a2d3c0b4f4ac1bd5a1
|
[
"MIT"
] | null | null | null |
pytest-fixture-config/tests/unit/test_fixture_config.py
|
domdfcoding/pytest-plugins
|
01fc12644db92e83ad7646a2d3c0b4f4ac1bd5a1
|
[
"MIT"
] | null | null | null |
pytest-fixture-config/tests/unit/test_fixture_config.py
|
domdfcoding/pytest-plugins
|
01fc12644db92e83ad7646a2d3c0b4f4ac1bd5a1
|
[
"MIT"
] | null | null | null |
import pytest
from six.moves import reload_module
# HACK: if the plugin is imported before the coverage plugin then all
# the top-level code will be omitted from coverage, so force it to be
# reloaded within this unit test under coverage
import pytest_fixture_config
reload_module(pytest_fixture_config)
from pytest_fixture_config import Config, requires_config, yield_requires_config
class DummyConfig(Config):
__slots__ = ('foo', 'bar')
def test_config_update():
cfg = DummyConfig(foo=1,
bar=2
)
cfg.update({"foo": 10, "bar":20})
assert cfg.foo == 10
assert cfg.bar == 20
with pytest.raises(ValueError):
cfg.update({"baz": 30})
CONFIG1 = DummyConfig(foo=None, bar=1)
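# Since CONFIG1.foo is None, fixtures that require ('foo', 'bar') are skipped
# (their bodies never run, so neither do the tests that use them), while
# fixtures requiring only ('bar',) run normally -- that is what the paired
# tests below assert.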
@pytest.fixture
@requires_config(CONFIG1, ('foo', 'bar'))
def a_fixture(request):
raise ValueError('Should not run')
def test_requires_config_skips(a_fixture):
raise ValueError('Should not run')
@pytest.fixture
@requires_config(CONFIG1, ('bar',))
def another_fixture(request):
return 'xxxx'
def test_requires_config_doesnt_skip(another_fixture):
assert another_fixture == 'xxxx'
@pytest.fixture()
@yield_requires_config(CONFIG1, ('foo', 'bar'))
def yet_another_fixture():
raise ValueError('Should also not run')
yield 'yyyy'
def test_yield_requires_config_skips(yet_another_fixture):
raise ValueError('Should also not run')
@pytest.fixture()
@yield_requires_config(CONFIG1, ('bar',))
def yet_some_other_fixture():
yield 'yyyy'
def test_yield_requires_config_doesnt_skip(yet_some_other_fixture):
assert yet_some_other_fixture == 'yyyy'
| 24.147059
| 80
| 0.722899
|
3b6e2cd9de0263e4713cd42294b19f8b26ff5128
| 5,413
|
py
|
Python
|
markovdwp/runtime/dwp.py
|
ivannz/MarkovDWP
|
f10ed7a331ddd9b7fc28c4cab3b05b2352a9ee2b
|
[
"MIT"
] | null | null | null |
markovdwp/runtime/dwp.py
|
ivannz/MarkovDWP
|
f10ed7a331ddd9b7fc28c4cab3b05b2352a9ee2b
|
[
"MIT"
] | null | null | null |
markovdwp/runtime/dwp.py
|
ivannz/MarkovDWP
|
f10ed7a331ddd9b7fc28c4cab3b05b2352a9ee2b
|
[
"MIT"
] | null | null | null |
import os
import torch
from functools import partial
from collections.abc import Hashable
from torch.utils.data import DataLoader
from .base import BaseRuntime
from ..nn import named_penalties
from ..priors import ImplicitSlicePrior
from ..priors.implicit import ImplicitPrior
from ..source import KernelDataset
from ..utils.dicttools import propagate, add_prefix, resolve
from ..utils.runtime import get_instance
from ..utils.io import load
def unpack(state):
module = get_instance(**state['model'])
module.load_state_dict(state['state'])
return module
def load_prior(path, kind='trainable'):
assert kind in ('collapsed', 'fixed', 'trainable')
snapshot = load(path)
decoder = unpack(snapshot['decoder']).requires_grad_(False)
encoder = unpack(snapshot['encoder'])
encoder.requires_grad_(kind == 'trainable')
if kind == 'collapsed':
# assumes the encoder has `event_shape`, like vaes in model.dwp
encoder = encoder.event_shape
return ImplicitSlicePrior(decoder, encoder)
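# Note on the `kind` values above: 'trainable' keeps the encoder parameters
# learnable, 'fixed' freezes them, and 'collapsed' drops the encoder network
# entirely, keeping only its `event_shape` (the code assumes encoders built
# in model.dwp expose that attribute, as the inline comment notes).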
def load_priors(**priors):
lookup, loaded = {}, {}
for name, prior in priors.items():
if isinstance(prior, dict):
loaded[name] = load_prior(**prior)
elif isinstance(prior, Hashable):
lookup[name] = prior
else:
raise TypeError(f'Bad shared Prior reference `{prior}`.')
lookup = resolve(lookup) # detect cyclical and resolve linear references
missing = [ref for ref in lookup.values() if ref not in loaded]
if missing:
raise ValueError(f'Missing Priors detected `{missing}`.')
return {name: loaded[lookup.get(name, name)] for name in priors}
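# Illustrative sketch (hypothetical names and path): several entries can share
# one loaded prior by passing a hashable reference to another entry, e.g.
#   priors = load_priors(
#       conv1={'path': 'prior.pt', 'kind': 'trainable'},
#       conv2='conv1',  # alias: resolves to the prior loaded for conv1
#   )
#   assert priors['conv2'] is priors['conv1']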
def from_source(module, root, source):
"""Draw kernels from the empirical distribution of source slices."""
weight = getattr(module, 'weight', None)
assert isinstance(weight, torch.Tensor)
assert os.path.isdir(root)
info = KernelDataset.info(root)
assert source in info
# open the dataset
dataset = KernelDataset(root, source, dim='mio', min_norm=0.1)
# setup the dataloader
c_out, c_in, *dontcare = weight.shape
sample, *dontcare = next(iter(DataLoader(dataset, batch_size=c_in * c_out,
shuffle=True, num_workers=8)))
weight.data.copy_(sample.reshape_as(weight))
return module
def from_prior(module, prior):
weight = getattr(module, 'weight', None)
assert isinstance(weight, torch.Tensor)
assert isinstance(prior, ImplicitPrior)
weight.data.copy_(prior.sample(weight.shape))
return module
def init(module, priors, specs, prefix=''):
for name, mod in module.named_modules(prefix=prefix):
# silently assume missing inits use default and ignore them
init = specs.get(name, 'default')
if init == 'default':
pass
elif init == 'prior':
# sample filter from the associated prior
from_prior(mod, priors.get(name))
elif isinstance(init, dict):
from_source(mod, **{'source': name, **init})
else:
raise TypeError(f'Bad init spec `{init}`.')
return module
class BaseDWPRuntime(BaseRuntime):
r"""ELBO for Bayesian NN with var. approx. `q`.
$$
\mathcal{L}_{classic}
= \mathbb{E}_{w \sim q} \log p(D \mid w)
- \mathbb{E}_{w \sim q} \log \frac{q(w)}{\pi(w)}
\,, $$
If $\pi(w) = \mathbb{E}_{h \sim \pi} p(w \mid h)$, i.e. an implicit prior,
then for any $r(h\mid w)$ it computes the secondary lower bound:
$$
\mathcal{L}_{implicit}
= \mathbb{E}_{w \sim q} \log p(D \mid w)
- \mathbb{E}_{w \sim q} \mathbb{E}_{h \sim r(h|w)}
\log \frac{q(w) r(h \mid w)}{p(w \mid h) \pi(h)}
\,, $$
"""
def __init__(self, core, *, coef, lr, kind, priors, init):
assert kind in ('classic', 'implicit')
super().__init__(core, coef=coef, lr=lr)
self.kind, self.init = kind, init
self.priors = load_priors(**priors)
# shadow list to register priors with `torch.nn.Module`
self._priors = torch.nn.ModuleList(self.priors.values())
        # disable grads if we use non-implicit priors (implicit priors are
        # likely used just for init).
if self.kind != 'implicit':
self._priors.requires_grad_(False)
def on_train_start(self):
# we are on device, so re-init model here
init(self.core, self.priors, self.init, prefix='')
def training_penalty(self, outputs=None, prefix=''):
"""KL of parameter distrib from prior."""
tag = prefix + ('.' if prefix else '') + 'kl_div'
penalties = {}
if self.kind == 'implicit':
# compile the penalties LUT using the current coeffs
priors = add_prefix(self.priors, tag)
coef = dict(propagate({'': 1.0, **self.coef}, priors))
penalties = {
name: partial(prior.penalty, coef=coef[name],
n_draws_q=1, n_draws_r=1)
for name, prior in priors.items()
}
# Call penalties with interface from `cplxmodule.nn.relevance`
# * if a layer has no overrider penalty, then its `built-in` is used
return dict(named_penalties(self.core, penalties=penalties,
prefix=tag, reduction='sum'))
| 31.109195
| 78
| 0.617403
|
b53aac137d1e36f0422b533f3cd1cf01eb82c122
| 1,271
|
py
|
Python
|
train_model.py
|
VarnithChordia/Multlingual_Punctuation_restoration
|
17c026e8935b9fecae01d446a756926c7733fcd1
|
[
"MIT"
] | 8
|
2020-07-24T05:50:54.000Z
|
2022-02-17T00:16:07.000Z
|
train_model.py
|
VarnithChordia/Multlingual_Punctuation_restoration
|
17c026e8935b9fecae01d446a756926c7733fcd1
|
[
"MIT"
] | 4
|
2021-04-22T12:27:22.000Z
|
2022-03-12T00:59:43.000Z
|
train_model.py
|
VarnithChordia/Multlingual_Punctuation_restoration
|
17c026e8935b9fecae01d446a756926c7733fcd1
|
[
"MIT"
] | null | null | null |
import os
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import sys
import warnings
from modules.models.bert_models import BERTBiLSTMNCRFJoint
from modules.train.train import NerLearner
from modules.data import bert_data_new
warnings.filterwarnings("ignore")
sys.path.insert(0, "../")
data = bert_data_new.LearnData.create(
train_df_path="/data/", ## Location of the train dataset
valid_df_path="/data/", ## Location of the val dataset
idx2labels_path="../idx2labels.txt", ## Location to store punctuation labels
idx2cls_path= '../idx2cls.txt', ## Location to store language labels
idx2mode_path='../idx2mode.txt', ## Location to store text mode labels
is_cls = True,
clear_cache=True,
model_name='bert-base-multilingual-cased' ## Language model
)
model = BERTBiLSTMNCRFJoint.create(
len(data.train_ds.idx2label),
model_name='bert-base-multilingual-cased',
lstm_dropout=0.,
crf_dropout=0.3,
intent_size=3,
mode_size=2)
num_epochs = 10
learner = NerLearner(
model, data, "/data/models/LRL_multilingual-cased_mbert.cpt", ## Location to store the model
t_total=num_epochs * len(data.train_dl))
model.get_n_trainable_params()
learner.fit(epochs=num_epochs)
| 28.244444
| 96
| 0.736428
|
6964c05194e91acb5b9a4c136134e18dd8b24c95
| 3,629
|
py
|
Python
|
util/generateCoverage.py
|
enm10k/former2
|
0e116a910d3b4d591756817b5987358a81733e1b
|
[
"MIT"
] | null | null | null |
util/generateCoverage.py
|
enm10k/former2
|
0e116a910d3b4d591756817b5987358a81733e1b
|
[
"MIT"
] | null | null | null |
util/generateCoverage.py
|
enm10k/former2
|
0e116a910d3b4d591756817b5987358a81733e1b
|
[
"MIT"
] | null | null | null |
import os
import json
import pprint
import math
import re
services = None
cfn_spec = None
tf_resources = []
cfn_types = []
cfn_occurances = []
tf_occurances = []
cfn_exceptions = {
'AWS::CloudFormation::CustomResource': 'N/A',
'AWS::CloudFormation::Macro': 'N/A',
'AWS::CloudFormation::Stack': 'N/A',
'AWS::CloudFormation::WaitCondition': 'N/A',
'AWS::CloudFormation::WaitConditionHandle': 'N/A',
'AWS::EC2::SecurityGroupEgress': 'N/A',
'AWS::EC2::SecurityGroupIngress': 'N/A',
'AWS::RDS::DBSecurityGroupIngress': 'N/A',
'AWS::ElastiCache::SecurityGroupIngress': 'N/A',
'AWS::Redshift::ClusterSecurityGroupIngress': 'N/A',
'AWS::Route53::RecordSetGroup': 'N/A',
'AWS::SDB::Domain': 'N/A',
'AWS::IAM::UserToGroupAddition': 'N/A',
'Alexa::ASK::Skill': 'N/A',
'AWS::ServiceCatalog::PortfolioShare': 'N/A',
'AWS::SecretsManager::SecretTargetAttachment': 'N/A',
'AWS::ServiceCatalog::ResourceUpdateConstraint': 'N/A',
'AWS::ACMPCA::Certificate': 'N/A'
}
tf_exceptions = {
'aws_cloudformation_stack': 'N/A',
'aws_cloudformation_stack_set': 'N/A',
'aws_cloudformation_stack_set_instance': 'N/A',
'aws_dx_hosted_public_virtual_interface_accepter': 'N/A',
'aws_dx_hosted_private_virtual_interface_accepter': 'N/A',
'aws_simpledb_domain': 'N/A'
}
with open("util/cfnspec.json", "r") as f:
cfn_spec = json.loads(f.read())['ResourceTypes']
with open("util/tf_resources.txt", "r") as f:
lines = f.read().splitlines()
for line in lines:
tf_resources.append(line)
for servicefilename in os.listdir("js/services"):
with open("js/services/" + servicefilename, "r") as f:
text = f.read()
lines = text.splitlines()
cfn_occurances += re.compile(r'(AWS\:\:[a-zA-Z0-9]+\:\:[a-zA-Z0-9]+)').findall(text)
tf_occurances += re.compile(r'terraformType\'\:\ \'(aws(?:\_[a-zA-Z0-9]+)+)\'').findall(text)
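# The two regexes above collect coverage markers from the generated service
# files, e.g. (illustrative) "AWS::EC2::Instance" is picked up by the CFN
# pattern, and the text "terraformType': 'aws_instance'" contributes
# "aws_instance" to tf_occurances.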
for cfntype, _ in cfn_spec.items():
cfn_types.append(cfntype)
for cfn_occurance in cfn_occurances:
if cfn_occurance not in cfn_types:
print("Resource not in spec: " + cfn_occurance)
cfn_types.append(cfn_occurance)
cfn_types = set(cfn_types)
total_services = 0
total_operations = 0
total_unique_occurances = 0
with open("RESOURCE_COVERAGE.md", "w") as f:
f.write("## CloudFormation Resource Coverage\n\n")
f.write("**%s/%s (%s%%)** Resources Covered\n" % (
len(set(cfn_occurances)) + len(cfn_exceptions),
len(cfn_types),
int(math.floor((len(set(cfn_occurances)) + len(cfn_exceptions)) * 100 / len(cfn_types)))
))
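    # e.g. (illustrative numbers) 500 covered types plus 18 exceptions out of
    # 700 total would be written as "**518/700 (74%)** Resources Covered";
    # math.floor keeps the percentage an integer.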
f.write("\n| Type | Coverage |\n")
f.write("| --- | --- |\n")
for cfntype in sorted(cfn_types):
coverage = ""
if cfn_occurances.count(cfntype) > 0:
coverage = ":thumbsup:"
if cfntype in cfn_exceptions:
coverage = cfn_exceptions[cfntype]
f.write("| *%s* | %s |\n" % (cfntype, coverage))
f.write("\n## Terraform Coverage\n\n")
f.write("**%s/%s (%s%%)** Resources Covered\n" % (
len(set(tf_occurances)) + len(tf_exceptions),
len(tf_resources),
int(math.floor((len(set(tf_occurances)) + len(tf_exceptions)) * 100 / len(tf_resources)))
))
f.write("\n| Type | Coverage |\n")
f.write("| --- | --- |\n")
for tf_resource in sorted(tf_resources):
coverage = ""
if tf_occurances.count(tf_resource) > 0:
coverage = ":thumbsup:"
if tf_resource in tf_exceptions:
coverage = tf_exceptions[tf_resource]
f.write("| *%s* | %s |\n" % (tf_resource, coverage))
| 33.915888
| 101
| 0.625241
|
acab6dddff97749e9f5f617bfd3958ca98a7b61d
| 42,306
|
py
|
Python
|
UI/moxa.py
|
Subhadip-decode/ShittyBots
|
0b3563053a280f451b1d20165b2a8b41169c1d0d
|
[
"BSD-3-Clause"
] | null | null | null |
UI/moxa.py
|
Subhadip-decode/ShittyBots
|
0b3563053a280f451b1d20165b2a8b41169c1d0d
|
[
"BSD-3-Clause"
] | null | null | null |
UI/moxa.py
|
Subhadip-decode/ShittyBots
|
0b3563053a280f451b1d20165b2a8b41169c1d0d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Moxa.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import detect
from subprocess import call
from PyQt5.QtWidgets import *
import sys # We need sys so that we can pass argv to QApplication
import os
sys.path.insert(1, '../Analytics')
from menubar_utilities import *
from graph_test import Ui_showGraph
import webbrowser
import subprocess, sys
class Ui_MainWindow(object):
    def show_graphs(self):
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_showGraph()
        self.ui.setupUi(self.window)
        self.window.show()
def moxa_window(self):
print('moxa')
def analytics_window(self):
print('analytics')
def detection_folder(self):
        # os.startfile('../darknet_detection/CAMS')  # Windows-only alternative
        opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, '../darknet_detection/CAM/'])
def graph_folder(self):
        # os.startfile('../Analytics/graph')  # Windows-only alternative
        opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, '../Analytics/graph'])
def analytics_folder(self):
        # os.startfile('../Analytics')  # Windows-only alternative
        opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, '../Analytics'])
def moxahelp(self):
webbrowser.open('https://github.com/Hack-n-Chill/ShittyBots', new=2)
# def exprt(self):
# # create an exporter instance, as an argument give it
# # the item you wish to export
# self=Ui_showGraph()
# exporter = pg.exporters.ImageExporter(self.graph)
# exporter_2 = pg.exporters.ImageExporter(self.graph_2)
# # set export parameters if needed
# exporter.parameters()['width'] = 100 # (note this also affects height parameter)
# exporter_2.parameters()['width'] = 100
# # save to file
# exporter.export('Ratio vs Time.png')
# exporter_2.export('Mask vs Nomask.png')
def share(self): #calls detect.py passing three arguments
text1=self.lineEdit.text()
text2=self.lineEdit_2.text()
text3=self.lineEdit_3.text()
detect.detect_it(text1,text2,text3)
def close_window(self):
detect.close_window()
def attribute_add(self): #calls detect.py passing the attributes
ROI=self.lineEdit_4.text()
rot_angle=self.lineEdit_5.text()
stream_format=self.lineEdit_6.text()
detect.attribute_detect_it(ROI,rot_angle,stream_format)
def restart(self):
self.lineEdit.setText("")
self.lineEdit_2.setText("")
self.lineEdit_3.setText("")
self.lineEdit_4.setText("")
self.lineEdit_5.setText("")
self.lineEdit_6.setText("")
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 600)
MainWindow.setMinimumSize(QtCore.QSize(600, 600))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
MainWindow.setPalette(palette)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("moxa_main.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.scrollArea.setPalette(palette)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 580, 538))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setContentsMargins(9, 3, -1, -1)
self.verticalLayout.setObjectName("verticalLayout")
self.label_6 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_6.setText("")
self.label_6.setObjectName("label_6")
self.verticalLayout.addWidget(self.label_6)
self.frame_2 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_2)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label = QtWidgets.QLabel(self.frame_2)
font = QtGui.QFont()
font.setFamily("Stereofunk")
font.setPointSize(36)
self.label.setFont(font)
self.label.setObjectName("label")
self.horizontalLayout_3.addWidget(self.label)
self.verticalLayout.addWidget(self.frame_2)
self.groupBox_2 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.groupBox_2.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.groupBox_2.setFont(font)
self.groupBox_2.setCheckable(False)
self.groupBox_2.setObjectName("groupBox_2")
self.formLayout = QtWidgets.QFormLayout(self.groupBox_2)
self.formLayout.setContentsMargins(17, 17, 17, 17)
self.formLayout.setHorizontalSpacing(27)
self.formLayout.setVerticalSpacing(16)
self.formLayout.setObjectName("formLayout")
self.label_2 = QtWidgets.QLabel(self.groupBox_2)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.lineEdit = QtWidgets.QLineEdit(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEdit.setPalette(palette)
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setObjectName("lineEdit")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
self.label_3 = QtWidgets.QLabel(self.groupBox_2)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.lineEdit_2 = QtWidgets.QLineEdit(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEdit_2.setPalette(palette)
self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
self.label_4 = QtWidgets.QLabel(self.groupBox_2)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.lineEdit_3 = QtWidgets.QLineEdit(self.groupBox_2)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEdit_3.setPalette(palette)
self.lineEdit_3.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_3.setObjectName("lineEdit_3")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEdit_3)
self.verticalLayout.addWidget(self.groupBox_2)
self.frame = QtWidgets.QFrame(self.scrollAreaWidgetContents)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton_3 = QtWidgets.QPushButton(self.frame)
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.restart)
self.horizontalLayout_2.addWidget(self.pushButton_3)
self.pushButton_4 = QtWidgets.QPushButton(self.frame)
self.pushButton_4.setObjectName("pushButton_4")
self.horizontalLayout_2.addWidget(self.pushButton_4)
self.pushButton_4.clicked.connect(self.share) #calls share func to call detect.py
self.pushButton_5 = QtWidgets.QPushButton(self.frame)
self.pushButton_5.setObjectName("pushButton_5")
self.horizontalLayout_2.addWidget(self.pushButton_5)
self.pushButton_5.clicked.connect(self.close_window)
self.pushButton_7 = QtWidgets.QPushButton(self.frame)
self.pushButton_7.setObjectName("pushButton_7")
self.horizontalLayout_2.addWidget(self.pushButton_7)
self.pushButton_7.clicked.connect(self.detection_folder)
self.verticalLayout.addWidget(self.frame)
self.groupBox = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.groupBox.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_3.setObjectName("gridLayout_3")
self.pushButton = QtWidgets.QPushButton(self.groupBox)
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.show_graphs)
self.gridLayout_3.addWidget(self.pushButton, 0, 0, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(self.groupBox)
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(self.analytics_folder)
self.gridLayout_3.addWidget(self.pushButton_2, 0, 1, 1, 1)
self.progressBar = QtWidgets.QProgressBar(self.groupBox)
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.gridLayout_3.addWidget(self.progressBar, 0, 2, 1, 1)
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_3 = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))
brush.setStyle(QtCore.Qt.NoBrush)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.groupBox_3.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout.setObjectName("gridLayout")
self.lineEdit_5 = QtWidgets.QLineEdit(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEdit_5.setPalette(palette)
self.lineEdit_5.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_5.setObjectName("lineEdit_5")
self.gridLayout.addWidget(self.lineEdit_5, 1, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.groupBox_3)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 2, 0, 1, 1)
self.lineEdit_4 = QtWidgets.QLineEdit(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEdit_4.setPalette(palette)
self.lineEdit_4.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_4.setObjectName("lineEdit_4")
self.gridLayout.addWidget(self.lineEdit_4, 0, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.groupBox_3)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 0, 0, 1, 1)
self.lineEdit_6 = QtWidgets.QLineEdit(self.groupBox_3)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(95, 105, 112))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 47, 51))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEdit_6.setPalette(palette)
self.lineEdit_6.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_6.setObjectName("lineEdit_6")
self.gridLayout.addWidget(self.lineEdit_6, 2, 1, 1, 1)
self.label_8 = QtWidgets.QLabel(self.groupBox_3)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 1, 0, 1, 1)
self.pushButton_6 = QtWidgets.QPushButton(self.groupBox_3)
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_6.clicked.connect(self.attribute_add) #to call the attribute addition function in moxa.py
self.gridLayout.addWidget(self.pushButton_6, 3, 1, 1, 1)
self.verticalLayout.addWidget(self.groupBox_3)
self.label_5 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName("label_5")
self.verticalLayout.addWidget(self.label_5)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.scrollArea)
MainWindow.setCentralWidget(self.centralwidget)
##Menu Bar
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 600, 20))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.menubar.setPalette(palette)
self.menubar.setObjectName("menubar")
self.menuView = QtWidgets.QMenu(self.menubar)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 8, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 41, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(21, 7, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(15, 5, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(3, 1, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 8, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 85, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 41, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(21, 7, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(15, 5, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(3, 1, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 8, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 8, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 8, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(25, 8, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 41, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(43, 41, 40))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(21, 7, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(15, 5, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(3, 1, 0, 128))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
self.menuView.setPalette(palette)
self.menuView.setObjectName("menuView")
self.menuEdit = QtWidgets.QMenu(self.menubar)
self.menuEdit.setObjectName("menuEdit")
self.menuHelp = QtWidgets.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionMoxa_Help = QtWidgets.QAction(MainWindow)
self.actionMoxa_Help.setObjectName("actionMoxa_Help")
self.actionMoxa_Help.triggered.connect(self.moxahelp)
self.actionmoxa = QtWidgets.QAction(MainWindow)
self.actionmoxa.setObjectName("actionmoxa")
self.actionmoxa.triggered.connect(self.moxa_window)
self.actionanalytics = QtWidgets.QAction(MainWindow)
self.actionanalytics.setObjectName("actionanalytics")
self.actionanalytics.triggered.connect(self.analytics_window)
self.actionDetecions_Folder = QtWidgets.QAction(MainWindow)
self.actionDetecions_Folder.setObjectName("actionDetecions_Folder")
self.actionDetecions_Folder.triggered.connect(self.detection_folder)
self.actionGraphs = QtWidgets.QAction(MainWindow)
self.actionGraphs.setObjectName("actionGraphs")
self.actionGraphs.triggered.connect(self.graph_folder)
self.menuView.addSeparator()
self.menuView.addAction(self.actionmoxa)
self.menuView.addAction(self.actionanalytics)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionDetecions_Folder)
self.menuEdit.addAction(self.actionGraphs)
self.menuHelp.addAction(self.actionMoxa_Help)
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MOXA"))
self.label.setText(_translate("MainWindow", "Moxa"))
self.groupBox_2.setTitle(_translate("MainWindow", "Detector"))
self.label_2.setText(_translate("MainWindow", "Camera ID"))
self.label_3.setText(_translate("MainWindow", "Camera IP"))
self.label_4.setText(_translate("MainWindow", "Port ID"))
self.pushButton_3.setText(_translate("MainWindow", "New Session"))
self.pushButton_4.setText(_translate("MainWindow", "Run"))
self.pushButton_5.setText(_translate("MainWindow", "Stop"))
self.pushButton_7.setText(_translate("MainWindow", "Open Saved"))
self.groupBox.setTitle(_translate("MainWindow", "Analytics"))
self.pushButton.setText(_translate("MainWindow", "Show Graphs"))
self.pushButton_2.setText(_translate("MainWindow", "Open"))
self.groupBox_3.setTitle(_translate("MainWindow", "Streams"))
self.label_9.setText(_translate("MainWindow", "Stream Format"))
self.label_7.setText(_translate("MainWindow", "ROI"))
self.label_8.setText(_translate("MainWindow", "Rotation Angle"))
self.pushButton_6.setText(_translate("MainWindow", "Apply"))
self.label_5.setText(_translate("MainWindow", "v 4.2.0.69"))
self.menuView.setTitle(_translate("MainWindow", "View"))
self.menuEdit.setTitle(_translate("MainWindow", "Open"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionMoxa_Help.setText(_translate("MainWindow", "Moxa Help"))
self.actionmoxa.setText(_translate("MainWindow", "moxa"))
self.actionanalytics.setText(_translate("MainWindow", "analytics"))
        self.actionDetecions_Folder.setText(_translate("MainWindow", "Detections Folder"))
self.actionGraphs.setText(_translate("MainWindow", "Graphs"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 54.518041
| 113
| 0.691439
|
4a9f5313ea768073ee4c311c37b3d5c21de706e9
| 1,444
|
py
|
Python
|
tests/testcases/clear_shadow_unredirected.py
|
rbreaves/picom
|
60eb00ce1b52aee46d343481d0530d5013ab850b
|
[
"MIT"
] | 2,267
|
2019-10-25T06:05:55.000Z
|
2022-03-31T21:37:08.000Z
|
tests/testcases/clear_shadow_unredirected.py
|
rbreaves/picom
|
60eb00ce1b52aee46d343481d0530d5013ab850b
|
[
"MIT"
] | 552
|
2019-10-24T11:52:53.000Z
|
2022-03-30T18:29:42.000Z
|
tests/testcases/clear_shadow_unredirected.py
|
rbreaves/picom
|
60eb00ce1b52aee46d343481d0530d5013ab850b
|
[
"MIT"
] | 460
|
2019-11-04T20:17:17.000Z
|
2022-03-28T00:06:28.000Z
|
#!/usr/bin/env python
import xcffib.xproto as xproto
import xcffib
import time
from common import set_window_name
conn = xcffib.connect()
setup = conn.get_setup()
root = setup.roots[0].root
visual = setup.roots[0].root_visual
depth = setup.roots[0].root_depth
name = "_NET_WM_STATE"
name_atom = conn.core.InternAtom(False, len(name), name).reply().atom
atom = "ATOM"
atom_atom = conn.core.InternAtom(False, len(atom), atom).reply().atom
fs = "_NET_WM_STATE_FULLSCREEN"
fs_atom = conn.core.InternAtom(False, len(fs), fs).reply().atom
# making sure disabling shadow while screen is unredirected doesn't cause assertion failure
wid = conn.generate_id()
print("Window id is ", hex(wid))
# Create a window
conn.core.CreateWindowChecked(depth, wid, root, 0, 0, 100, 100, 0, xproto.WindowClass.InputOutput, visual, 0, []).check()
# Set Window name so it does get a shadow
set_window_name(conn, wid, "YesShadow")
# Map the window
print("mapping")
conn.core.MapWindowChecked(wid).check()
time.sleep(0.5)
# Set fullscreen property, causing screen to be unredirected
conn.core.ChangePropertyChecked(xproto.PropMode.Replace, wid, name_atom, atom_atom, 32, 1, [fs_atom]).check()
time.sleep(0.5)
# Set the Window name so it loses its shadow
print("set new name")
set_window_name(conn, wid, "NoShadow")
# Unmap the window
conn.core.UnmapWindowChecked(wid).check()
time.sleep(0.5)
# Destroy the window
conn.core.DestroyWindowChecked(wid).check()
| 27.245283
| 121
| 0.752078
|
bc029410ddede2a58edd1a63bfeb7ba1ddb45e6b
| 772
|
py
|
Python
|
src/surveys/migrations/0007_translate_interest.py
|
mrc-rius/computational_marketing_master_thesis
|
347cf9ef64fcee36cf7068d0d214ef1c9de11cb5
|
[
"MIT"
] | null | null | null |
src/surveys/migrations/0007_translate_interest.py
|
mrc-rius/computational_marketing_master_thesis
|
347cf9ef64fcee36cf7068d0d214ef1c9de11cb5
|
[
"MIT"
] | null | null | null |
src/surveys/migrations/0007_translate_interest.py
|
mrc-rius/computational_marketing_master_thesis
|
347cf9ef64fcee36cf7068d0d214ef1c9de11cb5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-08-04 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('surveys', '0006_translate_hired_power'),
]
operations = [
migrations.CreateModel(
name='Translate_Interest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('interest_value', models.CharField(max_length=2)),
('min_interest', models.DecimalField(decimal_places=2, max_digits=2)),
('max_interest', models.DecimalField(decimal_places=2, max_digits=2)),
('deleted', models.IntegerField(default=0)),
],
),
]
| 32.166667
| 114
| 0.603627
|
4051600a137457a0b7b7a8676005c610fdc04a84
| 1,144
|
py
|
Python
|
capture.py
|
igor2269/terminal-webcam
|
f692abe5c93d8d9506e39f636f2e6611b724f158
|
[
"MIT"
] | 78
|
2015-01-01T05:05:14.000Z
|
2021-02-12T23:14:20.000Z
|
capture.py
|
igor2269/terminal-webcam
|
f692abe5c93d8d9506e39f636f2e6611b724f158
|
[
"MIT"
] | 3
|
2015-12-07T00:19:36.000Z
|
2018-04-20T13:58:31.000Z
|
capture.py
|
igor2269/terminal-webcam
|
f692abe5c93d8d9506e39f636f2e6611b724f158
|
[
"MIT"
] | 13
|
2015-01-08T04:11:34.000Z
|
2021-02-25T14:25:12.000Z
|
import cv
import os
import sys
import math
import curses
import signal
def signal_handler(signal, frame):
print 'You pressed Ctrl + C!'
curses.endwin()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
stdscr = curses.initscr()
palette = [' ', '.', '.', '/', 'c', '(', '@', '#', '8']
capture = cv.CaptureFromCAM(0)
# Get the width and height from the terminal (console)
(rows, columns) = os.popen('stty size', 'r').read().split()
rows = int(rows)
columns = int(columns)
while True:
# Capture the image
img = cv.QueryFrame(capture)
thumbnail = cv.CreateImage(
(columns, rows),
img.depth,
img.nChannels
)
cv.Resize(img, thumbnail)
img = thumbnail
# Print the output
for x in xrange(img.height):
for y in xrange(img.width):
b, g, r = img[x, y]
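            # Approximate the pixel's luma from its BGR channels, then map the
            # 0-255 brightness range onto an index into the ASCII palette.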
value = b * 0.1145 + g * 0.5866 + r * 0.2989
index = int(math.floor(value / (256.0 / (len(palette)))))
try:
stdscr.move(x, y)
stdscr.addch(palette[index])
except:
pass
stdscr.refresh()
| 22
| 69
| 0.556818
|
7e7553f2192704bada2abf0651f239a2c2324dbb
| 634
|
py
|
Python
|
Lib/site-packages/celerid/infrastructure/pyd/generators/iterate.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/celerid/infrastructure/pyd/generators/iterate.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/celerid/infrastructure/pyd/generators/iterate.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
import sys
old_stdout = sys.stdout
sys.stdout = file('iterate.txt', 'w')
template = """\
} else static if (ARGS == %s) {
foreach (%s; t) {
temp = _make_pytuple(%s);
if (temp is null) StackContext.throwYield(new DPyYield(null));
StackContext.throwYield(new DPyYield(temp));
}"""
def args(i):
return ['a%s' % i for i in range(0, i)]
def pyargs(i):
return ['_py(%s)' % p for p in args(i)]
for i in range(2, 11):
print template % (i, ', '.join(args(i)), ', '.join(pyargs(i)))
print ' }'
sys.stdout = old_stdout
| 26.416667
| 79
| 0.511041
|
5c5b264af1bb836d714c3303a5702b4a6de1f303
| 7,381
|
py
|
Python
|
mne/io/kit/coreg.py
|
rylaw/mne-python
|
aa526c8ed7049046734ca28493d99e841672b0eb
|
[
"BSD-3-Clause"
] | 1
|
2021-03-18T01:35:17.000Z
|
2021-03-18T01:35:17.000Z
|
mne/io/kit/coreg.py
|
rylaw/mne-python
|
aa526c8ed7049046734ca28493d99e841672b0eb
|
[
"BSD-3-Clause"
] | 2
|
2020-09-18T00:09:20.000Z
|
2020-11-10T17:47:10.000Z
|
mne/io/kit/coreg.py
|
rylaw/mne-python
|
aa526c8ed7049046734ca28493d99e841672b0eb
|
[
"BSD-3-Clause"
] | 1
|
2021-04-01T15:56:39.000Z
|
2021-04-01T15:56:39.000Z
|
"""Coordinate Point Extractor for KIT system."""
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from collections import OrderedDict
from os import SEEK_CUR, path as op
import pickle
import re
import numpy as np
from .constants import KIT, FIFF
from .._digitization import _make_dig_points
from ...transforms import (Transform, apply_trans, get_ras_to_neuromag_trans,
als_ras_trans)
from ...utils import warn, _check_option
INT32 = '<i4'
FLOAT64 = '<f8'
def read_mrk(fname):
r"""Marker Point Extraction in MEG space directly from sqd.
Parameters
----------
fname : str
Absolute path to Marker file.
File formats allowed: \*.sqd, \*.mrk, \*.txt, \*.pickled.
Returns
-------
mrk_points : ndarray, shape (n_points, 3)
Marker points in MEG space [m].
"""
from .kit import _read_dirs
ext = op.splitext(fname)[-1]
if ext in ('.sqd', '.mrk'):
with open(fname, 'rb', buffering=0) as fid:
dirs = _read_dirs(fid)
fid.seek(dirs[KIT.DIR_INDEX_COREG]['offset'])
# skips match_done, meg_to_mri and mri_to_meg
fid.seek(KIT.INT + (2 * KIT.DOUBLE * 16), SEEK_CUR)
mrk_count = np.fromfile(fid, INT32, 1)[0]
pts = []
for _ in range(mrk_count):
# mri_type, meg_type, mri_done, meg_done
_, _, _, meg_done = np.fromfile(fid, INT32, 4)
_, meg_pts = np.fromfile(fid, FLOAT64, 6).reshape(2, 3)
if meg_done:
pts.append(meg_pts)
mrk_points = np.array(pts)
elif ext == '.txt':
mrk_points = _read_dig_kit(fname, unit='m')
elif ext == '.pickled':
with open(fname, 'rb') as fid:
food = pickle.load(fid)
try:
mrk_points = food['mrk']
except Exception:
err = ("%r does not contain marker points." % fname)
raise ValueError(err)
else:
raise ValueError('KIT marker file must be *.sqd, *.mrk, *.txt or '
'*.pickled, *%s is not supported.' % ext)
# check output
mrk_points = np.asarray(mrk_points)
if mrk_points.shape != (5, 3):
err = ("%r is no marker file, shape is "
"%s" % (fname, mrk_points.shape))
raise ValueError(err)
return mrk_points
def read_sns(fname):
"""Sensor coordinate extraction in MEG space.
Parameters
----------
fname : str
Absolute path to sensor definition file.
Returns
-------
locs : numpy.array, shape = (n_points, 3)
Sensor coil location.
"""
p = re.compile(r'\d,[A-Za-z]*,([\.\-0-9]+),' +
r'([\.\-0-9]+),([\.\-0-9]+),' +
r'([\.\-0-9]+),([\.\-0-9]+)')
with open(fname) as fid:
locs = np.array(p.findall(fid.read()), dtype=float)
return locs
def _set_dig_kit(mrk, elp, hsp, eeg):
"""Add landmark points and head shape data to the KIT instance.
Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
ALS coordinate system. This is converted to [m].
Parameters
----------
mrk : None | str | array_like, shape (5, 3)
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more
        than 10,000 points are in the head shape, they are automatically
decimated.
eeg : dict
Ordered dict of EEG dig points.
Returns
-------
dig_points : list
List of digitizer points for info['dig'].
dev_head_t : dict
        A dictionary describing the device-head transformation.
hpi_results : list
The hpi results.
"""
from ...coreg import fit_matched_points, _decimate_points
if isinstance(hsp, str):
hsp = _read_dig_kit(hsp)
n_pts = len(hsp)
if n_pts > KIT.DIG_POINTS:
hsp = _decimate_points(hsp, res=0.005)
n_new = len(hsp)
warn("The selected head shape contained {n_in} points, which is "
"more than recommended ({n_rec}), and was automatically "
"downsampled to {n_new} points. The preferred way to "
"downsample is using FastScan.".format(
n_in=n_pts, n_rec=KIT.DIG_POINTS, n_new=n_new))
if isinstance(elp, str):
elp_points = _read_dig_kit(elp)
if len(elp_points) != 8:
raise ValueError("File %r should contain 8 points; got shape "
"%s." % (elp, elp_points.shape))
elp = elp_points
elif len(elp) not in (6, 7, 8):
raise ValueError("ELP should contain 6 ~ 8 points; got shape "
"%s." % (elp.shape,))
if isinstance(mrk, str):
mrk = read_mrk(mrk)
mrk = apply_trans(als_ras_trans, mrk)
nasion, lpa, rpa = elp[:3]
nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
elp = apply_trans(nmtrans, elp)
hsp = apply_trans(nmtrans, hsp)
eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items())
# device head transform
trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out='trans')
nasion, lpa, rpa = elp[:3]
elp = elp[3:]
dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg)
dev_head_t = Transform('meg', 'head', trans)
hpi_results = [dict(dig_points=[
dict(ident=ci, r=r, kind=FIFF.FIFFV_POINT_HPI,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
for ci, r in enumerate(mrk)], coord_trans=dev_head_t)]
return dig_points, dev_head_t, hpi_results
def _read_dig_kit(fname, unit='auto'):
# Read dig points from a file and return ndarray, using FastSCAN for .txt
from ...channels.montage import (
read_polhemus_fastscan, read_dig_polhemus_isotrak, read_custom_montage,
_check_dig_shape)
assert unit in ('auto', 'm', 'mm')
_, ext = op.splitext(fname)
_check_option('file extension', ext[1:], ('hsp', 'elp', 'mat', 'txt'))
if ext == '.txt':
unit = 'mm' if unit == 'auto' else unit
out = read_polhemus_fastscan(fname, unit=unit,
on_header_missing='ignore')
elif ext in ('.hsp', '.elp'):
unit = 'm' if unit == 'auto' else unit
mon = read_dig_polhemus_isotrak(fname, unit=unit)
if fname.endswith('.hsp'):
dig = [d['r'] for d in mon.dig
if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
else:
dig = [d['r'] for d in mon.dig]
if dig and \
mon.dig[0]['kind'] == FIFF.FIFFV_POINT_CARDINAL and \
mon.dig[0]['ident'] == FIFF.FIFFV_POINT_LPA:
# LPA, Nasion, RPA -> NLR
dig[:3] = [dig[1], dig[0], dig[2]]
out = np.array(dig, float)
else:
assert ext == '.mat'
out = np.array([d['r'] for d in read_custom_montage(fname).dig])
_check_dig_shape(out)
return out
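if __name__ == '__main__':
    # Minimal usage sketch, not part of the upstream module; 'markers.mrk' is an
    # assumed file name. read_mrk() accepts *.sqd, *.mrk, *.txt or *.pickled
    # marker files and always returns the five coil positions in metres as a
    # (5, 3) array.
    example_points = read_mrk('markers.mrk')
    print(example_points.shape)  # expected: (5, 3)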
| 34.652582
| 79
| 0.581629
|
84ea7e37388a636a6a7384719cd0993a180c42e0
| 19,906
|
py
|
Python
|
push_new_pricing_to_statusdb.py
|
chuan-wang/standalone_scripts
|
66d0ee24cb77d2430a488a11043136f1c56f8e2b
|
[
"MIT"
] | 3
|
2015-11-18T07:17:32.000Z
|
2018-06-18T15:21:53.000Z
|
push_new_pricing_to_statusdb.py
|
chuan-wang/standalone_scripts
|
66d0ee24cb77d2430a488a11043136f1c56f8e2b
|
[
"MIT"
] | 43
|
2015-01-15T14:09:54.000Z
|
2022-03-30T04:49:18.000Z
|
push_new_pricing_to_statusdb.py
|
chuan-wang/standalone_scripts
|
66d0ee24cb77d2430a488a11043136f1c56f8e2b
|
[
"MIT"
] | 22
|
2015-06-03T08:21:45.000Z
|
2019-09-16T06:38:45.000Z
|
#!/usr/bin/env python
"""
Reads the cost_calculator excel sheet and puts all that information
into statusdb.
"""
import argparse
from openpyxl import load_workbook
import coloredlogs
import logging
import yaml
from couchdb import Server
import datetime
from collections import OrderedDict
import pprint
FIRST_ROW = {'components': 9,
'products': 4}
SHEET = {'components': 'Price list',
'products': 'Products'}
# Skip columns which are calculated from the other fields
SKIP = {'components': ['Price', 'Total', 'Per unit'],
'products': ['Internal', 'External']}
# The name of the _id_ key and which variables that shouldn't be changed
# while keeping the same _id_. If an update of any of these fields is needed,
# a new id should be created.
CONSERVED_KEY_SETS = {'products': ['Category', 'Type', 'Name'],
'components': ['Category', 'Type', 'Product name']}
# The combination of these "columns" should be unique within the document,
# a warning will be issued otherwise
UNIQUE_KEY_SETS = {'products': ['Category', 'Type', 'Name'],
'components': ['Category', 'Type', 'Product name', 'Units']}
NOT_NULL_KEYS = {'products': ['REF_ID', 'Category', 'Type', 'Name', 'Re-run fee'],
'components': ['REF_ID', 'Category', 'Type', 'Status',
'Product name', 'Units', 'Currency',
'List price', 'Discount']}
MAX_NR_ROWS = 200
# Assuming the rows of products are sorted in the preferred order
# Set up a logger with colored output
logger = logging.getLogger('push_new_pricing_logger')
logger.propagate = False # Otherwise the messages appeared twice
coloredlogs.install(level='INFO', logger=logger,
fmt='%(asctime)s %(levelname)s %(message)s')
def check_unique(items, type):
"""Check whether all items within _items_
fulfill the uniqueness criteria according to the UNIQUE_KEY_SETS.
Otherwise warn accordingly.
"""
key_val_set = set()
for id, item in items.items():
keys = UNIQUE_KEY_SETS[type]
t = tuple(item[key] for key in keys)
# Check that it is not already added
if t in key_val_set:
logger.warning("Key combination {}:{} is included multiple "
"times in the {} sheet. ".format(keys, t, type))
key_val_set.add(t)
return True
def check_conserved(new_items, current_items, type):
"""Ensures the keys in CONSERVED_KEY_SETS are conserved for each given id.
Compares the new version against the currently active one.
Params:
new_items - A dict of the items that are to be added
with ID attribute as the key.
current_items - A dict of the items currently in the database
with ID attribute as the key.
type - Either "components" or "products"
"""
conserved_keys = CONSERVED_KEY_SETS[type]
for id, new_item in new_items.items():
if str(id) in current_items:
for conserved_key in conserved_keys:
if conserved_key not in new_item:
logger.warning("{} column not found in new {} row with "
"id {}. This column should be kept "
"conserved.".format(conserved_key, type, id))
if new_item[conserved_key] != current_items[str(id)][conserved_key]:
logger.warning("{} should be conserved for {}. "
"Violated for item with id {}. "
"Found \"{}\" for new and \"{}\" for current. ".format(
conserved_key, type,
id, new_item[conserved_key],
current_items[str(id)][conserved_key]))
return True
def check_not_null(items, type):
"""Make sure type specific columns (given by NOT_NULL_KEYS) are not null."""
not_null_keys = NOT_NULL_KEYS[type]
for id, item in items.items():
for not_null_key in not_null_keys:
if item[not_null_key] is None or item[not_null_key] == '':
# Special case for discontinued components
if 'Status' in item and item['Status'] == 'Discontinued':
pass
else:
raise ValueError("{} cannot be empty for {}."
" Violated for item with id {}.".\
format(not_null_key, type, id))
def check_discontinued(components, products):
"""Make sure no discontinued components are used for enabled products."""
for product_id, product in products.items():
component_ids = []
if product["Components"]:
component_ids += list(product["Components"].keys())
if product["Alternative Components"]:
component_ids += list(product["Alternative Components"].keys())
for component_id in component_ids:
if product["Status"] == "Enabled":
if components[component_id]["Status"] == "Discontinued":
logger.warning(("Product {}:\"{}\" uses the discontinued component "
"{}:\"{}\", changing product status to \"discontinued\"").\
format(product_id, products[product_id]["Name"], \
component_id, components[component_id]["Product name"]))
product["Status"] = "Discontinued"
def get_current_items(db, type):
rows = db.view("entire_document/by_version", descending=True, limit=1).rows
if len(rows) != 0:
doc = rows[0].value
return doc[type]
return {}
def is_empty_row(comp):
for k, v in comp.items():
if v != '':
return False
return True
def load_products(wb):
ws = wb[SHEET['products']]
row = FIRST_ROW['products']
header_row = row - 1
header_cells = ws[header_row]
header = {}
product_price_columns = {}
for cell in header_cells:
cell_val = cell.value
if cell_val == 'ID':
cell_val = 'REF_ID' # Don't want to confuse it with couchdb ids
# Get cell column as string
cell_column = cell.coordinate.replace(str(header_row), '')
if cell_val not in SKIP['products']:
header[cell_column] = cell_val
else:
# save a lookup to find column of prices
product_price_columns[cell_val] = cell_column
products = OrderedDict()
    # Unknown number of rows
while row < MAX_NR_ROWS:
new_product = {}
fetch_prices = False # default behaviour
for col, header_val in header.items():
val = ws["{}{}".format(col, row)].value
if val is None:
val = ''
if header_val in ['Components', 'Alternative Components']:
# Some cells might be interpreted as floats
# e.g. "37,78"
val = str(val)
val = val.replace('.', ',')
if val:
val_list = []
for comp_id in val.split(','):
try:
int(comp_id)
except ValueError:
print("Product on row {} has component with "
"invalid id {}: not an integer, "
" aborting!".format(row, comp_id))
raise
# Make a list with all individual components
val_list.append(comp_id)
val = {comp_ref_id: {'quantity': 1} for comp_ref_id in val_list}
elif header_val == 'Components':
# If no components are listed, price should be fetched as well,
# unless the row is in fact empty.
if not is_empty_row(new_product):
fetch_prices = True
# Comment column occurs after the price columns, so
# checking for this ensures that the prices have been parsed
if (header_val == 'Comment') and fetch_prices:
# Fixed price is added when price does not
# directly depend on the components
new_product['fixed_price'] = {}
int_cell = "{}{}".format(
product_price_columns['Internal'],
row
)
ext_cell = "{}{}".format(
product_price_columns['External'],
row
)
new_product['fixed_price']['price_in_sek'] = ws[int_cell].value
new_product['fixed_price']['external_price_in_sek'] = ws[ext_cell].value
new_product[header_val] = val
if not is_empty_row(new_product):
# The id seems to be stored as a string in the database
# so might as well always have the ids as strings.
product_id = str(new_product['REF_ID'])
# Prepare for a status value on products
if 'Status' not in new_product:
new_product['Status'] = "Enabled"
products[product_id] = new_product
row += 1
return products
def load_components(wb):
ws = wb[SHEET['components']]
    # Unknown number of rows
row = FIRST_ROW['components']
header_row = row - 1
header_cells = ws[header_row]
header = {}
for cell in header_cells:
cell_val = cell.value
if cell_val == 'ID':
cell_val = 'REF_ID' # Don't want to confuse it with couchdb ids
if cell_val not in SKIP['components']:
# Get cell column as string
cell_column = cell.coordinate.replace(str(header_row), '')
header[cell_column] = cell_val
components = {}
while row < MAX_NR_ROWS:
new_component = {}
for col, header_val in header.items():
val = ws["{}{}".format(col, row)].value
if val is None:
val = ''
elif header_val == 'REF_ID':
# The id seems to be stored as a string in the database
# so might as well always have the ids as strings.
try:
int(val)
except ValueError:
print("ID value {} for row {} is not an id, "
"aborting.".format(val, row))
val = str(val)
new_component[header_val] = val
if new_component['REF_ID'] in components:
# Violates the uniqueness of the ID
raise ValueError("ID {} is included multiple "
"times in the {} sheet. "
"ABORTING.".format(new_component['REF_ID'], type))
if not is_empty_row(new_component):
components[new_component['REF_ID']] = new_component
row += 1
return components
def get_current_version(db):
view_result = db.view('entire_document/by_version', limit=1,
descending=True)
if view_result.rows:
return int(view_result.rows[0].value['Version'])
else:
return 0
def compare_two_objects(obj1, obj2, ignore_updated_time=True):
# Make copies in order to ignore fields
obj1_copy = obj1.copy()
obj2_copy = obj2.copy()
if ignore_updated_time:
if 'Last Updated' in obj1_copy:
obj1_copy.pop('Last Updated')
if 'Last Updated' in obj2_copy:
obj2_copy.pop('Last Updated')
return obj1_copy == obj2_copy
def set_last_updated_field(new_objects, current_objects, object_type):
    # If an object is new or has changed relative to the current version, set its 'Last Updated' field
now = datetime.datetime.now().isoformat()
for id in list(new_objects.keys()):
updated = False
if id in current_objects:
# Beware! This simple == comparison is quite brittle. Sensitive to
# str vs int and such.
the_same = compare_two_objects(new_objects[id],
current_objects[id])
if not the_same:
updated = True
else:
updated = True
if updated:
print("Updating {}: {}".format(object_type, id))
new_objects[id]['Last Updated'] = now
else:
new_objects[id]['Last Updated'] = current_objects[id]['Last Updated']
return new_objects
def main_push(input_file, config, user, user_email,
add_components=False, add_products=False, push=False):
with open(config) as settings_file:
server_settings = yaml.load(settings_file, Loader=yaml.SafeLoader)
couch = Server(server_settings.get("couch_server", None))
wb = load_workbook(input_file, read_only=True, data_only=True)
# setup a default doc that will be pushed
doc = {}
doc['Issued by user'] = user
doc['Issued by user email'] = user_email
doc['Issued at'] = datetime.datetime.now().isoformat()
# A newly pushed document is always a draft
doc['Draft'] = True
# --- Components --- #
comp_db = couch['pricing_components']
components = load_components(wb)
check_unique(components, 'components')
check_not_null(components, 'components')
current_components = get_current_items(comp_db, 'components')
if current_components:
check_conserved(components, current_components, 'components')
# Modify the `last updated`-field of each item
components = set_last_updated_field(components,
current_components,
'component')
# Save it but push it only if products are also parsed correctly
comp_doc = doc.copy()
comp_doc['components'] = components
current_version = get_current_version(comp_db)
comp_doc['Version'] = current_version + 1
# --- Products --- #
prod_db = couch['pricing_products']
products = load_products(wb)
check_unique(products, 'products')
check_not_null(products, 'products')
current_products = get_current_items(prod_db, 'products')
if current_products:
check_conserved(products, current_products, 'products')
# Modify the `last updated`-field of each item
products = set_last_updated_field(products,
current_products,
'product')
prod_doc = doc.copy()
prod_doc['products'] = products
current_version = get_current_version(prod_db)
prod_doc['Version'] = current_version + 1
# Verify no discontinued components are used for enabled products
check_discontinued(components, products)
# --- Push or Print --- #
if push:
comp_db = couch['pricing_components']
prod_db = couch['pricing_products']
curr_comp_rows = comp_db.view("entire_document/by_version", descending=True, limit=1).rows
curr_prod_rows = prod_db.view("entire_document/by_version", descending=True, limit=1).rows
# Check that the latest one is not a draft
if (len(curr_comp_rows) == 0) or (len(curr_prod_rows) == 0):
print("No current version found. This will be the first!")
else:
curr_comp_doc = curr_comp_rows[0].value
curr_prod_doc = curr_prod_rows[0].value
if curr_comp_doc['Draft'] or curr_prod_doc['Draft']:
print("Most recent version is a draft. Please remove or "
"publish this one before pushing a new draft. Aborting!")
return
logger.info(
'Pushing components document version {}'.format(comp_doc['Version'])
)
comp_db.save(comp_doc)
logger.info(
'Pushing products document version {}'.format(prod_doc['Version'])
)
prod_db.save(prod_doc)
else:
# Prettyprint the json output
pprint.pprint(comp_doc)
pprint.pprint(prod_doc)
def main_publish(config, user, user_email, dryrun=True):
with open(config) as settings_file:
server_settings = yaml.load(settings_file, Loader=yaml.SafeLoader)
couch = Server(server_settings.get("couch_server", None))
comp_db = couch['pricing_components']
prod_db = couch['pricing_products']
comp_rows = comp_db.view("entire_document/by_version", descending=True, limit=1).rows
prod_rows = prod_db.view("entire_document/by_version", descending=True, limit=1).rows
if (len(comp_rows) == 0) or (len(prod_rows) == 0):
print("No draft version found to publish. Aborting!")
return
comp_doc = comp_rows[0].value
prod_doc = prod_rows[0].value
if (not comp_doc['Draft']) or (not prod_doc['Draft']):
print("Most recent version is not a draft. Aborting!")
return
now = datetime.datetime.now().isoformat()
comp_doc['Draft'] = False
comp_doc['Published'] = now
prod_doc['Draft'] = False
prod_doc['Published'] = now
if not dryrun:
logger.info(
'Pushing components document version {}'.format(comp_doc['Version'])
)
comp_db.save(comp_doc)
logger.info(
'Pushing products document version {}'.format(prod_doc['Version'])
)
prod_db.save(prod_doc)
else:
print(prod_doc, comp_doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(
title='actions',
dest='subcommand_name',
description="Either 'push' for uploading a draft "
"pricing version or 'publish' to make "
"the current draft the latest version"
)
push_parser = subparsers.add_parser('push')
push_parser.add_argument('pricing_excel_file',
help="The excel file currently used for pricing")
push_parser.add_argument('--statusdb_config', required=True,
help='The genomics-status settings.yaml file.')
push_parser.add_argument('--push', action='store_true',
                             help='Use this tag to actually push to the database,'
                                  ' otherwise it is just a dry run')
push_parser.add_argument('--user', required=True,
                             help='User that changed the document')
push_parser.add_argument('--user_email', required=True,
help='Email for the user who changed the document')
publish_parser = subparsers.add_parser('publish')
publish_parser.add_argument('--statusdb_config', required=True,
help='The genomics-status settings.yaml file.')
publish_parser.add_argument('--user', required=True,
                                help='User that changed the document')
publish_parser.add_argument('--user_email', required=True,
help='Email for the user who changed the document')
publish_parser.add_argument('--dryrun', action='store_true',
help="Use this tag to only print what would "
"have been done")
args = parser.parse_args()
if args.subcommand_name == 'push':
main_push(args.pricing_excel_file, args.statusdb_config, args.user,
args.user_email, push=args.push)
elif args.subcommand_name == 'publish':
main_publish(args.statusdb_config, args.user, args.user_email,
dryrun=args.dryrun)
| 38.428571
| 99
| 0.576861
|
93a0a3f5c554f0ef23e5348c19406280eaa22dbe
| 4,133
|
py
|
Python
|
tensorflow_bring_your_own_california_housing_local_training_and_batch_transform/tensorflow_bring_your_own_california_housing_local_training_and_batch_transform.py
|
aws-samples/amazon-sagemaker-local-mode
|
f470d7b543f7895094816c3f58b9981e044764d8
|
[
"MIT-0"
] | 111
|
2020-11-10T18:09:34.000Z
|
2022-03-28T12:55:37.000Z
|
tensorflow_bring_your_own_california_housing_local_training_and_batch_transform/tensorflow_bring_your_own_california_housing_local_training_and_batch_transform.py
|
aws-samples/amazon-sagemaker-local-mode
|
f470d7b543f7895094816c3f58b9981e044764d8
|
[
"MIT-0"
] | 9
|
2020-11-18T10:43:29.000Z
|
2022-03-08T08:42:52.000Z
|
tensorflow_bring_your_own_california_housing_local_training_and_batch_transform/tensorflow_bring_your_own_california_housing_local_training_and_batch_transform.py
|
aws-samples/amazon-sagemaker-local-mode
|
f470d7b543f7895094816c3f58b9981e044764d8
|
[
"MIT-0"
] | 20
|
2020-11-10T09:13:15.000Z
|
2022-03-02T14:20:42.000Z
|
# This is a sample Python program that trains a BYOC TensorFlow model, and then performs inference.
# This implementation will work on your local computer.
#
# Prerequisites:
# 1. Install required Python packages:
# pip install boto3 sagemaker pandas scikit-learn
# pip install 'sagemaker[local]'
# 2. Docker Desktop has to be installed on your computer, and running.
# 3. Open terminal and run the following commands:
# docker build -t sagemaker-tensorflow2-batch-transform-local container/.
########################################################################################################################
import os
import pandas as pd
import sklearn.model_selection
from sagemaker.estimator import Estimator
from sklearn.datasets import *
from sklearn.preprocessing import StandardScaler
DUMMY_IAM_ROLE = 'arn:aws:iam::111111111111:role/service-role/AmazonSageMaker-ExecutionRole-20200101T000001'
def download_training_and_eval_data():
if os.path.isfile('./data/train/x_train.csv') and \
os.path.isfile('./data/test/x_test.csv') and \
os.path.isfile('./data/train/y_train.csv') and \
os.path.isfile('./data/test/y_test.csv'):
print('Training and evaluation datasets exist. Skipping Download')
else:
print('Downloading training and evaluation dataset')
data_dir = os.path.join(os.getcwd(), 'data')
os.makedirs(data_dir, exist_ok=True)
train_dir = os.path.join(os.getcwd(), 'data/train')
os.makedirs(train_dir, exist_ok=True)
test_dir = os.path.join(os.getcwd(), 'data/test')
os.makedirs(test_dir, exist_ok=True)
input_dir = os.path.join(os.getcwd(), 'data/input')
os.makedirs(input_dir, exist_ok=True)
output_dir = os.path.join(os.getcwd(), 'data/output')
os.makedirs(output_dir, exist_ok=True)
data_set = fetch_california_housing()
X = pd.DataFrame(data_set.data, columns=data_set.feature_names)
Y = pd.DataFrame(data_set.target)
# We partition the dataset into 2/3 training and 1/3 test set.
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.33)
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
pd.DataFrame(x_train).to_csv(os.path.join(train_dir, 'x_train.csv'), header=None, index=False)
pd.DataFrame(x_test).to_csv(os.path.join(test_dir, 'x_test.csv'),header=None, index=False)
pd.DataFrame(x_test).to_csv(os.path.join(input_dir, 'x_test.csv'),header=None, index=False)
pd.DataFrame(y_train).to_csv(os.path.join(train_dir, 'y_train.csv'), header=None, index=False)
pd.DataFrame(y_test).to_csv(os.path.join(test_dir, 'y_test.csv'), header=None, index=False)
print('Downloading completed')
def main():
download_training_and_eval_data()
image = 'sagemaker-tensorflow2-batch-transform-local'
print('Starting model training.')
california_housing_estimator = Estimator(
image,
DUMMY_IAM_ROLE,
hyperparameters={'epochs': 10,
'batch_size': 64,
'learning_rate': 0.1},
instance_count=1,
instance_type="local")
inputs = {'train': 'file://./data/train', 'test': 'file://./data/test'}
california_housing_estimator.fit(inputs, logs=True)
print('Completed model training')
print('Running Batch Transform in local mode')
tensorflow_serving_transformer = california_housing_estimator.transformer(
instance_count=1,
instance_type='local',
output_path='file:./data/output',
)
tensorflow_serving_transformer.transform('file://./data/input',
split_type='Line',
content_type='text/csv')
print('Printing Batch Transform output file content')
output_file = open('./data/output/x_test.csv.out', 'r').read()
print(output_file)
if __name__ == "__main__":
main()
| 38.990566
| 120
| 0.646988
|
47a3c2e0e95006175cacd60fb6f18c129bfc15fb
| 7,723
|
py
|
Python
|
samples/python/tensorflow_object_detection_api/image_batcher.py
|
L-Net-1992/TensorRT
|
34b664d404001bd724cb56b52a6e0e05e1fd97f2
|
[
"Apache-2.0"
] | null | null | null |
samples/python/tensorflow_object_detection_api/image_batcher.py
|
L-Net-1992/TensorRT
|
34b664d404001bd724cb56b52a6e0e05e1fd97f2
|
[
"Apache-2.0"
] | null | null | null |
samples/python/tensorflow_object_detection_api/image_batcher.py
|
L-Net-1992/TensorRT
|
34b664d404001bd724cb56b52a6e0e05e1fd97f2
|
[
"Apache-2.0"
] | null | null | null |
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import numpy as np
from PIL import Image
class ImageBatcher:
"""
Creates batches of pre-processed images.
"""
def __init__(self, input, shape, dtype, max_num_images=None, exact_batches=False, preprocessor="fixed_shape_resizer"):
"""
:param input: The input directory to read images from.
:param shape: The tensor shape of the batch to prepare, either in NCHW or NHWC format.
:param dtype: The (numpy) datatype to cast the batched data to.
:param max_num_images: The maximum number of images to read from the directory.
:param exact_batches: This defines how to handle a number of images that is not an exact multiple of the batch
size. If false, it will pad the final batch with zeros to reach the batch size. If true, it will *remove* the
last few images in excess of a batch size multiple, to guarantee batches are exact (useful for calibration).
:param preprocessor: Set the preprocessor to use, depending on which network is being used.
"""
# Find images in the given input path
input = os.path.realpath(input)
self.images = []
extensions = [".jpg", ".jpeg", ".png", ".bmp"]
def is_image(path):
return os.path.isfile(path) and os.path.splitext(path)[1].lower() in extensions
if os.path.isdir(input):
self.images = [os.path.join(input, f) for f in os.listdir(input) if is_image(os.path.join(input, f))]
self.images.sort()
elif os.path.isfile(input):
if is_image(input):
self.images.append(input)
self.num_images = len(self.images)
if self.num_images < 1:
print("No valid {} images found in {}".format("/".join(extensions), input))
sys.exit(1)
# Handle Tensor Shape
self.dtype = dtype
self.shape = shape
assert len(self.shape) == 4
self.batch_size = shape[0]
assert self.batch_size > 0
self.format = None
self.width = -1
self.height = -1
if self.shape[1] == 3:
self.format = "NCHW"
self.height = self.shape[2]
self.width = self.shape[3]
elif self.shape[3] == 3:
self.format = "NHWC"
self.height = self.shape[1]
self.width = self.shape[2]
assert all([self.format, self.width > 0, self.height > 0])
# Adapt the number of images as needed
if max_num_images and 0 < max_num_images < len(self.images):
self.num_images = max_num_images
if exact_batches:
self.num_images = self.batch_size * (self.num_images // self.batch_size)
if self.num_images < 1:
print("Not enough images to create batches")
sys.exit(1)
self.images = self.images[0:self.num_images]
# Subdivide the list of images into batches
self.num_batches = 1 + int((self.num_images - 1) / self.batch_size)
self.batches = []
for i in range(self.num_batches):
start = i * self.batch_size
end = min(start + self.batch_size, self.num_images)
self.batches.append(self.images[start:end])
# Indices
self.image_index = 0
self.batch_index = 0
self.preprocessor = preprocessor
def preprocess_image(self, image_path):
"""
The image preprocessor loads an image from disk and prepares it as needed for batching. This includes padding,
resizing, normalization, data type casting, and transposing.
This Image Batcher implements one algorithm for now:
* Resizes and pads the image to fit the input size.
:param image_path: The path to the image on disk to load.
        :return: Two values: A numpy array holding the image sample, ready to be concatenated into the rest of the
batch, and the resize scale used, if any.
"""
def resize_pad(image, pad_color=(0, 0, 0)):
"""
A subroutine to implement padding and resizing. This will resize the image to fit fully within the input
size, and pads the remaining bottom-right portions with the value provided.
:param image: The PIL image object
:pad_color: The RGB values to use for the padded area. Default: Black/Zeros.
:return: Two values: The PIL image object already padded and cropped, and the resize scale used.
"""
# Get characteristics.
width, height = image.size
width_scale = width / self.width
height_scale = height / self.height
# Depending on preprocessor, box scaling will be slightly different.
if self.preprocessor == "fixed_shape_resizer":
scale = [self.width / width, self.height / height]
image = image.resize((self.width, self.height), resample=Image.BILINEAR)
return image, scale
elif self.preprocessor == "keep_aspect_ratio_resizer":
scale = 1.0 / max(width_scale, height_scale)
image = image.resize((round(width * scale), round(height * scale)), resample=Image.BILINEAR)
pad = Image.new("RGB", (self.width, self.height))
pad.paste(pad_color, [0, 0, self.width, self.height])
pad.paste(image)
return pad, scale
scale = None
image = Image.open(image_path)
image = image.convert(mode='RGB')
if self.preprocessor == "fixed_shape_resizer" or self.preprocessor == "keep_aspect_ratio_resizer":
            # Resize & pad with ImageNet mean values and keep as [0, 255] normalization
image, scale = resize_pad(image, (124, 116, 104))
image = np.asarray(image, dtype=self.dtype)
else:
print("Preprocessing method {} not supported".format(self.preprocessor))
sys.exit(1)
if self.format == "NCHW":
image = np.transpose(image, (2, 0, 1))
return image, scale
def get_batch(self):
"""
Retrieve the batches. This is a generator object, so you can use it within a loop as:
        for batch, images, scales in batcher.get_batch():
            ...
        Or outside of a loop with the next() function.
:return: A generator yielding three items per iteration: a numpy array holding a batch of images, the list of
paths to the images loaded within this batch, and the list of resize scales for each image in the batch.
"""
for i, batch_images in enumerate(self.batches):
batch_data = np.zeros(self.shape, dtype=self.dtype)
batch_scales = [None] * len(batch_images)
for i, image in enumerate(batch_images):
self.image_index += 1
batch_data[i], batch_scales[i] = self.preprocess_image(image)
self.batch_index += 1
yield batch_data, batch_images, batch_scales
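if __name__ == "__main__":
    # Minimal usage sketch, not part of the upstream sample; the "./images"
    # directory, tensor shape and dtype below are assumptions. get_batch()
    # yields the preprocessed batch, the source image paths and the per-image
    # resize scales.
    batcher = ImageBatcher("./images", shape=(8, 3, 640, 640), dtype=np.float32,
                           preprocessor="fixed_shape_resizer")
    for batch, image_paths, scales in batcher.get_batch():
        print(batch.shape, len(image_paths), scales[0])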
| 44.641618
| 122
| 0.625793
|
7529855d393e5525459fe00af5b777073ea11714
| 7,645
|
py
|
Python
|
client/tests/interconnect/network_test.py
|
beehive-lab/DFLOW
|
9710e76bd957df3b9cd3cecad2967990ca1415f7
|
[
"Apache-2.0"
] | 1
|
2021-04-17T00:48:35.000Z
|
2021-04-17T00:48:35.000Z
|
client/tests/interconnect/network_test.py
|
beehive-lab/DFLOW
|
9710e76bd957df3b9cd3cecad2967990ca1415f7
|
[
"Apache-2.0"
] | null | null | null |
client/tests/interconnect/network_test.py
|
beehive-lab/DFLOW
|
9710e76bd957df3b9cd3cecad2967990ca1415f7
|
[
"Apache-2.0"
] | null | null | null |
import socket
import unittest
from unittest.mock import (
patch,
MagicMock,
call
)
from client.interconnect.network import NetworkLink
class NetworkLinkTestCase(unittest.TestCase):
"""
A suite of tests surrounding the NetworkLink class.
"""
def setUp(self) -> None:
self._test_host_name = 'test_host'
self._test_port_num = 8080
self._test_data = b'some_test_binary_string'
self._test_ca_cert = 'test_ca_cert'
self._test_cert = 'test_cert'
self._test_key = 'test_key'
self._setup_mocks_and_patches()
def _setup_mocks_and_patches(self):
"""
Set up all the patches and mocks needed for the tests in this suite.
"""
# Set up patches for socket.Socket() create_default_context()
# along with and add cleanup.
self._mock_socket_constructor = patch('socket.socket').start()
self._mock_create_default_context = (
patch('ssl.create_default_context').start()
)
self.addCleanup(patch.stopall)
# Create a mock socket instance and make sure it is returned by the
# mock socket constructor.
self._mock_socket_instance = MagicMock()
self._mock_socket_constructor.return_value = \
self._mock_socket_instance
# Create a mock SSLContext instance and make sure it is returned by
# the mock create_default_context method.
self._mock_ssl_context = MagicMock()
self._mock_create_default_context.return_value = \
self._mock_ssl_context
# Create a mock ssl socket and return it from SSLContext.wrap_socket().
self._mock_ssl_socket = MagicMock()
self._mock_ssl_context.wrap_socket.return_value = \
self._mock_ssl_socket
def test_network_link_create(self):
"""
Test that network link is created correctly.
"""
# Create the network link.
NetworkLink(
self._test_host_name,
self._test_port_num,
self._test_ca_cert,
self._test_cert,
self._test_key
)
# Make sure the ssl context was created with correct parameters.
self._mock_create_default_context.assert_called_with(
cafile=self._test_ca_cert
)
# Make sure the certificate and private key were loaded correctly.
self._mock_ssl_context.load_cert_chain.assert_called_with(
certfile=self._test_cert,
keyfile=self._test_key
)
# Make sure the hostname checking is set correctly.
self.assertFalse(self._mock_ssl_context.check_hostname)
# Make sure the socket was created with correct parameters.
self._mock_socket_constructor.assert_called_with(
socket.AF_INET,
socket.SOCK_STREAM
)
# Make sure the ssl context was used to wrap the socket.
self._mock_ssl_context.wrap_socket.assert_called_with(
self._mock_socket_instance
)
def test_network_link_connect(self):
"""
Test that network link connects correctly.
"""
# Create the network link and call connect().
network_link = NetworkLink(
self._test_host_name,
self._test_port_num,
self._test_ca_cert,
self._test_cert,
self._test_key
)
network_link.connect()
# Make sure connect was called on the socket with correct
# parameters.
self._mock_ssl_socket.connect.assert_called_with(
(self._test_host_name, self._test_port_num)
)
def test_network_link_disconnect(self):
"""
Test that network link disconnects correctly.
"""
# Create the network link and call connect() then disconnect.
network_link = NetworkLink(
self._test_host_name,
self._test_port_num,
self._test_ca_cert,
self._test_cert,
self._test_key
)
network_link.connect()
network_link.disconnect()
# Make sure disconnect was called on the socket.
self._mock_ssl_socket.close.assert_called()
def test_network_link_reconnect(self):
"""
Test that network link reconnects correctly.
"""
# Create a manager mock that we can use to verify the order of
# calls is correct.
mock_call_manager = MagicMock()
mock_call_manager.attach_mock(self._mock_socket_constructor, 'create')
mock_call_manager.attach_mock(self._mock_ssl_socket.connect, 'connect')
mock_call_manager.attach_mock(self._mock_ssl_socket.close, 'close')
# Create the network link and call connect() then reconnect.
network_link = NetworkLink(
self._test_host_name,
self._test_port_num,
self._test_ca_cert,
self._test_cert,
self._test_key
)
network_link._connected = True
network_link.reconnect()
# Check that when reconnect is called the existing socket is first
# closed before a new one is created.
self.assertEqual(
[
call.create(socket.AF_INET, socket.SOCK_STREAM),
call.close(),
call.create(socket.AF_INET, socket.SOCK_STREAM),
call.connect((self._test_host_name, self._test_port_num))
],
mock_call_manager.mock_calls
)
def test_network_link_send(self):
"""
Test that network link sends data correctly.
"""
# Create the network link.
network_link = NetworkLink(
self._test_host_name,
self._test_port_num,
self._test_ca_cert,
self._test_cert,
self._test_key
)
network_link._connected = True
# Create a byte string and attempt to send it through the network link.
network_link.send(self._test_data)
# Make sure the correct data was passed to the underlying socket.
self._mock_ssl_socket.sendall.assert_called_with(self._test_data)
def test_network_link_receive(self):
"""
Test that network link receives data correctly.
"""
# Create some messages that we expect to receive.
expected_data_values = [
b'msg1',
b'msg2',
b'msg3',
b'msg4',
b'msg5',
b'msg6',
]
# Set the socket recv method to return the data in an unpredictable
# manner such as may be seen when streaming bytes over the network.
self._mock_ssl_socket.recv.side_effect = [
b'msg1\nmsg2\nmsg3\n',
b'msg',
b'4',
b'\n',
b'msg5\nmsg',
b'6\n'
]
# Create the network link.
network_link = NetworkLink(
self._test_host_name,
self._test_port_num,
self._test_ca_cert,
self._test_cert,
self._test_key
)
network_link._connected = True
# Repeatedly call the receive() method on the network link and verify
# that the messages returned are correct.
for expected_data in expected_data_values:
actual_data = network_link.receive()
# Make sure the recv() method of the underlying socket was called
# and the data was returned as expected.
self.assertEqual(expected_data, actual_data)
if __name__ == '__main__':
unittest.main()
| 32.121849
| 79
| 0.615958
|
efbdd1ba6842d85e82149346e9b4559527a1aacd
| 2,023
|
py
|
Python
|
tensorflow/python/profiler/profiler.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 71
|
2017-05-25T16:02:15.000Z
|
2021-06-09T16:08:08.000Z
|
tensorflow/python/profiler/profiler.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/python/profiler/profiler.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 31
|
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""profiler python module provides APIs to profile TensorFlow models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.core.profiler.tfprof_log_pb2 import OpLogProto
from tensorflow.core.profiler.tfprof_output_pb2 import AdviceProto
from tensorflow.core.profiler.tfprof_output_pb2 import GraphNodeProto
from tensorflow.core.profiler.tfprof_output_pb2 import MultiGraphNodeProto
from tensorflow.python.profiler.model_analyzer import advise
from tensorflow.python.profiler.model_analyzer import profile
from tensorflow.python.profiler.model_analyzer import Profiler
from tensorflow.python.profiler.option_builder import ProfileOptionBuilder
from tensorflow.python.profiler.tfprof_logger import write_op_log
from tensorflow.python.util.tf_export import tf_export
_allowed_symbols = [
'Profiler',
'profile',
'ProfileOptionBuilder',
'advise',
'write_op_log',
]
_allowed_symbols.extend([
'GraphNodeProto',
'MultiGraphNodeProto',
'AdviceProto',
'OpLogProto',
])
# Export protos
tf_export('profiler.GraphNodeProto')(GraphNodeProto)
tf_export('profiler.MultiGraphNodeProto')(MultiGraphNodeProto)
tf_export('profiler.AdviceProto')(AdviceProto)
tf_export('profiler.OpLogProto')(OpLogProto)
| 36.125
| 80
| 0.773604
|
b6ae07747fc7df309d15f33d337600f94a139bfe
| 432
|
py
|
Python
|
molo/core/migrations/0071_remove_old_image_hashes.py
|
Ishma59/molo
|
4fd31df9266bc251e09e9339a132d3ccd4143c69
|
[
"BSD-2-Clause"
] | 25
|
2015-09-26T13:45:30.000Z
|
2018-09-13T14:12:20.000Z
|
molo/core/migrations/0071_remove_old_image_hashes.py
|
Ishma59/molo
|
4fd31df9266bc251e09e9339a132d3ccd4143c69
|
[
"BSD-2-Clause"
] | 510
|
2015-05-29T09:30:44.000Z
|
2018-12-11T09:08:11.000Z
|
molo/core/migrations/0071_remove_old_image_hashes.py
|
Ishma59/molo
|
4fd31df9266bc251e09e9339a132d3ccd4143c69
|
[
"BSD-2-Clause"
] | 5
|
2020-03-26T19:30:13.000Z
|
2020-09-04T16:35:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_imageinfo(apps, schema_editor):
ImageInfo = apps.get_model('core.ImageInfo')
ImageInfo.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0070_add_service_directory_api'),
]
operations = [
migrations.RunPython(delete_imageinfo),
]
| 20.571429
| 51
| 0.694444
|
64afdd8337e1499dcd12e439c8d3a5081a86d0aa
| 10,323
|
py
|
Python
|
2- Programa/geopy-master/test/geocoders/base.py
|
federicopratto/Algoritmos-y-Programacion-1---TP-1
|
d9c1071f16171ad563ec02eb4b1d5ea6f657b2ac
|
[
"MIT"
] | null | null | null |
2- Programa/geopy-master/test/geocoders/base.py
|
federicopratto/Algoritmos-y-Programacion-1---TP-1
|
d9c1071f16171ad563ec02eb4b1d5ea6f657b2ac
|
[
"MIT"
] | null | null | null |
2- Programa/geopy-master/test/geocoders/base.py
|
federicopratto/Algoritmos-y-Programacion-1---TP-1
|
d9c1071f16171ad563ec02eb4b1d5ea6f657b2ac
|
[
"MIT"
] | null | null | null |
import unittest
from mock import patch, sentinel
import warnings
import geopy.compat
import geopy.geocoders
from geopy.exc import GeocoderNotFound, GeocoderQueryError
from geopy.geocoders import GoogleV3, get_geocoder_for_service
from geopy.geocoders.base import Geocoder
from geopy.point import Point
class GetGeocoderTestCase(unittest.TestCase):
def test_get_geocoder_for_service(self):
self.assertEqual(get_geocoder_for_service("google"), GoogleV3)
self.assertEqual(get_geocoder_for_service("googlev3"), GoogleV3)
def test_get_geocoder_for_service_raises_for_unknown(self):
with self.assertRaises(GeocoderNotFound):
get_geocoder_for_service("")
class GeocoderTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.geocoder = Geocoder()
def test_init_with_args(self):
format_string = '%s Los Angeles, CA USA'
scheme = 'http'
timeout = 942
proxies = {'https': '192.0.2.0'}
user_agent = 'test app'
ssl_context = sentinel.some_ssl_context
geocoder = Geocoder(
format_string=format_string,
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
)
for attr in ('format_string', 'scheme', 'timeout', 'proxies',
'ssl_context'):
self.assertEqual(locals()[attr], getattr(geocoder, attr))
self.assertEqual(user_agent, geocoder.headers['User-Agent'])
def test_init_with_defaults(self):
attr_to_option = {
'format_string': 'default_format_string',
'scheme': 'default_scheme',
'timeout': 'default_timeout',
'proxies': 'default_proxies',
'ssl_context': 'default_ssl_context',
}
geocoder = Geocoder()
for geocoder_attr, options_attr in attr_to_option.items():
self.assertEqual(getattr(geopy.geocoders.options, options_attr),
getattr(geocoder, geocoder_attr))
self.assertEqual(geopy.geocoders.options.default_user_agent,
geocoder.headers['User-Agent'])
@patch.object(geopy.geocoders.options, 'default_proxies', {'https': '192.0.2.0'})
@patch.object(geopy.geocoders.options, 'default_timeout', 10)
@patch.object(geopy.geocoders.options, 'default_ssl_context',
sentinel.some_ssl_context)
def test_init_with_none_overrides_default(self):
geocoder = Geocoder(proxies=None, timeout=None, ssl_context=None)
self.assertIsNone(geocoder.proxies)
self.assertIsNone(geocoder.timeout)
self.assertIsNone(geocoder.ssl_context)
@patch.object(geopy.geocoders.options, 'default_user_agent',
'mocked_user_agent/0.0.0')
def test_user_agent_default(self):
geocoder = Geocoder()
self.assertEqual(geocoder.headers['User-Agent'],
'mocked_user_agent/0.0.0')
def test_user_agent_custom(self):
geocoder = Geocoder(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
@patch.object(geopy.geocoders.options, 'default_timeout', 12)
def test_call_geocoder_timeout(self):
url = 'spam://ham/eggs'
g = Geocoder()
self.assertEqual(g.timeout, 12)
# Suppress another (unrelated) warning when running tests on an old Python.
with patch('geopy.compat._URLLIB_SUPPORTS_SSL_CONTEXT', True), \
patch.object(g, 'urlopen') as mock_urlopen:
g._call_geocoder(url, raw=True)
args, kwargs = mock_urlopen.call_args
self.assertEqual(kwargs['timeout'], 12)
g._call_geocoder(url, timeout=7, raw=True)
args, kwargs = mock_urlopen.call_args
self.assertEqual(kwargs['timeout'], 7)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
g._call_geocoder(url, timeout=None, raw=True)
args, kwargs = mock_urlopen.call_args
self.assertEqual(kwargs['timeout'], 12)
self.assertEqual(1, len(w))
def test_ssl_context_for_old_python(self):
# Before (exclusive) 2.7.9 and 3.4.3.
# Keep the reference, because `geopy.compat.HTTPSHandler` will be
# mocked below.
orig_HTTPSHandler = geopy.compat.HTTPSHandler
class HTTPSHandlerStub(geopy.compat.HTTPSHandler):
def __init__(self): # No `context` arg.
orig_HTTPSHandler.__init__(self)
if hasattr(geopy.compat, '__warningregistry__'):
# If running tests on an old Python, the warning we are going
# to test might have been already issued and recorded in
# the registry. Clean it up, so we could receive the warning again.
del geopy.compat.__warningregistry__
with patch('geopy.compat._URLLIB_SUPPORTS_SSL_CONTEXT',
geopy.compat._is_urllib_context_supported(HTTPSHandlerStub)), \
patch('geopy.compat.HTTPSHandler', HTTPSHandlerStub), \
warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertFalse(geopy.compat._URLLIB_SUPPORTS_SSL_CONTEXT)
self.assertEqual(0, len(w))
Geocoder()
self.assertEqual(1, len(w))
def test_ssl_context_for_newer_python(self):
# From (inclusive) 2.7.9 and 3.4.3.
# Keep the reference, because `geopy.compat.HTTPSHandler` will be
# mocked below.
orig_HTTPSHandler = geopy.compat.HTTPSHandler
class HTTPSHandlerStub(geopy.compat.HTTPSHandler):
def __init__(self, context=None):
orig_HTTPSHandler.__init__(self)
if hasattr(geopy.compat, '__warningregistry__'):
# If running tests on an old Python, the warning we are going
# to test might have been already issued and recorded in
# the registry. Clean it up, so we could receive the warning again.
del geopy.compat.__warningregistry__
with patch('geopy.compat._URLLIB_SUPPORTS_SSL_CONTEXT',
geopy.compat._is_urllib_context_supported(HTTPSHandlerStub)), \
patch('geopy.compat.HTTPSHandler', HTTPSHandlerStub), \
patch.object(HTTPSHandlerStub, '__init__', autospec=True,
side_effect=HTTPSHandlerStub.__init__
) as mock_https_handler_init, \
warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertTrue(geopy.compat._URLLIB_SUPPORTS_SSL_CONTEXT)
for ssl_context in (None, sentinel.some_ssl_context):
mock_https_handler_init.reset_mock()
Geocoder(ssl_context=ssl_context)
args, kwargs = mock_https_handler_init.call_args
self.assertIs(kwargs['context'], ssl_context)
self.assertEqual(0, len(w))
class GeocoderPointCoercionTestCase(unittest.TestCase):
coordinates = (40.74113, -73.989656)
coordinates_str = "40.74113,-73.989656"
coordinates_address = "175 5th Avenue, NYC, USA"
def setUp(self):
self.method = Geocoder._coerce_point_to_string
def test_point(self):
latlon = self.method(Point(*self.coordinates))
self.assertEqual(latlon, self.coordinates_str)
def test_tuple_of_floats(self):
latlon = self.method(self.coordinates)
self.assertEqual(latlon, self.coordinates_str)
def test_string(self):
latlon = self.method(self.coordinates_str)
self.assertEqual(latlon, self.coordinates_str)
def test_string_is_trimmed(self):
coordinates_str_spaces = " %s , %s " % self.coordinates
latlon = self.method(coordinates_str_spaces)
self.assertEqual(latlon, self.coordinates_str)
def test_output_format_is_respected(self):
expected = " %s %s " % self.coordinates[::-1]
lonlat = self.method(self.coordinates_str, " %(lon)s %(lat)s ")
self.assertEqual(lonlat, expected)
def test_address(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
latlon = self.method(self.coordinates_address)
# 1 for latitude normalization (first string char being
# treated as latitude).
# 2 for the deprecated as-is input bypass.
self.assertEqual(2, len(w))
self.assertEqual(latlon, self.coordinates_address)
class GeocoderFormatBoundingBoxTestCase(unittest.TestCase):
def setUp(self):
self.method = Geocoder._format_bounding_box
def test_string_raises(self):
with self.assertRaises(GeocoderQueryError):
self.method("5,5,5,5")
def test_list_of_1_raises(self):
with self.assertRaises(GeocoderQueryError):
self.method([5])
# TODO maybe raise for `[5, 5]` too?
def test_list_of_3_raises(self):
with self.assertRaises(GeocoderQueryError):
self.method([5, 5, 5])
def test_list_of_4_raises(self):
with self.assertRaises(GeocoderQueryError):
self.method([5, 5, 5, 5])
def test_list_of_5_raises(self):
with self.assertRaises(GeocoderQueryError):
self.method([5, 5, 5, 5, 5])
def test_points(self):
bbox = self.method([Point(50, 160), Point(30, 170)])
self.assertEqual(bbox, "30.0,160.0,50.0,170.0")
def test_lists(self):
bbox = self.method([[50, 160], [30, 170]])
self.assertEqual(bbox, "30.0,160.0,50.0,170.0")
bbox = self.method([["50", "160"], ["30", "170"]])
self.assertEqual(bbox, "30.0,160.0,50.0,170.0")
def test_strings(self):
bbox = self.method(["50, 160", "30,170"])
self.assertEqual(bbox, "30.0,160.0,50.0,170.0")
def test_output_format(self):
bbox = self.method([Point(50, 160), Point(30, 170)],
" %(lon2)s|%(lat2)s -- %(lat1)s|%(lon1)s ")
self.assertEqual(bbox, " 170.0|50.0 -- 30.0|160.0 ")
| 38.662921
| 85
| 0.637024
|
c763bdc7784e04b7c321ea78189bbc35d1fb4175
| 9,337
|
py
|
Python
|
fastai/tabular/data.py
|
feras-oughali/fastai
|
4052f7adb441ab8a00eaa807b444a4e583b6bcc7
|
[
"Apache-2.0"
] | 11
|
2019-08-06T11:44:24.000Z
|
2022-03-12T20:04:56.000Z
|
fastai/tabular/data.py
|
feras-oughali/fastai
|
4052f7adb441ab8a00eaa807b444a4e583b6bcc7
|
[
"Apache-2.0"
] | 3
|
2021-05-20T11:24:31.000Z
|
2022-02-26T06:04:21.000Z
|
fastai/tabular/data.py
|
feras-oughali/fastai
|
4052f7adb441ab8a00eaa807b444a4e583b6bcc7
|
[
"Apache-2.0"
] | 9
|
2018-11-03T10:56:17.000Z
|
2020-10-19T20:44:33.000Z
|
"Data loading pipeline for structured data support. Loads from pandas DataFrame"
from ..torch_core import *
from .transform import *
from ..basic_data import *
from ..data_block import *
from ..basic_train import *
from .models import *
from pandas.api.types import is_numeric_dtype, is_categorical_dtype
__all__ = ['TabularDataBunch', 'TabularLine', 'TabularList', 'TabularProcessor', 'tabular_learner']
OptTabTfms = Optional[Collection[TabularProc]]
#def emb_sz_rule(n_cat:int)->int: return min(50, (n_cat//2)+1)
def emb_sz_rule(n_cat:int)->int: return min(600, round(1.6 * n_cat**0.56))
def def_emb_sz(classes, n, sz_dict=None):
"Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`."
sz_dict = ifnone(sz_dict, {})
n_cat = len(classes[n])
sz = sz_dict.get(n, int(emb_sz_rule(n_cat))) # rule of thumb
return n_cat,sz
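# Illustrative values for the embedding-size rule of thumb above (computed from the formula,
# not taken from the original source):
#   emb_sz_rule(10)     -> 6
#   emb_sz_rule(1000)   -> 77
#   emb_sz_rule(100000) -> 600   # capped by the min(600, ...) term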
class TabularLine(ItemBase):
"Basic item for tabular data."
def __init__(self, cats, conts, classes, names):
self.cats,self.conts,self.classes,self.names = cats,conts,classes,names
self.data = [tensor(cats), tensor(conts)]
def __str__(self):
res = ''
for c, n in zip(self.cats, self.names[:len(self.cats)]):
res += f"{n} {(self.classes[n][c])}; "
for c,n in zip(self.conts, self.names[len(self.cats):]):
res += f'{n} {c:.4f}; '
return res
class TabularProcessor(PreProcessor):
"Regroup the `procs` in one `PreProcessor`."
def __init__(self, ds:ItemBase=None, procs=None):
procs = ifnone(procs, ds.procs if ds is not None else None)
self.procs = listify(procs)
def process_one(self, item):
df = pd.DataFrame([item,item])
for proc in self.procs: proc(df, test=True)
if len(self.cat_names) != 0:
codes = np.stack([c.cat.codes.values for n,c in df[self.cat_names].items()], 1).astype(np.int64) + 1
else: codes = [[]]
if len(self.cont_names) != 0:
conts = np.stack([c.astype('float32').values for n,c in df[self.cont_names].items()], 1)
else: conts = [[]]
classes = None
col_names = list(df[self.cat_names].columns.values) + list(df[self.cont_names].columns.values)
return TabularLine(codes[0], conts[0], classes, col_names)
def process(self, ds):
if ds.inner_df is None:
ds.classes,ds.cat_names,ds.cont_names = self.classes,self.cat_names,self.cont_names
ds.col_names = self.cat_names + self.cont_names
ds.preprocessed = True
return
for i,proc in enumerate(self.procs):
if isinstance(proc, TabularProc): proc(ds.inner_df, test=True)
else:
#cat and cont names may have been changed by transform (like Fill_NA)
proc = proc(ds.cat_names, ds.cont_names)
proc(ds.inner_df)
ds.cat_names,ds.cont_names = proc.cat_names,proc.cont_names
self.procs[i] = proc
self.cat_names,self.cont_names = ds.cat_names,ds.cont_names
if len(ds.cat_names) != 0:
ds.codes = np.stack([c.cat.codes.values for n,c in ds.inner_df[ds.cat_names].items()], 1).astype(np.int64) + 1
self.classes = ds.classes = OrderedDict({n:np.concatenate([['#na#'],c.cat.categories.values])
for n,c in ds.inner_df[ds.cat_names].items()})
cat_cols = list(ds.inner_df[ds.cat_names].columns.values)
else: ds.codes,ds.classes,self.classes,cat_cols = None,None,None,[]
if len(ds.cont_names) != 0:
ds.conts = np.stack([c.astype('float32').values for n,c in ds.inner_df[ds.cont_names].items()], 1)
cont_cols = list(ds.inner_df[ds.cont_names].columns.values)
else: ds.conts,cont_cols = None,[]
ds.col_names = cat_cols + cont_cols
ds.preprocessed = True
class TabularDataBunch(DataBunch):
"Create a `DataBunch` suitable for tabular data."
@classmethod
def from_df(cls, path, df:DataFrame, dep_var:str, valid_idx:Collection[int], procs:OptTabTfms=None,
cat_names:OptStrList=None, cont_names:OptStrList=None, classes:Collection=None,
test_df=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False)->DataBunch:
"Create a `DataBunch` from `df` and `valid_idx` with `dep_var`. `kwargs` are passed to `DataBunch.create`."
cat_names = ifnone(cat_names, []).copy()
cont_names = ifnone(cont_names, list(set(df)-set(cat_names)-{dep_var}))
procs = listify(procs)
src = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(valid_idx))
src = src.label_from_df(cols=dep_var) if classes is None else src.label_from_df(cols=dep_var, classes=classes)
if test_df is not None: src.add_test(TabularList.from_df(test_df, cat_names=cat_names, cont_names=cont_names,
processor = src.train.x.processor))
return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device,
collate_fn=collate_fn, no_check=no_check)
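# A hedged usage sketch for `TabularDataBunch.from_df`; the DataFrame `df`, the column names and
# the validation index range below are illustrative assumptions, not part of this module. The
# procs (FillMissing, Categorify, Normalize) are assumed to come from the star-imported `.transform`.
#
#   procs = [FillMissing, Categorify, Normalize]
#   data = TabularDataBunch.from_df('.', df, dep_var='salary', valid_idx=range(800, 1000),
#                                   procs=procs, cat_names=['workclass', 'education'],
#                                   cont_names=['age', 'hours-per-week'])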
class TabularList(ItemList):
"Basic `ItemList` for tabular data."
_item_cls=TabularLine
_processor=TabularProcessor
_bunch=TabularDataBunch
def __init__(self, items:Iterator, cat_names:OptStrList=None, cont_names:OptStrList=None,
procs=None, **kwargs)->'TabularList':
super().__init__(range_of(items), **kwargs)
#dataframe is in inner_df, items is just a range of index
if cat_names is None: cat_names = []
if cont_names is None: cont_names = []
self.cat_names,self.cont_names,self.procs = cat_names,cont_names,procs
self.copy_new += ['cat_names', 'cont_names', 'procs']
self.preprocessed = False
@classmethod
def from_df(cls, df:DataFrame, cat_names:OptStrList=None, cont_names:OptStrList=None, procs=None, **kwargs)->'ItemList':
"Get the list of inputs in the `col` of `path/csv_name`."
return cls(items=range(len(df)), cat_names=cat_names, cont_names=cont_names, procs=procs, inner_df=df.copy(), **kwargs)
def get(self, o):
if not self.preprocessed: return self.inner_df.iloc[o] if hasattr(self, 'inner_df') else self.items[o]
codes = [] if self.codes is None else self.codes[o]
conts = [] if self.conts is None else self.conts[o]
return self._item_cls(codes, conts, self.classes, self.col_names)
def get_emb_szs(self, sz_dict=None):
"Return the default embedding sizes suitable for this data or takes the ones in `sz_dict`."
return [def_emb_sz(self.classes, n, sz_dict) for n in self.cat_names]
def reconstruct(self, t:Tensor):
return self._item_cls(t[0], t[1], self.classes, self.col_names)
def show_xys(self, xs, ys)->None:
"Show the `xs` (inputs) and `ys` (targets)."
from IPython.display import display, HTML
items,names = [], xs[0].names + ['target']
for i, (x,y) in enumerate(zip(xs,ys)):
res = []
cats = x.cats if len(x.cats.size()) > 0 else []
conts = x.conts if len(x.conts.size()) > 0 else []
for c, n in zip(cats, x.names[:len(cats)]):
res.append(x.classes[n][c])
res += [f'{c:.4f}' for c in conts] + [y]
items.append(res)
items = np.array(items)
df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
with pd.option_context('display.max_colwidth', -1):
display(HTML(df.to_html(index=False)))
def show_xyzs(self, xs, ys, zs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions)."
from IPython.display import display, HTML
items,names = [], xs[0].names + ['target', 'prediction']
for i, (x,y,z) in enumerate(zip(xs,ys,zs)):
res = []
cats = x.cats if len(x.cats.size()) > 0 else []
conts = x.conts if len(x.conts.size()) > 0 else []
for c, n in zip(cats, x.names[:len(cats)]):
res.append(str(x.classes[n][c]))
res += [f'{c:.4f}' for c in conts] + [y, z]
items.append(res)
items = np.array(items)
df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
with pd.option_context('display.max_colwidth', -1):
display(HTML(df.to_html(index=False)))
def tabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None,
ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **learn_kwargs):
"Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params."
emb_szs = data.get_emb_szs(ifnone(emb_szs, {}))
model = TabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop,
y_range=y_range, use_bn=use_bn)
return Learner(data, model, metrics=metrics, **learn_kwargs)
| 52.162011
| 134
| 0.630395
|
9c134f51f7254102c61f7fa66304b0fa2e45a7d4
| 533
|
py
|
Python
|
marginTrading/tests/test_spam/test_usuarios.py
|
sambiase/pycrypto
|
7e3fa9a846edd0ee95c9b1584385c38fa9ae3ee2
|
[
"MIT"
] | 3
|
2021-07-30T14:54:27.000Z
|
2022-01-19T19:57:12.000Z
|
marginTrading/tests/test_spam/test_usuarios.py
|
sambiase/pycrypto
|
7e3fa9a846edd0ee95c9b1584385c38fa9ae3ee2
|
[
"MIT"
] | 5
|
2021-07-20T22:41:41.000Z
|
2021-09-07T19:47:18.000Z
|
marginTrading/tests/test_spam/test_usuarios.py
|
sambiase/pycrypto
|
7e3fa9a846edd0ee95c9b1584385c38fa9ae3ee2
|
[
"MIT"
] | null | null | null |
from marginTrading.spam.modelos import Usuario
def test_salvar_usuario(sessao):
usuario = Usuario(nome='Andre',email='andreteste@gmail.com')
sessao.salvar(usuario)
    assert isinstance(usuario.id, int)  # check that the saved user has an ID and that it is an int
def test_listar_usuario(sessao):
    usuarios = [Usuario(nome='Andre', email='andreteste@gmail.com')]  # sample user list (assumed; the original assignment was left blank)
    for usuario in usuarios:  # save every user in the list
        sessao.salvar(usuario)
    assert usuarios == sessao.listar()  # check that the saved users appear in the listing
| 33.3125
| 108
| 0.722326
|
db142bbd67aaca22d0cac45dcc53ce53030942e5
| 4,659
|
py
|
Python
|
tests/performance/compare_perfs.py
|
PrecisionMetrics/openjpeg
|
eb2ebe92f93970bf82ee3b69c32a6975900e91a0
|
[
"BSD-2-Clause"
] | 823
|
2015-02-16T08:42:47.000Z
|
2022-03-28T08:37:57.000Z
|
tests/performance/compare_perfs.py
|
PrecisionMetrics/openjpeg
|
eb2ebe92f93970bf82ee3b69c32a6975900e91a0
|
[
"BSD-2-Clause"
] | 832
|
2015-06-15T07:57:22.000Z
|
2022-03-31T12:41:46.000Z
|
tests/performance/compare_perfs.py
|
PrecisionMetrics/openjpeg
|
eb2ebe92f93970bf82ee3b69c32a6975900e91a0
|
[
"BSD-2-Clause"
] | 522
|
2015-03-10T18:53:47.000Z
|
2022-03-25T21:05:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, IntoPIX SA
# Contact: support@intopix.com
# Author: Even Rouault
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
def Usage():
print('Usage: compare_perfs.py [-noise_threshold val_in_pct]')
print(' [-warning_threshold val_in_pct]')
print(' [-error_threshold val_in_pct]')
print(' [-global_error_threshold val_in_pct]')
print(' ref.csv new.csv')
sys.exit(1)
ref_filename = None
new_filename = None
noise_threshold = 2
warning_threshold = 4
error_threshold = 6
global_error_threshold = 2
i = 1
while i < len(sys.argv):
if sys.argv[i] == '-noise_threshold' and i + 1 < len(sys.argv):
i += 1
noise_threshold = int(sys.argv[i])
elif sys.argv[i] == '-warning_threshold' and i + 1 < len(sys.argv):
i += 1
warning_threshold = int(sys.argv[i])
elif sys.argv[i] == '-error_threshold' and i + 1 < len(sys.argv):
i += 1
error_threshold = int(sys.argv[i])
elif sys.argv[i] == '-global_error_threshold' and i + 1 < len(sys.argv):
i += 1
global_error_threshold = int(sys.argv[i])
elif sys.argv[i][0] == '-':
Usage()
elif ref_filename is None:
ref_filename = sys.argv[i]
elif new_filename is None:
new_filename = sys.argv[i]
else:
Usage()
i += 1
if ref_filename is None or new_filename is None:
Usage()
assert noise_threshold < warning_threshold
assert warning_threshold < error_threshold
assert global_error_threshold >= noise_threshold
assert global_error_threshold <= error_threshold
ref_lines = open(ref_filename, 'rt').readlines()[1:]
new_lines = open(new_filename, 'rt').readlines()[1:]
if len(ref_lines) != len(new_lines):
raise Exception('files are not comparable')
ret_code = 0
for i in range(len(ref_lines)):
line = ref_lines[i].replace('\n', '')
filename_ref, num_iterations_ref, num_threads_ref, command_ref, \
_, time_ms_ref = line.split(',')
line = new_lines[i].replace('\n', '')
filename_new, num_iterations_new, num_threads_new, command_new, \
_, time_ms_new = line.split(',')
assert filename_ref == filename_new
assert num_iterations_ref == num_iterations_new
assert num_threads_ref == num_threads_new
assert command_ref == command_new
time_ms_ref = int(time_ms_ref)
time_ms_new = int(time_ms_new)
if filename_ref == 'TOTAL':
display = 'TOTAL'
else:
display = '%s, %s iterations, %s threads, %s' % \
(filename_ref, num_iterations_ref, num_threads_ref, command_ref)
display += ': ref_time %d ms, new_time %d ms' % (time_ms_ref, time_ms_new)
var_pct = 100.0 * (time_ms_new - time_ms_ref) / time_ms_ref
if abs(var_pct) <= noise_threshold:
display += ', (stable) %0.1f %%' % var_pct
elif var_pct < 0:
display += ', (improvement) %0.1f %%' % var_pct
else:
display += ', (regression) %0.1f %%' % var_pct
if filename_ref == 'TOTAL' and var_pct > global_error_threshold:
display += ', ERROR_THRESHOLD'
ret_code = 1
elif var_pct > error_threshold:
display += ', ERROR_THRESHOLD'
ret_code = 1
elif var_pct > warning_threshold:
display += ', WARNING_THRESHOLD'
print(display)
sys.exit(ret_code)
| 38.504132
| 78
| 0.672677
|
e9a9ffafc76c405d5cc240fff695782388d57a60
| 20,641
|
py
|
Python
|
sdk/metricsadvisor/azure-ai-metricsadvisor/tests/base_testcase_aad.py
|
a-santamaria/azure-sdk-for-python
|
9dec418ad621ac75f217e56e901f15b6624800b0
|
[
"MIT"
] | null | null | null |
sdk/metricsadvisor/azure-ai-metricsadvisor/tests/base_testcase_aad.py
|
a-santamaria/azure-sdk-for-python
|
9dec418ad621ac75f217e56e901f15b6624800b0
|
[
"MIT"
] | null | null | null |
sdk/metricsadvisor/azure-ai-metricsadvisor/tests/base_testcase_aad.py
|
a-santamaria/azure-sdk-for-python
|
9dec418ad621ac75f217e56e901f15b6624800b0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import datetime
from devtools_testutils import AzureTestCase
from azure_devtools.scenario_tests import (
ReplayableTest,
create_random_name
)
from azure.ai.metricsadvisor import (
MetricsAdvisorKeyCredential,
MetricsAdvisorAdministrationClient,
MetricsAdvisorClient,
)
from azure.ai.metricsadvisor.models import (
SQLServerDataFeed,
DataFeedSchema,
DataFeedMetric,
DataFeedDimension,
DataFeedGranularity,
DataFeedIngestionSettings,
DataFeedMissingDataPointFillSettings,
DataFeedRollupSettings,
DataFeedOptions,
MetricAlertConfiguration,
MetricAnomalyAlertScope,
MetricAnomalyAlertConditions,
MetricBoundaryCondition,
TopNGroupScope,
SeverityCondition,
MetricDetectionCondition,
MetricSeriesGroupDetectionCondition,
MetricSingleSeriesDetectionCondition,
SmartDetectionCondition,
SuppressCondition,
ChangeThresholdCondition,
HardThresholdCondition,
EmailNotificationHook,
WebNotificationHook,
)
from azure.identity import DefaultAzureCredential
class MockCredential():
def get_token(self, *scopes, **kwargs):
from azure.core.credentials import AccessToken
return AccessToken("fake-token", 0)
class TestMetricsAdvisorAdministrationClientBase(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key', 'x-api-key']
def __init__(self, method_name):
super(TestMetricsAdvisorAdministrationClientBase, self).__init__(method_name)
self.vcr.match_on = ["path", "method", "query"]
if self.is_live:
service_endpoint = self.get_settings_value("METRICS_ADVISOR_ENDPOINT")
self.sql_server_connection_string = self.get_settings_value("METRICS_ADVISOR_SQL_SERVER_CONNECTION_STRING")
self.azure_table_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_TABLE_CONNECTION_STRING")
self.azure_blob_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_BLOB_CONNECTION_STRING")
self.azure_cosmosdb_connection_string = self.get_settings_value("METRICS_ADVISOR_COSMOS_DB_CONNECTION_STRING")
self.http_request_get_url = self.get_settings_value("METRICS_ADVISOR_HTTP_GET_URL")
self.http_request_post_url = self.get_settings_value("METRICS_ADVISOR_HTTP_POST_URL")
self.application_insights_api_key = self.get_settings_value("METRICS_ADVISOR_APPLICATION_INSIGHTS_API_KEY")
self.azure_data_explorer_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_DATA_EXPLORER_CONNECTION_STRING")
self.influxdb_connection_string = self.get_settings_value("METRICS_ADVISOR_INFLUX_DB_CONNECTION_STRING")
self.influxdb_password = self.get_settings_value("METRICS_ADVISOR_INFLUX_DB_PASSWORD")
self.azure_datalake_account_key = self.get_settings_value("METRICS_ADVISOR_AZURE_DATALAKE_ACCOUNT_KEY")
self.mongodb_connection_string = self.get_settings_value("METRICS_ADVISOR_AZURE_MONGO_DB_CONNECTION_STRING")
self.mysql_connection_string = self.get_settings_value("METRICS_ADVISOR_MYSQL_CONNECTION_STRING")
self.postgresql_connection_string = self.get_settings_value("METRICS_ADVISOR_POSTGRESQL_CONNECTION_STRING")
self.elasticsearch_auth_header = self.get_settings_value("METRICS_ADVISOR_ELASTICSEARCH_AUTH_HEADER")
self.anomaly_detection_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_DETECTION_CONFIGURATION_ID")
self.data_feed_id = self.get_settings_value("METRICS_ADVISOR_DATA_FEED_ID")
self.metric_id = self.get_settings_value("METRICS_ADVISOR_METRIC_ID")
credential = DefaultAzureCredential()
self.scrubber.register_name_pair(
self.sql_server_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_table_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_blob_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_cosmosdb_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.http_request_get_url,
"connectionstring"
)
self.scrubber.register_name_pair(
self.http_request_post_url,
"connectionstring"
)
self.scrubber.register_name_pair(
self.application_insights_api_key,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_data_explorer_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.influxdb_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.influxdb_password,
"connectionstring"
)
self.scrubber.register_name_pair(
self.azure_datalake_account_key,
"connectionstring"
)
self.scrubber.register_name_pair(
self.mongodb_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.mysql_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.postgresql_connection_string,
"connectionstring"
)
self.scrubber.register_name_pair(
self.elasticsearch_auth_header,
"connectionstring"
)
self.scrubber.register_name_pair(
self.metric_id,
"metric_id"
)
self.scrubber.register_name_pair(
self.data_feed_id,
"data_feed_id"
)
self.scrubber.register_name_pair(
self.anomaly_detection_configuration_id,
"anomaly_detection_configuration_id"
)
else:
service_endpoint = "https://endpointname.cognitiveservices.azure.com"
self.sql_server_connection_string = "SQL_SERVER_CONNECTION_STRING"
self.azure_table_connection_string = "AZURE_TABLE_CONNECTION_STRING"
self.azure_blob_connection_string = "AZURE_BLOB_CONNECTION_STRING"
self.azure_cosmosdb_connection_string = "COSMOS_DB_CONNECTION_STRING"
self.http_request_get_url = "METRICS_ADVISOR_HTTP_GET_URL"
self.http_request_post_url = "METRICS_ADVISOR_HTTP_POST_URL"
self.application_insights_api_key = "METRICS_ADVISOR_APPLICATION_INSIGHTS_API_KEY"
self.azure_data_explorer_connection_string = "METRICS_ADVISOR_AZURE_DATA_EXPLORER_CONNECTION_STRING"
self.influxdb_connection_string = "METRICS_ADVISOR_INFLUXDB_CONNECTION_STRING"
self.influxdb_password = "METRICS_ADVISOR_INFLUXDB_PASSWORD"
self.azure_datalake_account_key = "METRICS_ADVISOR_AZURE_DATALAKE_ACCOUNT_KEY"
self.mongodb_connection_string = "METRICS_ADVISOR_AZURE_MONGODB_CONNECTION_STRING"
self.mysql_connection_string = "METRICS_ADVISOR_MYSQL_CONNECTION_STRING"
self.postgresql_connection_string = "METRICS_ADVISOR_POSTGRESQL_CONNECTION_STRING"
self.elasticsearch_auth_header = "METRICS_ADVISOR_ELASTICSEARCH_AUTH"
self.anomaly_detection_configuration_id = "anomaly_detection_configuration_id"
self.metric_id = "metric_id"
self.data_feed_id = "data_feed_id"
credential = MockCredential()
self.admin_client = MetricsAdvisorAdministrationClient(service_endpoint, credential)
def _create_data_feed(self, name):
name = create_random_name(name)
return self.admin_client.create_data_feed(
name=name,
source=SQLServerDataFeed(
connection_string=self.sql_server_connection_string,
query="select * from adsample2 where Timestamp = @StartTime"
),
granularity="Daily",
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost"),
DataFeedMetric(name="revenue")
],
dimensions=[
DataFeedDimension(name="category"),
DataFeedDimension(name="city")
],
),
ingestion_settings="2019-10-01T00:00:00Z",
)
def _create_data_feed_and_detection_config(self, name):
data_feed = self._create_data_feed(name)
detection_config_name = create_random_name(name)
detection_config = self.admin_client.create_detection_configuration(
name=detection_config_name,
metric_id=data_feed.metric_ids[0],
description="testing",
whole_series_detection_condition=MetricDetectionCondition(
smart_detection_condition=SmartDetectionCondition(
sensitivity=50,
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=50,
min_ratio=50
)
)
)
)
return detection_config, data_feed
def _create_data_feed_for_update(self, name):
data_feed_name = create_random_name(name)
return self.admin_client.create_data_feed(
name=data_feed_name,
source=SQLServerDataFeed(
connection_string=self.sql_server_connection_string,
query=u"select * from adsample2 where Timestamp = @StartTime"
),
granularity=DataFeedGranularity(
granularity_type="Daily",
),
schema=DataFeedSchema(
metrics=[
DataFeedMetric(name="cost", display_name="display cost", description="the cost"),
DataFeedMetric(name="revenue", display_name="display revenue", description="the revenue")
],
dimensions=[
DataFeedDimension(name="category", display_name="display category"),
DataFeedDimension(name="city", display_name="display city")
],
timestamp_column="Timestamp"
),
ingestion_settings=DataFeedIngestionSettings(
ingestion_begin_time=datetime.datetime(2019, 10, 1),
data_source_request_concurrency=0,
ingestion_retry_delay=-1,
ingestion_start_offset=-1,
stop_retry_after=-1,
),
options=DataFeedOptions(
admin_emails=["yournamehere@microsoft.com"],
data_feed_description="my first data feed",
missing_data_point_fill_settings=DataFeedMissingDataPointFillSettings(
fill_type="SmartFilling"
),
rollup_settings=DataFeedRollupSettings(
rollup_type="NoRollup",
rollup_method="None",
),
viewer_emails=["viewers"],
access_mode="Private",
action_link_template="action link template"
)
)
def _create_alert_config_for_update(self, name):
detection_config, data_feed = self._create_data_feed_and_detection_config(name)
alert_config_name = create_random_name(name)
alert_config = self.admin_client.create_alert_configuration(
name=alert_config_name,
cross_metrics_operator="AND",
metric_alert_configurations=[
MetricAlertConfiguration(
detection_configuration_id=detection_config.id,
alert_scope=MetricAnomalyAlertScope(
scope_type="TopN",
top_n_group_in_scope=TopNGroupScope(
top=5,
period=10,
min_top_count=9
)
),
alert_conditions=MetricAnomalyAlertConditions(
metric_boundary_condition=MetricBoundaryCondition(
direction="Both",
companion_metric_id=data_feed.metric_ids[0],
lower=1.0,
upper=5.0
)
)
),
MetricAlertConfiguration(
detection_configuration_id=detection_config.id,
alert_scope=MetricAnomalyAlertScope(
scope_type="SeriesGroup",
series_group_in_scope={'city': 'Shenzhen'}
),
alert_conditions=MetricAnomalyAlertConditions(
severity_condition=SeverityCondition(
min_alert_severity="Low",
max_alert_severity="High"
)
)
),
MetricAlertConfiguration(
detection_configuration_id=detection_config.id,
alert_scope=MetricAnomalyAlertScope(
scope_type="WholeSeries"
),
alert_conditions=MetricAnomalyAlertConditions(
severity_condition=SeverityCondition(
min_alert_severity="Low",
max_alert_severity="High"
)
)
)
],
hook_ids=[]
)
return alert_config, data_feed, detection_config
def _create_detection_config_for_update(self, name):
data_feed = self._create_data_feed(name)
detection_config_name = create_random_name("testupdated")
detection_config = self.admin_client.create_detection_configuration(
name=detection_config_name,
metric_id=data_feed.metric_ids[0],
description="My test metric anomaly detection configuration",
whole_series_detection_condition=MetricDetectionCondition(
cross_conditions_operator="AND",
smart_detection_condition=SmartDetectionCondition(
sensitivity=50,
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=50,
min_ratio=50
)
),
hard_threshold_condition=HardThresholdCondition(
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=5,
min_ratio=5
),
lower_bound=0,
upper_bound=100
),
change_threshold_condition=ChangeThresholdCondition(
change_percentage=50,
shift_point=30,
within_range=True,
anomaly_detector_direction="Both",
suppress_condition=SuppressCondition(
min_number=2,
min_ratio=2
)
)
),
series_detection_conditions=[MetricSingleSeriesDetectionCondition(
series_key={"city": "Shenzhen", "category": "Jewelry"},
smart_detection_condition=SmartDetectionCondition(
anomaly_detector_direction="Both",
sensitivity=63,
suppress_condition=SuppressCondition(
min_number=1,
min_ratio=100
)
)
)],
series_group_detection_conditions=[MetricSeriesGroupDetectionCondition(
series_group_key={"city": "Sao Paulo"},
smart_detection_condition=SmartDetectionCondition(
anomaly_detector_direction="Both",
sensitivity=63,
suppress_condition=SuppressCondition(
min_number=1,
min_ratio=100
)
)
)]
)
return detection_config, data_feed
def _create_email_hook_for_update(self, name):
return self.admin_client.create_hook(
hook=EmailNotificationHook(
name=name,
emails_to_alert=["yournamehere@microsoft.com"],
description="my email hook",
external_link="external link"
)
)
def _create_web_hook_for_update(self, name):
return self.admin_client.create_hook(
hook=WebNotificationHook(
name=name,
endpoint="https://httpbin.org/post",
description="my web hook",
external_link="external link",
username="krista",
password="123"
)
)
class TestMetricsAdvisorClientBase(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['Ocp-Apim-Subscription-Key', 'x-api-key']
def __init__(self, method_name):
super(TestMetricsAdvisorClientBase, self).__init__(method_name)
self.vcr.match_on = ["path", "method", "query"]
if self.is_live:
service_endpoint = self.get_settings_value("METRICS_ADVISOR_ENDPOINT")
self.anomaly_detection_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_DETECTION_CONFIGURATION_ID")
self.anomaly_alert_configuration_id = self.get_settings_value("METRICS_ADVISOR_ANOMALY_ALERT_CONFIGURATION_ID")
self.metric_id = self.get_settings_value("METRICS_ADVISOR_METRIC_ID")
self.incident_id = self.get_settings_value("METRICS_ADVISOR_INCIDENT_ID")
self.dimension_name = self.get_settings_value("METRICS_ADVISOR_DIMENSION_NAME")
self.feedback_id = self.get_settings_value("METRICS_ADVISOR_FEEDBACK_ID")
self.alert_id = self.get_settings_value("METRICS_ADVISOR_ALERT_ID")
credential = DefaultAzureCredential()
self.scrubber.register_name_pair(
self.anomaly_detection_configuration_id,
"anomaly_detection_configuration_id"
)
self.scrubber.register_name_pair(
self.anomaly_alert_configuration_id,
"anomaly_alert_configuration_id"
)
self.scrubber.register_name_pair(
self.metric_id,
"metric_id"
)
self.scrubber.register_name_pair(
self.incident_id,
"incident_id"
)
self.scrubber.register_name_pair(
self.dimension_name,
"dimension_name"
)
self.scrubber.register_name_pair(
self.feedback_id,
"feedback_id"
)
self.scrubber.register_name_pair(
self.alert_id,
"alert_id"
)
else:
service_endpoint = "https://endpointname.cognitiveservices.azure.com"
self.anomaly_detection_configuration_id = "anomaly_detection_configuration_id"
self.anomaly_alert_configuration_id = "anomaly_alert_configuration_id"
self.metric_id = "metric_id"
self.incident_id = "incident_id"
self.dimension_name = "dimension_name"
self.feedback_id = "feedback_id"
self.alert_id = "alert_id"
credential = MockCredential()
self.client = MetricsAdvisorClient(service_endpoint, credential)
| 44.677489
| 137
| 0.604283
|
e2eba7521e600ddada043006185707a7d1480f9c
| 12,562
|
bzl
|
Python
|
kotlin/kotlin.bzl
|
hsyed/rules_kotlin_old
|
7abdd03cf63dcbc629033aac82de7681d27eebe0
|
[
"Apache-2.0"
] | 9
|
2018-02-08T04:07:22.000Z
|
2019-11-15T04:17:33.000Z
|
kotlin/kotlin.bzl
|
hsyed/rules_kotlin_old
|
7abdd03cf63dcbc629033aac82de7681d27eebe0
|
[
"Apache-2.0"
] | null | null | null |
kotlin/kotlin.bzl
|
hsyed/rules_kotlin_old
|
7abdd03cf63dcbc629033aac82de7681d27eebe0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kotlin Rules
### Setup
Add the following snippet to your `WORKSPACE` file:
```bzl
git_repository(
name = "io_bazel_rules_kotlin",
remote = "https://github.com/bazelbuild/rules_kotlin.git",
commit = "<COMMIT_HASH>",
)
load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kotlin_repositories")
kotlin_repositories(kotlin_release_version = "1.2.21")
```
To enable persistent worker support, add the following to the appropriate `bazelrc` file:
```
build --strategy=KotlinCompile=worker
test --strategy=KotlinCompile=worker
```
### Standard Libraries
The Kotlin libraries bundled in a Kotlin release should be used with the rules; the mandatory standard libraries are added implicitly. After enabling
the repository, the following Kotlin libraries are also made available from the workspace `com_github_jetbrains_kotlin`:
* `kotlin-test`,
* `kotlin-reflect`.
So if you need to add reflect as a dep, use the label `@com_github_jetbrains_kotlin//:reflect`.
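For example, a minimal sketch (the target name and source file below are illustrative):
```bzl
kotlin_library(
    name = "uses_reflection",
    srcs = ["ReflectUser.kt"],
    deps = ["@com_github_jetbrains_kotlin//:reflect"],
)
```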
### Caveats
* The compiler is currently not configurable [issue](https://github.com/hsyed/rules_kotlin/issues/3).
* The compiler is hardwired to target jdk8 and language and api levels "1.2" [issue](https://github.com/hsyed/rules_kotlin/issues/3).
"""
# This file is the main import -- it shouldn't grow out of hand; the reason it already contains so much is the limitations of skydoc.
########################################################################################################################
# Common Definitions
########################################################################################################################
load("//kotlin/rules:defs.bzl", "KOTLIN_REPO_ROOT")
# The files types that may be passed to the core Kotlin compile rule.
_kt_compile_filetypes = FileType([
".kt",
".java",
])
_jar_filetype = FileType([".jar"])
_srcjar_filetype = FileType([
".jar",
"-sources.jar",
])
########################################################################################################################
# Rule Attributes
########################################################################################################################
_implicit_deps = {
"_kotlin_compiler_classpath": attr.label_list(
allow_files = True,
default = [
Label("@" + KOTLIN_REPO_ROOT + "//:compiler"),
Label("@" + KOTLIN_REPO_ROOT + "//:reflect"),
Label("@" + KOTLIN_REPO_ROOT + "//:script-runtime"),
],
),
"_kotlinw": attr.label(
default = Label("//kotlin/workers:compiler_jvm"),
executable = True,
cfg = "host",
),
# The kotlin runtime
"_kotlin_runtime": attr.label(
single_file = True,
default = Label("@" + KOTLIN_REPO_ROOT + "//:runtime"),
),
# The kotlin stdlib
"_kotlin_std": attr.label_list(default = [
Label("@" + KOTLIN_REPO_ROOT + "//:stdlib"),
Label("@" + KOTLIN_REPO_ROOT + "//:stdlib-jdk7"),
Label("@" + KOTLIN_REPO_ROOT + "//:stdlib-jdk8"),
]),
"_kotlin_reflect": attr.label(
single_file = True,
default =
Label("@" + KOTLIN_REPO_ROOT + "//:reflect"),
),
"_singlejar": attr.label(
executable = True,
cfg = "host",
default = Label("@bazel_tools//tools/jdk:singlejar"),
allow_files = True,
),
"_zipper": attr.label(
executable = True,
cfg = "host",
default = Label("@bazel_tools//tools/zip:zipper"),
allow_files = True,
),
"_java": attr.label(
executable = True,
cfg = "host",
default = Label("@bazel_tools//tools/jdk:java"),
allow_files = True,
),
"_jdk": attr.label(
default = Label("@bazel_tools//tools/jdk"),
cfg = "host",
allow_files = True,
),
# "_langtools": attr.label(
# default = Label("@bazel_tools//tools/jdk:langtools"),
# cfg = "host",
# allow_files = True
# ),
"_java_stub_template": attr.label(default = Label("@kt_java_stub_template//file")),
}
_common_attr = dict(_implicit_deps.items() + {
"srcs": attr.label_list(
default = [],
allow_files = _kt_compile_filetypes,
),
# only accept deps which are java providers.
"deps": attr.label_list(),
"runtime_deps": attr.label_list(default = []),
# Add debugging info for any rules.
# "verbose": attr.int(default = 0),
# "opts": attr.string_dict(),
# Advanced options
# "x_opts": attr.string_list(),
# Plugin options
# "plugin_opts": attr.string_dict(),
"resources": attr.label_list(
default = [],
allow_files = True,
),
"resource_strip_prefix": attr.string(default = ""),
"resource_jars": attr.label_list(default = []),
# Other args for the compiler
}.items())
_runnable_common_attr = dict(_common_attr.items() + {
"data": attr.label_list(
allow_files = True,
cfg = "data",
),
"jvm_flags": attr.string_list(
default = [],
),
}.items())
########################################################################################################################
# Outputs: All the outputs produced by the various rules are modelled here.
########################################################################################################################
_common_outputs = dict(
jar = "%{name}.jar",
jdeps = "%{name}.jdeps",
srcjar = "%{name}-sources.jar",
)
_binary_outputs = dict(_common_outputs.items() + {
# "wrapper": "%{name}_wrapper.sh",
}.items())
########################################################################################################################
# Repositories
########################################################################################################################
load(
"//kotlin:kotlin_compiler_repositories.bzl",
"KOTLIN_CURRENT_RELEASE",
_kotlin_compiler_repository = "kotlin_compiler_repository",
)
def kotlin_repositories(
kotlin_release_version=KOTLIN_CURRENT_RELEASE
):
"""Call this in the WORKSPACE file to setup the Kotlin rules.
Args:
kotlin_release_version: The kotlin compiler release version. If this is not set the latest release version is
chosen by default.
"""
_kotlin_compiler_repository(kotlin_release_version)
########################################################################################################################
# Simple Rules:
########################################################################################################################
load(
"//kotlin/rules:rules.bzl",
_kotlin_binary_impl = "kotlin_binary_impl",
_kotlin_import_impl = "kotlin_import_impl",
_kotlin_junit_test_impl = "kotlin_junit_test_impl",
_kotlin_library_impl = "kotlin_library_impl",
)
kotlin_library = rule(
attrs = dict(_common_attr.items() + {
"exports": attr.label_list(default = []),
}.items()),
outputs = _common_outputs,
implementation = _kotlin_library_impl,
)
"""This rule compiles and links Kotlin and Java sources into a .jar file.
Args:
srcs: The list of source files that are processed to create the target, this can contain both Java and Kotlin files. Java analysis occurs first so Kotlin
classes may depend on Java classes in the same compilation unit.
exports: Exported libraries.
Deps listed here will be made available to other rules, as if the parents explicitly depended on these deps.
This is not true for regular (non-exported) deps.
resources: A list of data files to include in a Java jar.
resource_strip_prefix: The path prefix to strip from Java resources; files residing under a common prefix such as `src/main/resources` or `src/test/resources`
will have stripping applied by convention.
resource_jars: Set of archives containing Java resources. If specified, the contents of these jars are merged into the output jar.
runtime_deps: Libraries to make available to the final binary or test at runtime only. Like ordinary deps, these will appear on the runtime classpath, but
unlike them, not on the compile-time classpath.
data: The list of files needed by this rule at runtime. See general comments about `data` at [Attributes common to all build rules](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes).
deps: A list of dependencies of this rule.See general comments about `deps` at [Attributes common to all build rules](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes).
"""
kotlin_binary = rule(
attrs = dict(_runnable_common_attr.items() + {"main_class": attr.string(mandatory = True)}.items()),
executable = True,
outputs = _binary_outputs,
implementation = _kotlin_binary_impl,
)
"""Builds a Java archive ("jar file"), plus a wrapper shell script with the same name as the rule. The wrapper shell script uses a classpath that includes,
among other things, a jar file for each library on which the binary depends.
**Note:** This rule does not have all of the features found in [`java_binary`](https://docs.bazel.build/versions/master/be/java.html#java_binary). It is
appropriate for building workspace utilities. `java_binary` should be preferred for release artefacts.
Args:
main_class: Name of class with main() method to use as entry point.
jvm_flags: A list of flags to embed in the wrapper script generated for running this binary. Note: does not yet support make variable substitution.
"""
kotlin_test = rule(
attrs = dict(_runnable_common_attr.items() + {
"_bazel_test_runner": attr.label(
default = Label("@bazel_tools//tools/jdk:TestRunner_deploy.jar"),
allow_files = True,
),
"test_class": attr.string(),
# "main_class": attr.string(),
}.items()),
executable = True,
outputs = _binary_outputs,
test = True,
implementation = _kotlin_junit_test_impl,
)
"""Setup a simple kotlin_test.
Args:
test_class: The Java class to be loaded by the test runner.
"""
kotlin_import = rule(
attrs = {
"jars": attr.label_list(
allow_files = True,
mandatory = True,
),
"srcjar": attr.label(
allow_single_file = True,
),
},
implementation = _kotlin_import_impl,
)
# The pairing of src and class is used by IntelliJ to attach sources; this is picked up via the kt provider attribute.
#
# once current format and semantics are finalized add runtime_deps, exports, data, neverlink, testonly.
# * runtime_deps should accept JavaInfo's (this includes KotlinInfo) and maven_jar filegroups.
# * exports should only accept JavaInfo's (this include KotlinInfo) but not filegroup. The jars attribute takes care of importing the jars without generating
# ijars.
"""(experimental) Import Kotlin jars.
## examples
```bzl
# Import a collection of class jars and source jars from filegroup labels.
kotlin_import(
name = "kodein",
jars = [
"@com_github_salomonbrys_kodein_kodein//jar:file",
"@com_github_salomonbrys_kodein_kodein_core//jar:file"
]
)
# Import a single kotlin jar.
kotlin_import(
name = "kotlin-runtime",
jars = ["lib/kotlin-runtime.jar"],
srcjar = "lib/kotlin-runtime-sources.jar"
)
```
Args:
jars: The jars listed here are equivalent to an export attribute. The label should point either to a single class jar, or to multiple filegroup labels. When a
label is a file_provider it should follow the conventions used in repositories generated by the maven_jar rule -- i.e., the rule expects a file_provider
with a single class jar and a single source jar. A source jar is recognized by the suffix `-sources.jar`.
srcjar: The sources for the class jar. This should be set when importing a single class jar.
"""
| 38.415902
| 221
| 0.619408
|