| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
tests/test_audit.py | benjaminp/grouper | 0 | 12769551 |
from collections import namedtuple
from datetime import datetime, timedelta
from urllib.parse import urlencode
import pytest
from fixtures import standard_graph, graph, users, groups, session, permissions # noqa
from fixtures import fe_app as app # noqa
from grouper.audit import (
assert_can_join, assert_controllers_are_auditors, get_audits, user_is_auditor,
UserNotAuditor,
)
from grouper.models import AuditLog, AuditLogCategory
from url_util import url
from util import add_member, grant_permission
def test_group_audited(standard_graph, session, groups, permissions): # noqa
""" Ensure that the audited flag gets set appropriate only groups and inherited down the
graph. """
graph = standard_graph # noqa
assert not graph.get_group_details("security-team")["audited"]
assert graph.get_group_details("serving-team")["audited"]
assert graph.get_group_details("team-sre")["audited"]
def test_user_is_auditor(standard_graph): # noqa
""" Ensure users get the ability to audit. """
assert user_is_auditor("<EMAIL>")
assert not user_is_auditor("<EMAIL>")
def test_assert_can_join(users, groups): # noqa
""" Test various audit constraints to ensure that users can/can't join as appropriate. """
# Non-auditor can join non-audited group as owner.
assert assert_can_join(groups["team-infra"], users["<EMAIL>"], role="owner")
# Auditor can join non-audited group as owner.
assert assert_can_join(groups["team-infra"], users["<EMAIL>"], role="owner")
# Non-auditor can NOT join audited group as owner.
with pytest.raises(UserNotAuditor):
assert not assert_can_join(groups["serving-team"], users["<EMAIL>"], role="owner")
# Non-auditor can join audited group as member.
assert assert_can_join(groups["serving-team"], users["<EMAIL>"])
# Group with non-auditor owner can NOT join audited group.
with pytest.raises(UserNotAuditor):
assert not assert_can_join(groups["serving-team"], groups["tech-ops"])
# Group with auditor owner can join audited group.
assert assert_can_join(groups["serving-team"], groups["sad-team"])
# Group with non-auditor owner can join non-audited group.
assert assert_can_join(groups["team-infra"], groups["tech-ops"])
# Group with auditor owner, but sub-group with non-auditor owner, can NOT join audited group.
with pytest.raises(UserNotAuditor):
assert not assert_can_join(groups["audited-team"], groups["serving-team"])
def test_assert_controllers_are_auditors(groups): # noqa
""" Test the method that determines if a subtree is controlled by auditors. """
# Group is safely controlled by auditors.
assert assert_controllers_are_auditors(groups["sad-team"])
# Group with non-auditor owner should fail this test.
with pytest.raises(UserNotAuditor):
assert not assert_controllers_are_auditors(groups["team-infra"])
@pytest.mark.gen_test
def test_audit_end_to_end(session, users, groups, http_client, base_url): # noqa
""" Tests an end-to-end audit cycle. """
groupname = 'audited-team'
zay_id = users["<EMAIL>"].id
gary_id = users["<EMAIL>"].id
# make everyone an auditor or global audit will have issues
add_member(groups["auditors"], users["<EMAIL>"])
add_member(groups["auditors"], users["<EMAIL>"])
add_member(groups["auditors"], users["<EMAIL>"])
add_member(groups["auditors"], users["<EMAIL>"])
# add some users to test removal
add_member(groups[groupname], users["<EMAIL>"])
add_member(groups[groupname], users["<EMAIL>"])
# start the audit
end_at_str = (datetime.now() + timedelta(days=10)).strftime('%m/%d/%Y')
fe_url = url(base_url, '/audits/create')
resp = yield http_client.fetch(fe_url, method="POST",
body=urlencode({'ends_at': end_at_str}), headers={'X-Grouper-User': '<EMAIL>'})
assert resp.code == 200
open_audits = get_audits(session, only_open=True).all()
assert len(open_audits) == 4, 'audits created'
assert groupname in [x.group.name for x in open_audits], 'group we expect also gets audit'
# pull all the info we need to resolve audits, avoids detached sqlalchemy sessions
AuditMember = namedtuple('AuditMember', 'am_id, edge_type, edge_id')
Audit = namedtuple('Audit', 'audit_id, owner_name, group_name, audit_members')
all_group_ids = [x.group.id for x in open_audits]
    open_audits = [Audit(x.id, next(iter(x.group.my_owners())), x.group.name,
[AuditMember(am.id, am.edge.member_type, am.edge_id) for am in x.my_members()]) for
x in open_audits]
# approve everything but the one we added members to
for one_audit in open_audits:
fe_url = url(base_url, '/audits/{}/complete'.format(one_audit.audit_id))
if one_audit.group_name == groupname:
continue
# blanket approval
body = urlencode({"audit_{}".format(am.am_id): "approved" for am in
one_audit.audit_members})
resp = yield http_client.fetch(fe_url, method="POST", body=body,
headers={'X-Grouper-User': one_audit.owner_name})
assert resp.code == 200
open_audits = get_audits(session, only_open=True).all()
assert len(open_audits) == 1, 'only our test group remaining'
one_audit = open_audits[0]
    one_audit.id  # touching the attribute forces SQLAlchemy to load it before it is used below
body_dict = {}
for am in one_audit.my_members():
if gary_id == am.member.id:
# deny
body_dict["audit_{}".format(am.id)] = "remove"
else:
# approve
body_dict["audit_{}".format(am.id)] = "approved"
    owner_name = next(iter(one_audit.group.my_owners()))
fe_url = url(base_url, '/audits/{}/complete'.format(one_audit.id))
resp = yield http_client.fetch(fe_url, method="POST", body=urlencode(body_dict),
headers={'X-Grouper-User': owner_name})
assert resp.code == 200
# check all the logs
assert len(AuditLog.get_entries(session, action='start_audit')) == 1, 'global start is logged'
assert len(AuditLog.get_entries(session,
action='complete_global_audit')) == 1, 'global complete is logged'
for group_id in all_group_ids:
assert len(AuditLog.get_entries(session, on_group_id=group_id, action='complete_audit',
category=AuditLogCategory.audit)) == 1, 'complete entry for each group'
assert len(AuditLog.get_entries(session, on_user_id=gary_id,
category=AuditLogCategory.audit)) == 1, 'removal AuditLog entry on user'
| 2.171875 | 2 |
packages/main/src/RPA/scripts/google_authenticate.py | ete99/rpaframework | 0 | 12769552 | import argparse
import base64
import os
import pickle
from google_auth_oauthlib.flow import InstalledAppFlow
SERVICE_SCOPES = {
"drive": ["drive.appdata", "drive.file", "drive.install", "drive"],
"apps-script": ["script.projects"],
}
def get_arguments(parser):
parser.add_argument(
"--credentials",
dest="credentials_file",
default="credentials.json",
help="project credentials file, by default tries to access file "
"from current directory",
)
parser.add_argument(
"--scopes",
dest="scopes",
default=None,
help="authentication scopes as comma separated list",
)
parser.add_argument(
"--service",
dest="service",
default=None,
help="set authentication scopes for the given service, "
"supported services: drive,apps-script",
)
parser.add_argument(
"--console",
dest="console_flow",
action="store_true",
default=False,
help="use to run console based auth flow",
)
return parser.parse_args()
def start():
parser = argparse.ArgumentParser(description="Getting Google OAuth token")
args = get_arguments(parser)
if (
not os.path.exists(args.credentials_file)
or os.stat(args.credentials_file).st_size == 0
):
print(
"WARNING: Credentials file '%s' does not exist or is empty\n"
% args.credentials_file
)
parser.print_help()
return
auth_scopes = []
if args.service and args.service in SERVICE_SCOPES.keys():
auth_scopes.extend(SERVICE_SCOPES[args.service])
if args.scopes:
auth_scopes.extend(args.scopes.split(","))
if not auth_scopes:
print("WARNING: No authentication scopes have been defined!\n")
parser.print_help()
return
googlescopes = [f"https://www.googleapis.com/auth/{scope}" for scope in auth_scopes]
print("Google OAuth Flow for scopes: %s" % (",".join(auth_scopes)))
flow = InstalledAppFlow.from_client_secrets_file(
args.credentials_file, googlescopes
)
if args.console_flow:
credentials = flow.run_console()
else:
credentials = flow.run_local_server()
print(
"\nCopy these credentials into Robocloud Vault:\n%s%s%s"
% (
(40 * "-") + "\n",
str(base64.b64encode(pickle.dumps(credentials)), "utf-8"),
"\n" + (40 * "-") + "\n",
)
)
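# Presumably this module is wired to a console-script entry point that calls
# start(); the guard below (added for illustration) gives the same behavior
# when the file is executed directly.
if __name__ == "__main__":
    start()
# Counterpart sketch for consuming the printed secret: restore the credentials
# object from the base64 blob stored in Robocloud Vault. `stored_blob` is a
# hypothetical variable holding the copied string:
#
#   credentials = pickle.loads(base64.b64decode(stored_blob))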
| 2.96875 | 3 |
rmPython.py | emiliska/unix-commands-python3 | 0 | 12769553 |
import sys, os
# How to use:
# python .\rmPython.py 'FILE_TO_DELETE_1' 'FILE_TO_DELETE_2' ...
for arg in sys.argv[1:]: # loop through arguments from command line
os.remove(arg)
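# Safer variant sketch (added illustration, not part of the original script):
# skip arguments that are not regular files instead of raising on them.
#
#   for arg in sys.argv[1:]:
#       if os.path.isfile(arg):
#           os.remove(arg)
#       else:
#           print("skipping non-file:", arg)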
| 2.171875 | 2 |
AutotestWebD/apps/myadmin/service/UserService.py | yangjourney/sosotest | 422 | 12769554 | import apps.common.func.InitDjango
from all_models.models import TbUser, TbAdminUserPermissionRelation
from apps.common.func.WebFunc import *
class UserService(object):
@staticmethod
def getUsers():
return TbUser.objects.all()
@staticmethod
def getUserByLoginname(loginname):
return TbUser.objects.filter(loginName=loginname)
@staticmethod
def updateUser(userData):
tbModel = TbUser.objects.filter(id=userData["id"])
tbModel.update(**userData)
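    # Usage sketch (illustrative values; not part of the original service):
    #
    #   user = UserService.getUserByLoginname("liyc").first()
    #   if user:
    #       UserService.updateUser({"id": user.id, "loginName": user.loginName})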
if __name__ == "__main__":
# print(UserService.getUsers()[0])
#permissionDict = UserPermission.getUserPermissions("liyc", "/interfaceTest/HTTP_InterfaceListCheck")
#print(permissionDict)
# print("permissionDict:", permissionDict)
#print("interfaceDict:", interfaceDict)
    # NOTE: UserPermission is never imported in this module, so the call below
    # would raise NameError; it is kept commented out like the examples above.
    # permissionsList = UserPermission.getOthersPermissions("liyc", ['lining02', 'gaozhe', 'qinjp', 'yongwy', 'pengjie', 'tanglu', 'hongln'], "/interfaceTest/HTTP_GlobalTextConfListPage")
# print("permissionsList:", permissionsList)
# print(UserService.getUserByLoginname(UserService.getUsers()[0].loginName))
| 1.945313 | 2 |
root/ilikeit/RabbitMQCrashCourse/tutorials/workqueues/new_task_durability.py | ChyiYaqing/chyidlTutorial | 5 | 12769555 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
new_task.py to allow arbitrary messages to be sent from the command line. This program will schedule tasks to our work queue.
The main idea behind Work Queues is to avoid doing a resource-intensive task immediately and having to wait for it to complete.
Instead we schedule the task to be done later. We encapsulate a task as a message and send it to the queue. A worker
process running in the background will pop the tasks and eventually execute the job. When you run many workers the tasks
will be shared between them.
This concept is especially useful in web applications where it's impossible to handle a complex task during a short HTTP request window
"""
# Pika is a pure-Python implementation of the AMQP 0-9-1 protocol
import pika
import sys
# guest user can only connect via localhost
#credentials = pika.PlainCredentials('guest', 'guest')
credentials = pika.PlainCredentials('pi', 'macintosh')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.31.156',
port=5672,
virtual_host='/',
credentials=credentials))
channel = connection.channel()
# make sure that RabbitMQ will never lose our queue.
# RabbitMQ doesn't allow redefine an existing queue with different parameters and will return an error to any program that tries to do that.
channel.queue_declare(queue='task_queue', durable=True)
message = ' '.join(sys.argv[1:]) or "Hello World!"
# make our message as persistent - by supplying a delivery_mode property with a value 2
# default exchange, which we identify by the empty string("")
channel.basic_publish(exchange='',
routing_key='task_queue', # the queue name
body=message,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
))
print("[x] Sent 'Hello World!'")
connection.close()
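# Added sketch of the consumer counterpart described in the docstrings above;
# it is wrapped in a function so this producer script does not start consuming
# on its own. Host and credentials reuse the values defined earlier.
def example_worker():
    def callback(ch, method, properties, body):
        print(" [x] Received %r" % body)
        # acknowledge only after the work is done, so unprocessed messages survive
        ch.basic_ack(delivery_tag=method.delivery_tag)
    worker_connection = pika.BlockingConnection(pika.ConnectionParameters(
        host='192.168.31.156', port=5672, virtual_host='/', credentials=credentials))
    worker_channel = worker_connection.channel()
    worker_channel.queue_declare(queue='task_queue', durable=True)
    worker_channel.basic_qos(prefetch_count=1)  # fair dispatch: one unacked message at a time
    worker_channel.basic_consume(queue='task_queue', on_message_callback=callback)
    worker_channel.start_consuming()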
"""
Please keep in mind that this and other tutorials are, well, tutorials, They demonstrate one new concept at a time and may
intentionally oversimplify some things and leave out others. For example topics such as connection management, error handling,
connection recovery, concurrency and metric collection are largely omitted for the sake of brevity. Such simplified code
should not be considered production ready.
Note on message persistence
Marking messages as persistent doesn't fully guarantee that a message won't be lost. Although it tells RabbitMQ to save
the message to disk, there is still a short time window when RabbitMQ has accepted a message and hasn't saved it yet.
Also, RabbitMQ doesn't do fsync(2) [synchronize a file's in-core state with storage device]
for every message -- it may be just saved to cache and not really written to the disk.
""" | 2.9375 | 3 |
image_projection.py | rhLahr/CNN_coffee_ring_effect | 0 | 12769556 | import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from load_test_data import load_test_data
from sklearn.metrics import confusion_matrix
from model_result import model_result
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
import pickle
import os
import matplotlib
basewidth = 300
hsize = 300
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Sequential( # input shape (1, 28, 28)
nn.Conv2d(
in_channels=1, # input height
out_channels=16, # n_filters
kernel_size=10, # filter size
stride=1, # filter movement/step
padding=2,
# if want same width and length of this image after Conv2d, padding=(kernel_size-1)/2 if stride=1
), # output shape (16, 28, 28)
nn.ReLU(), # activation
nn.MaxPool2d(kernel_size=5), # choose max value in 2x2 area, output shape (16, 14, 14)
)
self.conv2 = nn.Sequential( # input shape (16, 14, 14)
nn.Conv2d(16, 32, 10, 1, 2), # output shape (32, 14, 14)
nn.ReLU(), # activation
nn.MaxPool2d(5), # output shape (32, 7, 7)
)
self.linear1 = nn.Linear(128, 500)
self.linear2 = nn.Linear(500, 30)
self.out = nn.Linear(30, 6)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
x = self.relu(x)
x = self.linear1(x)
x = self.relu(x)
x = self.linear2(x)
x = self.relu(x)
output = self.out(x)
return output, x # return x for visualization
# model_name = input('Please input model name:')
# model_name = model_name + '.pkl'
# model_name = '1.pkl'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dirName = 'Projection_images'
try:
# Create target Directory
os.mkdir(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
for i in range(10):
model_name = str(i+1) + '.pkl'
net = torch.load(model_name)
model_name_pro = str(i+1)
dirName = 'Projection_images/' + 'model_' + model_name_pro + '_first_layer_pro'
try:
# Create target Directory
os.mkdir(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
train_x_file = ''
if train_x_file == '':
# train_x_file = 'train_table_1_4'
train_x_file = 'training'
train_x_file = train_x_file + '.pkl'
data = pickle.load(open(train_x_file, "rb"))
data = torch.from_numpy(data)
data = data.type(torch.FloatTensor)
for img_num in range(data.shape[0]):
img = data[img_num, :, :, :]
img = img.reshape(1, 1, basewidth, hsize)
img = img.to(device)
layer_1 = net.conv1(img)
layer_1 = layer_1.cpu() # 16 filters
        for filter_idx in range(layer_1.shape[1]):  # avoid shadowing the outer loop variable i
            layer_1_1 = layer_1[0, filter_idx, :, :]
            layer_1_1 = layer_1_1.detach().numpy()
            matplotlib.use('Agg')  # non-interactive backend so figures are only written to disk
            fig, ax = plt.subplots()
            filename = dirName + '/' + 'image_' + str(img_num+1) + '_filter_' + str(filter_idx+1) + '.jpg'
            plt.imshow(layer_1_1)
            # plt.show()
            fig.savefig(filename)
            plt.close()
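# Added sketch: a forward hook is an alternative way to capture intermediate
# activations without indexing into submodules by hand (`grab_conv1_activations`
# is not part of the original script).
def grab_conv1_activations(model, image):
    captured = {}
    def hook(module, inputs, output):
        captured['out'] = output.detach().cpu()
    handle = model.conv1.register_forward_hook(hook)
    model.conv1(image)  # the hook fires when the module is called
    handle.remove()
    return captured['out']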
| 3.0625 | 3 |
base_solver/pytorch-captcha-recognition/my_dataset.py | johnnyzn/DW-GAN | 109 | 12769557 |
# -*- coding: UTF-8 -*-
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import one_hot_encoding as ohe
import captcha_setting
import numpy as np
import cv2
class mydataset(Dataset):
def __init__(self, folder, folder_2 = None, transform=None):
self.train_image_file_paths = [os.path.join(folder, image_file) for image_file in os.listdir(folder)]
if(folder_2 is not None):
self.train_image_file_paths = self.train_image_file_paths + [os.path.join(folder_2, image_file) for image_file in os.listdir(folder_2)]
print(len(self.train_image_file_paths))
self.transform = transform
def __len__(self):
return len(self.train_image_file_paths)
def __getitem__(self, idx):
image_root = self.train_image_file_paths[idx]
image_name = image_root.split('/')[-1]
image = Image.open(image_root)
#print(image)
fix_size = (160, 60)
image = image.resize(fix_size)
# print(image_name)
if self.transform is not None:
image = self.transform(image)
# print(image_name)
if('_' in image_name):
label = ohe.encode(image_name.split('_')[0].upper())
else:
label = ohe.encode(image_name.split('.')[0].upper())
return image, label, image_name
def gaussian_blur(img):
image = np.array(img)
image_blur = cv2.GaussianBlur(image,(5,5),3)
new_image = image_blur
return new_image
transform = transforms.Compose([
# transforms.ColorJitter(),
transforms.Grayscale(),
# transforms.Lambda(gaussian_blur),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.9], std=[0.4]),
])
def get_train_data_loader(s=True,d=200):
print('data path: ', captcha_setting.TRAIN_DATASET_PATH)
# dataset = mydataset(captcha_setting.TRAIN_DATASET_PATH, captcha_setting.TRAIN_DATASET_PATH_2, transform=transform)
dataset = mydataset(captcha_setting.TRAIN_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=512, shuffle=s)
def get_test_train_data_loader(s=True,d=256):
dataset = mydataset(captcha_setting.TRAIN_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=d, shuffle=s)
def get_test_data_loader(s=False,d=1):
print(captcha_setting.TEST_DATASET_PATH)
dataset = mydataset(captcha_setting.TEST_DATASET_PATH, transform=transform)
return DataLoader(dataset, batch_size=d, shuffle=s)
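# Minimal usage sketch (added illustration; assumes the dataset paths in
# captcha_setting point at existing folders):
#
#   loader = get_test_data_loader()
#   for image, label, image_name in loader:
#       print(image.shape, image_name)
#       break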
def get_predict_data_loader(s=True,d=1):
dataset = mydataset(captcha_setting.PREDICT_DATASET_PATH, transform=transform)
    return DataLoader(dataset, batch_size=d, shuffle=s)
| 2.546875 | 3 |
dfvfs/resolver_helpers/cpio_resolver_helper.py | dfrc-korea/dfvfs | 1 | 12769558 | # -*- coding: utf-8 -*-
"""The CPIO path specification resolver helper implementation."""
from dfvfs.file_io import cpio_file_io
from dfvfs.lib import definitions
from dfvfs.resolver_helpers import manager
from dfvfs.resolver_helpers import resolver_helper
from dfvfs.vfs import cpio_file_system
class CPIOResolverHelper(resolver_helper.ResolverHelper):
"""CPIO resolver helper."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_CPIO
def NewFileObject(self, resolver_context):
"""Creates a new file-like object.
Args:
resolver_context (Context): resolver context.
Returns:
FileIO: file-like object.
"""
return cpio_file_io.CPIOFile(resolver_context)
def NewFileSystem(self, resolver_context):
"""Creates a new file system object.
Args:
resolver_context (Context): resolver context.
Returns:
FileSystem: file system.
"""
return cpio_file_system.CPIOFileSystem(resolver_context)
# Register the resolver helpers with the resolver.
manager.ResolverHelperManager.RegisterHelper(CPIOResolverHelper())
| 2.03125 | 2 |
stacker/hooks/route53.py | chrishenry/stacker | 0 | 12769559 |
import logging
logger = logging.getLogger(__name__)
from boto.route53 import connect_to_region
from stacker.util import create_route53_zone
def create_domain(region, namespace, mappings, parameters, **kwargs):
conn = connect_to_region(region)
domain = kwargs.get('domain', parameters.get('BaseDomain'))
if not domain:
logger.error("domain argument or BaseDomain parameter not provided.")
return False
create_route53_zone(conn, domain)
return True
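# Usage sketch (hypothetical values) of calling the hook directly:
#
#   create_domain("us-east-1", "prod", mappings={}, parameters={"BaseDomain": "example.com"})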
| 2.421875 | 2 |
tests/storage/util/test_paths.py | cdknorow/modelstore | 0 | 12769560 |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
import pytest
from modelstore.storage.util import paths
# pylint: disable=protected-access
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_archive_path(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
prefix = datetime.now().strftime("%Y/%m/%d/%H:%M:%S")
exp = os.path.join(
root, paths.MODELSTORE_ROOT_PREFIX, "domain", prefix, "file-name"
)
res = paths.get_archive_path(root, "domain", "path/to/file-name")
assert exp == res
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_models_path(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
exp = os.path.join(root, paths.MODELSTORE_ROOT_PREFIX, "example-domain", "versions")
res = paths.get_models_path(root, "example-domain")
assert exp == res
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_models_path_with_state(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
exp = os.path.join(
root, paths.MODELSTORE_ROOT_PREFIX, "example-domain", "versions", "prod"
)
res = paths.get_models_path(root, "example-domain", "prod")
assert exp == res
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_domains_path(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
exp = os.path.join(root, paths.MODELSTORE_ROOT_PREFIX, "domains")
res = paths.get_domains_path(root)
assert exp == res
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_domain_path(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
    exp = os.path.join(root, paths.MODELSTORE_ROOT_PREFIX, "domains", "domain.json")
    res = paths.get_domain_path(root, "domain")
assert exp == res
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_model_states_path(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
exp = os.path.join(root, paths.MODELSTORE_ROOT_PREFIX, "model_states")
res = paths.get_model_states_path(root)
assert exp == res
@pytest.mark.parametrize("has_root_prefix", [(True), (False)])
def test_get_model_state_path(tmp_path, has_root_prefix):
root = str(tmp_path) if has_root_prefix else ""
exp = os.path.join(root, paths.MODELSTORE_ROOT_PREFIX, "model_states", "prod.json")
res = paths.get_model_state_path(root, "prod")
assert exp == res
| 2.015625 | 2 |
src/astro/files/types/ndjson.py | astro-projects/astro | 71 | 12769561 | import io
import json
from typing import Optional
import pandas as pd
from astro.constants import DEFAULT_CHUNK_SIZE
from astro.constants import FileType as FileTypeConstants
from astro.files.types.base import FileType
class NDJSONFileType(FileType):
"""Concrete implementation to handle NDJSON file type"""
def export_to_dataframe(self, stream, **kwargs):
"""read ndjson file from one of the supported locations and return dataframe
:param stream: file stream object
"""
return NDJSONFileType.flatten(self.normalize_config, stream)
def create_from_dataframe(self, df: pd.DataFrame, stream: io.TextIOWrapper) -> None:
"""Write ndjson file to one of the supported locations
:param df: pandas dataframe
:param stream: file stream object
"""
df.to_json(stream, orient="records", lines=True)
@property
def name(self):
return FileTypeConstants.NDJSON
@staticmethod
def flatten(
normalize_config: Optional[dict], stream: io.TextIOWrapper
) -> pd.DataFrame:
"""
Flatten the nested ndjson/json.
:param normalize_config: parameters in dict format of pandas json_normalize() function.
https://pandas.pydata.org/docs/reference/api/pandas.json_normalize.html
:param stream: io.TextIOWrapper object for the file
:type normalize_config: dict
:type stream: io.TextIOWrapper
:return: return dataframe containing the loaded data
:rtype: `pandas.DataFrame`
"""
normalize_config = normalize_config or {}
df = None
rows = stream.readlines(DEFAULT_CHUNK_SIZE)
        while len(rows) > 0:
            chunk_df = pd.DataFrame(
                pd.json_normalize(
                    [json.loads(row) for row in rows], **normalize_config
                )
            )
            # accumulate every chunk; previously chunks after the first were
            # read but silently discarded
            df = chunk_df if df is None else pd.concat([df, chunk_df], ignore_index=True)
            rows = stream.readlines(DEFAULT_CHUNK_SIZE)
return df
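# Minimal usage sketch (added illustration; an in-memory stream stands in for a file):
if __name__ == "__main__":
    sample = io.StringIO('{"a": 1, "b": {"c": 2}}\n{"a": 3, "b": {"c": 4}}\n')
    print(NDJSONFileType.flatten(None, sample))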
| 2.84375 | 3 |
Task2C.py | Talha1337/ia-flood-risk-project-94- | 0 | 12769562 |
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
stations = build_station_list()
update_water_levels(stations)
print(stations_highest_rel_level(stations, 10))  # the 10 stations with the highest relative water level
| 2.28125 | 2 |
test/mgen.py | I-Rinka/build-cpu-within-20days | 5 | 12769563 |
import random
test_reg = ("$0", "$1", "$2", "$16", "$26", "$27")
def get_reg():
return test_reg[random.randint(0, len(test_reg)-1)]
imm_inst = ("lui", "ori", "andi", "addi", "addiu")
def get_imm_inst():
ins = imm_inst[random.randint(0, len(imm_inst)-1)]
rs = get_reg()
rt = get_reg()
imm = hex(random.randint(0, 100))
if ins == "lui":
return ins+" "+rs+","+imm
return ins+" "+rs+","+rt+","+imm
tri_inst = ("add", "sub", "addu", "subu", "srlv", "sllv")
def get_tri_inst():
ins = tri_inst[random.randint(0, len(tri_inst)-1)]
rs = get_reg()
rt = get_reg()
rd = get_reg()
return ins+" "+rd+","+rs+","+rt
addr = ("0x1000", "0x1234", "0x2020")
def get_sl():
# rt = "lui $3,"+addr[random.randint(0, len(addr)-1)]+"\n"
rt = "lui $3,"+"0x0000"+"\n"
rt += "ori $3,"+addr[random.randint(0, len(addr)-1)]+"\n"
if random.randint(0, 1) == 0: # lw
rt += "lw "+get_reg()+","+"0x0($3)"
else: # sw
rt += "sw "+get_reg()+","+"0x0($3)"
return rt
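# Example of the kind of MIPS text a run emits (values are random per run):
#
#   ori $2,$16,0x55
#   addu $26,$1,$27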
# j-type (jump) instructions break easily, so it is better to add them by hand
for ins in range(50):
r = random.randint(0,100)
if r>=0 and r<=40:
print(get_imm_inst())
# elif r>40 and r<=80:
else:
print(get_tri_inst())
# else:
# print(get_sl())
| 2.1875 | 2 |
utils/det_filter.py | rishyak/liverseg-2017-nipsws | 107 | 12769564 |
import numpy as np
from scipy import misc
import os
import scipy.io
from PIL import Image
def filter(base_root, crops_list='crops_LiTS_gt.txt', input_config='masked_out_lesion', results_list='detection_lesion_example', th=0.5):
crops_list = base_root + 'utils/crops_list/' + crops_list
results_list = base_root + 'detection_results/' + results_list + '/soft_results.txt'
if crops_list is not None:
with open(crops_list) as t:
crops_lines = t.readlines()
input_results_path = base_root + 'results/' + input_config
output_results_path = base_root + 'results/det_' + input_config
if not os.path.exists(os.path.join(output_results_path)):
os.makedirs(os.path.join(output_results_path))
if results_list is not None:
with open(results_list) as t:
results_lines = t.readlines()
for i in range(105, 131):
folder_name = str(i)
images = []
nm = folder_name + '/'
for x in results_lines:
if nm in x:
images.append(x)
slices_names = []
if not os.path.exists(os.path.join(output_results_path, folder_name)):
os.makedirs(os.path.join(output_results_path, folder_name))
for j in range(len(images)):
slices_names.append(images[j].split()[0])
unique_slices_names = np.unique(slices_names)
for x in range(len(unique_slices_names)):
total_mask = []
for l in range(len(slices_names)):
if slices_names[l] == unique_slices_names[x]:
if float(images[l].split()[3]) > th:
aux_mask = np.zeros([512, 512])
x_bb = int(float(images[l].split()[1]))
y_bb = int(float(images[l].split()[2].split('\n')[0]))
aux_name = images[l].split()[0] + '.png'
total_patch = (np.array(Image.open(os.path.join(input_results_path, aux_name)), dtype=np.uint8))/255.0
cropped_patch = total_patch[x_bb: (x_bb + 80), y_bb:(y_bb + 80)]
aux_mask[x_bb: (x_bb + 80), y_bb:(y_bb + 80)] = cropped_patch
total_mask.append(aux_mask)
if len(total_mask) > 0:
if len(total_mask) > 1:
summed_mask = np.sum(total_mask, axis=0)
else:
summed_mask = np.array(total_mask)[0]
thresholded_total_mask = np.greater(total_mask, 0.0).astype(float)
summed_thresholded_total_mask = np.sum(thresholded_total_mask, axis= 0)
summed_thresholded_total_mask[summed_thresholded_total_mask == 0.0] = 1.0
summed_mask = np.divide(summed_mask, summed_thresholded_total_mask)
summed_mask = summed_mask*255.0
name = unique_slices_names[x].split('.')[0] + '.png'
scipy.misc.imsave(os.path.join(output_results_path, name), summed_mask)
for i in range(len(crops_lines)):
result = crops_lines[i].split(' ')
if len(result) > 2:
id_img, bool_zoom, mina, maxa, minb, maxb = result
else:
id_img, bool_zoom = result
if int(id_img.split('/')[-2]) > 104:
if not os.path.exists(os.path.join(output_results_path, id_img + '.png')):
mask = np.zeros([512, 512])
misc.imsave(os.path.join(output_results_path, id_img + '.png'), mask)
| 2.265625 | 2 |
instaboost/exceptions.py | twangnh/Calibration_mrcnn | 87 | 12769565 | class TrimapError(Exception):
"""
Error when creating matting trimap.
"""
def __init__(self, err):
super(TrimapError, self).__init__(err)
class AnnError(Exception):
"""
Error with Input annotation.
"""
def __init__(self, err):
super(AnnError, self).__init__(err)
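# Usage sketch (added illustration):
#
#   if trimap is None:
#       raise TrimapError("failed to build a trimap from the instance mask")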
| 2.640625 | 3 |
Joy_QA_Platform/ApiManager/operations/operation_task.py | bzc128/Joy_QA_Platform | 123 | 12769566 | import datetime
import json
import re
import os
import requests
import time
import threading
import pickle
from django.core.mail import send_mail
from django.db import connection
from django.http import JsonResponse
from django.shortcuts import render_to_response, render
from django.core.cache import cache
from ApiManager.utils import schedule
from ApiManager.utils.case_utils import run_case_by_id
from ApiManager.utils.forms import TaskModelForm
from ApiManager.models import ProjectInfo, ModuleInfo, TestCaseInfo, EnvInfo, TaskInfo, ReportInfo, TaskFailedRecord
from frame.utils.common import get_ajax_msg, dataToJson
from ApiManager.utils.forms import get_validate_form_msg
from ApiManager.utils.utils import pagination_for_objects
from Joy_QA_Platform.settings import EMAIL_FROM
from Joy_QA_Platform.configs import AUTH_ADD_TASK, AUTH_DELETE, AUTH_UPDATE, AUTH_VIEW, EMAIL_SUFFIX
is_timer_start = False
run_task_list = []
run_job_dict = {}
def task_list(request):
if request.method == "GET":
return render(request, 'api/task_list.html')
elif request.method == "POST":
index = int(request.POST.get('index'))
env_name_dic = {}
project_name_dic = {}
module_name_dic = {}
results = filter_tasks_for_user(request.user, TaskInfo.objects.filter().order_by('-id'), AUTH_VIEW)
tasks = pagination_for_objects(results, index)
if tasks is not None and len(tasks) > 0:
for task in tasks:
append_env_dict(task, env_name_dic)
append_project_dict(task, project_name_dic)
append_module_dict(task, module_name_dic)
count = len(results)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '获取任务列表成功', {'tasks': data, 'count': count, 'currPage': index,
'envInfo': env_name_dic,
'proInfo': project_name_dic,
'moduleInfo': module_name_dic}))
def task_create(request):
if request.method == 'GET':
return render(request, 'api/task_new.html')
elif request.user.has_perm(AUTH_ADD_TASK):
if request.method == 'POST':
model_form = TaskModelForm(request.POST)
if model_form.is_valid():
task_name = request.POST.get('task_name')
env_id = request.POST.get('belong_env')
project_id = request.POST.get('belong_project')
module_id = request.POST.get('belong_module')
emails = request.POST.get('receiver_email')
start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
if request.POST.get('is_loop') == 'true':
is_loop = True
elif request.POST.get('is_loop') == 'false':
is_loop = False
interval_minute = request.POST.get('interval_minute')
error_msg = None
if not EnvInfo.objects.filter(id=env_id).exists():
error_msg = '此环境不存在'
elif not ProjectInfo.objects.filter(id=project_id).exists():
error_msg = '此项目不存在'
elif not ModuleInfo.objects.filter(id=module_id).exists():
error_msg = '此模块不存在'
elif TaskInfo.objects.filter(task_name=task_name, belong_module_id=module_id).exists():
error_msg = '已存在此任务'
elif start_time <= datetime.datetime.now():
error_msg = '任务开始时间早于当前时间'
elif is_loop and int(interval_minute) < 1:
error_msg = '任务开始循环间隔时间不能小于1分钟'
elif not validate_emails(emails.split(';')):
error_msg = '邮箱格式错误'
if error_msg is not None:
return JsonResponse(get_ajax_msg(0, 0, error_msg, {}))
model_form.instance.belong_env_id = env_id
model_form.instance.belong_project_id = project_id
model_form.instance.belong_module_id = module_id
model_form.instance.start_time = start_time
model_form.instance.receiver_email = deal_emails(emails.split(';'))
model_form.save()
for case_id in request.POST.get('case_list').split(','):
task = TaskInfo.objects.get(task_name=request.POST.get('task_name'))
case = TestCaseInfo.objects.get(id=case_id)
task.cases.add(case)
return JsonResponse(get_ajax_msg(1, 1, '添加任务成功', {}))
else:
msg = get_validate_form_msg(model_form)
return JsonResponse(get_ajax_msg(0, 0, msg))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有创建任务的权限'))
def task_search(request):
if request.method == 'POST':
index = int(request.POST.get('index'))
task_name = request.POST.get('task_name')
project_name = request.POST.get('project_name')
module_name = request.POST.get('module_name')
tasks = None
env_name_dic = {}
project_name_dic = {}
module_name_dic = {}
count = 0
if len(task_name) == 0 and len(project_name) == 0 and len(module_name) == 0:
return JsonResponse(get_ajax_msg(0, 0, '搜索条件无效'))
else:
tasks = TaskInfo.objects.all()
if len(module_name) != 0 and module_name != '模块名称':
tasks = tasks.filter(belong_module__module_name__contains=module_name)
if len(project_name) != 0 and project_name != '项目名称':
tasks = tasks.filter(belong_project__project_name__contains=project_name)
if len(task_name) != 0:
tasks = tasks.filter(task_name__contains=task_name)
if tasks == None:
return JsonResponse(get_ajax_msg(0, 0, '查询出错'))
if tasks != None and len(tasks) > 0:
            tasks = filter_tasks_for_user(request.user, tasks.order_by('-id'), AUTH_VIEW)  # filter tasks according to the user's permissions
for task in tasks:
append_env_dict(task, env_name_dic)
append_project_dict(task, project_name_dic)
append_module_dict(task, module_name_dic)
count = len(tasks)
tasks = pagination_for_objects(tasks, index)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '搜索成功', {'tasks': data, 'count': count, 'currPage': index,
'envInfo': env_name_dic, 'proInfo': project_name_dic,
'moduleInfo': module_name_dic}))
def task_delete(request):
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
if check_perm(request.user, tasks[0], AUTH_DELETE):
tasks[0].delete()
return JsonResponse(get_ajax_msg(1, 1, '删除成功', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有删除该任务的权限'))
def task_query(request):
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
tasks = filter_tasks_for_user(request.user, tasks, AUTH_VIEW)
task_info_list = []
for task in tasks:
task_dict = task2Dict(task)
task_info_list.append(task_dict)
data = dataToJson(task_info_list)
return JsonResponse(get_ajax_msg(1, 1, '获取任务成功', {'tasks': data}))
def task_update(request):
if request.method == 'POST':
task_form = TaskModelForm(request.POST)
if task_form.is_valid():
task_id = request.POST.get('id')
task_name = request.POST.get('task_name')
env_name = request.POST.get('env_name')
project_name = request.POST.get('project_name')
module_name = request.POST.get('module_name')
receiver_email = request.POST.get('receiver_email')
case_list = request.POST.get('case_list').split(',')
start_time = datetime.datetime.fromtimestamp(int(request.POST.get('start_time')) / 1000)
interval_minute = request.POST.get('interval_minute')
if request.POST.get('is_loop') == 'true':
is_loop = True
if int(interval_minute) < 1:
return JsonResponse(get_ajax_msg(0, 0, '循环间隔时间不能小于1分钟', {}))
elif request.POST.get('is_loop') == 'false':
is_loop = False
if start_time <= datetime.datetime.now():
start_time = datetime.datetime.now()
# return JsonResponse(get_ajax_msg(0, 0, '任务开始时间早于当前时间', {}))
if not validate_emails(receiver_email.split(';')):
return JsonResponse(get_ajax_msg(0, 0, '邮箱格式错误'))
# print(deal_emails(receiver_email.split(';')))
try:
task = TaskInfo.objects.get(id=task_id)
if TaskInfo.objects.filter(task_name=task_name,belong_module_id=module_name).exclude(id=task_id).exists():
return JsonResponse(get_ajax_msg(0, 0, '已存在此任务名称', {}))
if not task.is_run:
if check_perm(request.user, TaskInfo.objects.get(id=task_id), AUTH_UPDATE):
if TaskInfo.objects.update_task(task_id, task_name=task_name, env_name=env_name,
project_name=project_name,
module_name=module_name, receiver_email=deal_emails(receiver_email.split(';')),
case_list=case_list,
start_time=start_time, is_loop=is_loop,
interval_minute=interval_minute):
return JsonResponse(get_ajax_msg(1, 1, '修改任务成功', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '修改任务失败', {}))
else:
return JsonResponse(get_ajax_msg(0, 0, '用户没有修改该任务的权限'))
else:
return JsonResponse(get_ajax_msg(0, 0, '请先停止任务', {}))
except:
return JsonResponse(get_ajax_msg(0, 0, '该任务不存在', {}))
else:
msg = get_validate_form_msg(task_form)
return JsonResponse(get_ajax_msg(0, 1, msg))
def task_run(request):
global is_timer_start
global run_task_list
global run_job_dict
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
task = tasks[0]
if not task.is_run:
            if task.start_time > datetime.datetime.now():  # the task start time must be later than now
pass
else:
task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10)
# if not is_timer_start:
# is_timer_start = True
# start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
# start_task_timer.start()
run_task_list.append(task)
task.is_run = True
task.save()
connection.close()
return JsonResponse(get_ajax_msg(1, 1, '该任务成功运行'))
else:
connection.close()
return JsonResponse(get_ajax_msg(0, 0, '该任务正在运行'))
def task_stop(request):
global run_task_list
global run_job_dict
if request.method == 'POST':
task_id = request.POST.get('id')
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) == 0:
return JsonResponse(get_ajax_msg(0, 0, '没有这条数据', {}))
task = tasks[0]
if task.is_run:
task.is_run = False
task.fail_times = 0
task.save()
# if task in run_task_list:
            # run_task_list.remove(task)  # remove the task from the running-task list
try:
# jobs = run_job_dict[task.id]
# for job in jobs:
schedule.cancel_job(task.id)
except KeyError:
                print('非循环任务')  # log: "not a looping task"
return JsonResponse(get_ajax_msg(1, 1, '该任务成功停止'))
else:
return JsonResponse(get_ajax_msg(0, 0, '该任务没有运行'))
def task_monitor(request):
if request.method == 'GET':
return render(request, 'api/task_monitor.html')
if request.method == 'POST':
index = int(request.POST.get('index'))
search_task_name = request.POST.get('task_name')
start = (index - 1) * 10
        res = requests.get('http://127.0.0.1:5555/api/tasks?limit=1000')  # cap the query at 1000 results to avoid slow page loads
results = json.loads(res.content)
monitor_result_list = []
for result in results.values():
try:
task_dict = {}
args = result['args'].split(',')
                # extract the task info
infos = args[1].split('-')
if '定时任务' in infos[0]:
task_name = infos[1]
case_name = infos[2]
report_uuid = args[4].split("'")[1]
task_dict['task_name'] = task_name
task_dict['case_name'] = case_name
task_dict['state'] = result['state']
task_dict['result'] = result['result']
task_dict['received'] = result['received']
task_dict['started'] = result['started']
task_dict['runtime'] = result['runtime']
task_dict['report_uuid'] = report_uuid
if search_task_name is not None:
if search_task_name in task_dict['task_name']:
monitor_result_list.append(task_dict)
else:
monitor_result_list.append(task_dict)
except Exception as e:
                print('数据解析异常: %s' % e)  # log: "failed to parse task data"
        # sort by the task received time, descending
for i in range(len(monitor_result_list) - 1):
for j in range(len(monitor_result_list) - i - 1):
if monitor_result_list[j]['received'] < monitor_result_list[j + 1]['received']:
monitor_result_list[j], monitor_result_list[j + 1] = monitor_result_list[j + 1], monitor_result_list[j]
data = dataToJson(monitor_result_list[start: start + 10])
return JsonResponse(get_ajax_msg(1, 1, '获取监控任务列表成功', {'monitors': data, 'count': len(monitor_result_list), 'currPage': index}))
def thread_run_case(**kwargs):
case_id = kwargs['case_id']
base_url = kwargs['base_url']
task_name = kwargs['task_name']
task_id = kwargs['task_id']
threading.Thread(target=run_case, args=(base_url, case_id, task_name, task_id)).start()
def run_case(base_url, case_id, task_name, task_id):
report_id = run_case_by_id(base_url, case_id, task_name,"定时任务",isTask=True)
    time.sleep(5)  # wait for the report to be written to the database
reports = ReportInfo.objects.all().filter(report_id=report_id)
tasks = TaskInfo.objects.filter(id=task_id)
if len(tasks) > 0:
task = tasks[0]
if len(reports) == 0:
            # if there is no report, treat the case as passed; nothing further to do
if len(tasks) > 0:
task.fail_times = 0
task.save()
else:
response_result = get_response_result(report_id)
if response_result != True:
task.fail_times += 1
task.save()
                # persist the failure record
failRecord = TaskFailedRecord(task_id=task,report_id=reports[0].id,time=datetime.datetime.fromtimestamp(reports[0].test_time))
failRecord.save()
if task.fail_times % 2 == 0 and task.fail_times != 0:
receivers = task.receiver_email.split(';')
for receiver in receivers:
send_warn_mail(task_name, receiver, reports[0].id)
    connection.close()  # avoid piling up too many MySQL connections
def get_response_result(report_id):
response_result = True
try:
reports = ReportInfo.objects.all().filter(report_id=report_id)
if len(reports) > 0:
report = reports[0]
# print(report.result_data)
summury = json.loads(report.result_data)
stat = summury['stat']
if stat['successes'] != stat['testsRun']:
response_result = False
except Exception as e:
print('get_response_code e=====>', e)
return response_result
def send_warn_mail(task_name, receiver, report_id):
tips = task_name + ':监控到接口发生异常!查看报告地址:http://qa.15166.com/api/get_report/?id=' + str(report_id)
try:
email_title = "Joy_QA_Platform 定时任务监控接口"
email_body = tips
        # send the mail with Django's built-in helper; four arguments: subject, body, sender, recipient list
send_status = send_mail(email_title, email_body, EMAIL_FROM, [receiver])
except Exception as e:
print(e)
def task2Dict(task):
task_dict = {}
task_dict["id"] = task.id
task_dict["task_name"] = task.task_name
task_dict["belong_env"] = task.belong_env_id
task_dict["belong_project"] = task.belong_project_id
task_dict["belong_module"] = task.belong_module_id
task_dict["receiver_email"] = task.receiver_email
task_dict["case_id_list"] = []
task_dict["case_name_list"] = []
task_dict["start_time"] = task.start_time
task_dict["is_loop"] = task.is_loop
task_dict["interval_minute"] = task.interval_minute
task_dict["is_run"] = task.is_run
task_dict["fail_times"] = task.fail_times
cases = task.cases.all()
for case in cases:
id = case.id
task_dict["case_id_list"].append(case.id)
task_dict["case_name_list"].append(case.name)
return task_dict
def append_env_dict(task, env_dict):
env_id = task.belong_env_id
env_name = task.belong_env.env_name
env_dict[str(env_id)] = env_name
def append_project_dict(task, project_dict):
project_id = task.belong_project_id
project_name = task.belong_project.project_name
project_dict[str(project_id)] = project_name
def append_module_dict(task, module_dict):
module_id = task.belong_module_id
module_name = task.belong_module.module_name
module_dict[str(module_id)] = module_name
def get_url_from_task(task):
envs = EnvInfo.objects.filter(id=task.belong_env_id)
env = envs[0]
return env.host_port
class StartTaskTimer(threading.Thread):
def __init__(self, run_task_list, run_job_dict):
threading.Thread.__init__(self)
self.run_task_list = run_task_list
self.run_job_dict = run_job_dict
def run(self):
while True:
# lst = self.run_task_list[::]
tasks = get_running_tasks()
for task in tasks:
now = datetime.datetime.now()
if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)):
if task.is_loop:
self.run_job_dict[task.id] = start_loop_task(task, thread_run_case)
else:
start_task(task, thread_run_case)
task.is_run = False
task.fail_times = 0
task.save()
# self.run_task_list.remove(task)
else:
pass
time.sleep(5)
mutex = threading.Lock()
def get_running_tasks():
global mutex
with mutex:
result = []
tasks = TaskInfo.objects.filter(is_run=True,is_loop=True)
now = datetime.datetime.now()
for task in tasks:
            # guard against possible duplicate execution
if task.start_time <= now <= (task.start_time + datetime.timedelta(seconds=5)) and (now - task.last_run_time > datetime.timedelta(seconds=5)):
result.append(task)
task.last_run_time = now
task.save()
# if datetime.datetime.now() - task.last_run_time > datetime.timedelta(seconds=task.interval_minute * 60 - 5):
# result.append(task)
connection.close()
if len(result) > 0:
for i in result:
print("获取到任务:",i.task_name)
return result
def start_loop_task(task, func):
base_url = get_url_from_task(task)
jobs = []
cases = task.cases.all()
for case in cases:
task_name = get_task_name(task, case)
func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
job = schedule.every(task.interval_minute).minutes.do(thread_run_case, case_id=case.id,
base_url=base_url, task_name=task_name, task_id=task.id)
cache.set("qa_paltform_loop_jobs_"+str(datetime.datetime.now()),pickle.dumps(job),timeout=None)
flag = cache.get("qa_test_platform_running_flag")
# print("flag==="+str(flag))
if flag != 1:
schedule.run_continuously()
        # always set the timeout explicitly: django-redis (unlike raw redis) applies a default 5-minute expiry, and once the flag expires another runner thread would be started
cache.set("qa_test_platform_running_flag",1,timeout=None)
return jobs
def start_task(task, func):
base_url = get_url_from_task(task)
cases = task.cases.all()
for case in cases:
task_name = get_task_name(task, case)
func(case_id=case.id, base_url=base_url, task_name=task_name, task_id=task.id)
def get_task_name(task, case):
name = '定时任务' + '-' + task.task_name + '-' + case.name
return name
def filter_tasks_for_user(user, tasks, perm):
results = []
for task in tasks:
project = task.belong_project
if user.has_perm(perm, project):
results.append(task)
return results
def check_perm(user, task, perm):
project = task.belong_project
return user.has_perm(perm, project)
def restart_running_task():
    # clear cached jobs from redis
cache.delete_pattern("qa_paltform_loop_jobs_*")
    # clear the distributed lock in redis; an occasionally stale lock would block tasks at run_pending in the executor
cache.delete_pattern('*qa_test_platform_get')
    # a thread-started flag, so adding each new task does not spawn another thread, which could run tasks twice
cache.delete_pattern('qa_test_platform_running_flag')
print("清除任务缓存、清除锁、清除线程启动标记")
start_task_timer = StartTaskTimer(run_task_list, run_job_dict)
start_task_timer.start()
tasks = TaskInfo.objects.filter(is_run=True, is_loop=True)
count = 0
for task in tasks:
task.start_time = datetime.datetime.now() + datetime.timedelta(seconds=10*(count+1))
task.save()
count = count + 1
connection.close() # 避免造成mysql连接数过多的问题
def validate_emails(emails):
for email in emails:
if len(email) == 0:
continue
if re.match("^[A-Z0-9a-z._%+-]+" + EMAIL_SUFFIX, email) is None:
return False
return True
def deal_emails(emails):
result = []
for email in emails:
if email not in result:
result.append(email)
resultEmail = ""
for email in result:
resultEmail = resultEmail + ";" + email
return resultEmail[1:]
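    # Equivalent one-line sketch (added note): dict.fromkeys() de-duplicates
    # while preserving order, so the loop above could be written as
    #   return ";".join(dict.fromkeys(emails))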
| 1.953125 | 2 |
test_integration/test_R3_protocol.py | rgaensler/gcode | 0 | 12769567 |
import pytest
from src.ApplicationExceptions import ErrorDispatch
from src.Coordinate import Coordinate
from src.MelfaCoordinateService import MelfaCoordinateService
from src.clients.TcpClientR3 import TcpClientR3
from src.clients.TcpEchoServer import ConfigurableEchoServer
from src.protocols.R3Protocol import R3Protocol
# Parameters
VALID_HOST, VALID_PORT = 'localhost', 10002
JOINTS = ['J{}'.format(i) for i in range(1, 7)]
@pytest.fixture
def echo_server() -> ConfigurableEchoServer:
# Using the context manager to open and shutdown the communication for each test
with ConfigurableEchoServer(VALID_HOST, VALID_PORT, 'utf-8') as new_server:
yield new_server
@pytest.fixture
def valid_client():
# Using the context manager to open and close the communication for each test
with TcpClientR3(host=VALID_HOST, port=VALID_PORT, timeout=3) as client:
yield client
@pytest.fixture
def protocol(valid_client):
return R3Protocol(valid_client, MelfaCoordinateService(), joints=6, digits=2)
@pytest.mark.parametrize("prefix,exc", [(p, e) for p, e in ErrorDispatch.items()])
@pytest.mark.usefixtures('echo_server')
class TestR3ProtocolUtil:
@staticmethod
def execute_report_failures(echo_server, func, prefix, exc, silencing_errors=False):
"""
        Test macro: execute the call against the echo server and verify error handling
"""
echo_server.reconfigure(pre=prefix, msg='')
if exc is None or silencing_errors:
func()
else:
with pytest.raises(exc):
func()
def test_reset_alarm(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.reset_alarm, prefix, exc, silencing_errors=True)
def test_activate_servo(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.activate_servo, prefix, exc)
def test_deactivate_servo(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.deactivate_servo, prefix, exc)
def test_obtain_control(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.obtain_control, prefix, exc)
def test_release_control(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.release_control, prefix, exc)
def test_open_communication(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.open_communication, prefix, exc)
def test_close_communication(self, protocol, echo_server, prefix, exc):
self.execute_report_failures(echo_server, protocol.close_communication, prefix, exc)
@pytest.mark.skip(reason='Not implemented.')
@pytest.mark.usefixtures('echo_server')
class TestR3ProtocolPositions:
@pytest.mark.parametrize("n,p", [])
def test_set_position(self, protocol, n: str, p: Coordinate):
protocol.set_position(n, p)
@pytest.mark.parametrize("n,t", [])
def test_define_variable(self, protocol, n: str, t: str):
protocol.define_variable(n, var_type=t)
@pytest.mark.parametrize("t", [])
def test_linear_move(self, protocol, t: Coordinate):
protocol.linear_move(t)
@pytest.mark.parametrize("t", [])
def test_joint_move(self, protocol, t):
protocol.joint_move(t)
@pytest.mark.parametrize("s,t,c", [])
def test_circular_move_centre(self, protocol, s: str, t: str, c: str):
protocol.circular_move_centre(s, t, c)
@pytest.mark.parametrize("s,im,t", [])
def test_circular_move_intermediate(self, protocol, s: str, im: str, t: str):
protocol.circular_move_intermediate(s, im, t)
@pytest.mark.parametrize("s,im1,im2", [])
def test_circular_move_full(self, protocol, s: str, im1: str, im2: str):
protocol.circular_move_full(s, im1, im2)
def test_go_safe_pos(self, protocol):
protocol.go_safe_pos()
@pytest.mark.parametrize("prefix,exc", [(p, e) for p, e in ErrorDispatch.items()])
@pytest.mark.usefixtures('echo_server')
class TestR3ProtocolReader:
@staticmethod
def float_value(echo_server, func, prefix, exc, *, response):
"""
Test macro for getting float responses
"""
if exc is None:
echo_server.reconfigure(pre=prefix, msg=response)
assert func() == float(response)
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
func()
def test_get_override(self, protocol, echo_server, prefix, exc):
self.float_value(echo_server, protocol.get_override, prefix, exc, response='50.0')
def test_get_current_linear_speed(self, protocol, echo_server, prefix, exc):
self.float_value(echo_server, protocol.get_current_linear_speed, prefix, exc, response='50.0')
def test_get_current_joint_speed(self, protocol, echo_server, prefix, exc):
self.float_value(echo_server, protocol.get_current_joint_speed, prefix, exc, response='50.0')
def test_get_joint_borders(self, protocol, echo_server, prefix, exc):
response = 'MEJAR;-4.00, 4.00, -5.00, 1.00, 1.00, 16.00, -6.00, 16.00, -2.00, 1.00, -3.00, 3.00, -8.00, 8.00;10'
expected = (-4.0, 4.0, -5.0, 1.0, 1.0, 16.0, -6.0, 16.0, -2.0, 1.0, -3.0, 3.0)
# Test
if exc is None:
echo_server.reconfigure(pre=prefix, msg=response)
actual = protocol.get_joint_borders()
assert actual == expected
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
protocol.get_joint_borders()
def test_get_xyz_borders(self, protocol, echo_server, prefix, exc):
response = 'MEPAR;-4.00, 4.00, -5.00, 1.00, -1.00, 16.00;10'
expected = (-4.0, 4.0, -5.0, 1.0, -1.0, 16.0)
# Test
if exc is None:
echo_server.reconfigure(pre=prefix, msg=response)
actual = protocol.get_xyz_borders()
assert actual == expected
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
protocol.get_xyz_borders()
def test_get_current_xyzabc(self, protocol, echo_server, prefix, exc):
"""
Test that the response string can be converted correctly
:param protocol:
:param echo_server:
:param prefix:
:param exc:
:return:
"""
response = 'X;290.62;Y;-0.09;Z;11.26;A;-179.94;B;-0.26;C;179.93;L1;0.00;;6,0;100;0.00;00000000'
expected = Coordinate((290.62, -0.09, 11.26, -179.94, -0.26, 179.93), 'XYZABC')
# Test
if exc is None:
echo_server.reconfigure(pre=prefix, msg=response)
actual = protocol.get_current_xyzabc()
assert str(actual) == str(expected)
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
protocol.get_current_xyzabc()
def test_get_current_joint(self, protocol, echo_server, prefix, exc):
"""
Test that the response string can be converted correctly
:param protocol:
:param echo_server:
:param prefix:
:param exc:
:return:
"""
response = 'J1;290.62;J2;-0.09;J3;11.26;J4;-179.94;J5;-0.26;J6;179.93;L1;0.00;;6,0;100;0.00;00000000'
expected = Coordinate((290.62, -0.09, 11.26, -179.94, -0.26, 179.93), JOINTS)
# Test
if exc is None:
echo_server.reconfigure(pre=prefix, msg=response)
actual = protocol.get_current_joint()
assert str(actual) == str(expected)
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
protocol.get_current_joint()
def test_get_safe_pos(self, protocol, echo_server, prefix, exc):
if exc is None:
echo_server.reconfigure(pre=prefix, msg='JSAFE;0, 1, 2, 3, 4, 5; Blabla')
safe_pos = protocol.get_safe_pos()
assert str(safe_pos) == 'J10.00 J21.00 J32.00 J43.00 J54.00 J65.00'
print(safe_pos)
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
protocol.get_safe_pos()
def test_get_servo_state(self, protocol, echo_server, prefix, exc):
response = 'M_SVO=+1'
expected = 1
# Test
if exc is None:
echo_server.reconfigure(pre=prefix, msg=response)
actual = protocol.get_servo_state()
assert actual == expected
else:
echo_server.reconfigure(pre=prefix, msg='')
with pytest.raises(exc):
protocol.get_servo_state()
@pytest.mark.parametrize("prefix,exc", [(p, e) for p, e in ErrorDispatch.items()])
@pytest.mark.usefixtures('echo_server')
class TestR3ProtocolSetter:
@staticmethod
def limited_set(echo_server, func, *, prefix, exc, lbound, ubound):
"""
Test macro
:param echo_server:
:param func:
:param prefix:
:param exc:
:param lbound:
:param ubound:
:return:
"""
valid_interval = [lbound, ubound]
outside = [lbound - 1, ubound + 1]
if exc is None:
# Valid return configured, return value is not checked
echo_server.reconfigure(pre=prefix, msg='garbage')
# Should be possible to set
for i in valid_interval:
func(i)
# Should raise an exception
for i in outside:
with pytest.raises(ValueError):
func(i)
else:
# Error return configured
echo_server.reconfigure(pre=prefix, msg='more garbage')
# Impossible to set due to server error
for i in valid_interval:
with pytest.raises(exc):
func(i)
# Impossible to set due to input limitation
for i in outside:
with pytest.raises(ValueError):
func(i)
@pytest.mark.parametrize("wc", [])
def test_set_work_coordinate(self, protocol, wc: str, prefix, exc):
protocol.set_work_coordinate(wc)
def test_set_override(self, echo_server, protocol, prefix, exc):
self.limited_set(echo_server, protocol.set_override, prefix=prefix, exc=exc, lbound=1.0, ubound=100.0)
def test_set_linear_speed(self, echo_server, protocol, prefix, exc):
self.limited_set(echo_server, protocol.set_linear_speed, prefix=prefix, exc=exc, lbound=1.0, ubound=1000.0)
def test_set_joint_speed(self, echo_server, protocol, prefix, exc):
self.limited_set(echo_server, protocol.set_joint_speed, prefix=prefix, exc=exc, lbound=1.0, ubound=100.0)
@pytest.mark.skip(reason='Not implemented.')
@pytest.mark.usefixtures('echo_server')
class TestR3ProtocolResetter:
def test_reset_base_coordinate_system(self, protocol):
protocol.reset_base_coordinate_system()
def test_reset_override(self, echo_server, protocol):
"""
Test that the override can be reset.
:param protocol:
:return:
"""
override_initial = protocol.get_override()
# Change the override to 10%
protocol.set_override(10.0)
protocol.reset_override()
override_final = protocol.get_override()
assert override_initial == override_final
def test_reset_linear_speed(self, protocol):
"""
Test that resetting the linear speed reverts the changes.
:param protocol:
:return:
"""
lin_speed_initial = protocol.get_current_linear_speed()
protocol.set_linear_speed(lin_speed_initial + 10.0)
protocol.reset_linear_speed()
lin_speed_final = protocol.get_current_linear_speed()
assert lin_speed_initial == lin_speed_final
def test_reset_joint_speed(self, protocol):
"""
Test that resetting the joint speed reverts the changes.
:param protocol:
:return:
"""
joint_speed_initial = protocol.get_current_joint_speed()
protocol.set_joint_speed(10.0)
protocol.reset_joint_speed()
joint_speed_final = protocol.get_current_joint_speed()
assert joint_speed_initial == joint_speed_final
def test_reset_all_speeds(self, protocol):
"""
Test that all speed ranges are reset
:param protocol:
:return:
"""
lin_speed_initial = protocol.get_current_linear_speed()
joint_speed_initial = protocol.get_current_joint_speed()
protocol.set_linear_speed(lin_speed_initial + 10.0)
protocol.set_joint_speed(10.0)
protocol.reset_all_speeds()
lin_speed_final = protocol.get_current_linear_speed()
joint_speed_final = protocol.get_current_joint_speed()
assert lin_speed_initial == lin_speed_final
assert joint_speed_initial == joint_speed_final
| 2.109375 | 2 |
submitter/__main__.py | deconst/submitter | 1 | 12769568 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import sys
from datetime import datetime
from .config import Config
from .submit import submit, SUCCESS, NOOP
c = Config(os.environ)
# Configure the logger.
# <= INFO to stdout
# >= WARNING to stderr
class LessThanFilter(logging.Filter):
def __init__(self, exclusive_maximum):
super().__init__('')
self.exclusive_maximum = exclusive_maximum
def filter(self, record):
if record.levelno < self.exclusive_maximum:
return 1
else:
return 0
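# With LessThanFilter(logging.WARNING) attached to the stdout handler below,
# DEBUG and INFO records go to stdout, while WARNING and above are filtered
# out there and reach stderr through errHandler instead.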
if c.verbose:
    level = logging.DEBUG
else:
    level = logging.INFO
rootLogger = logging.getLogger()
rootLogger.setLevel(level)
plainFormatter = logging.Formatter('%(message)s')
outHandler = logging.StreamHandler(sys.stdout)
outHandler.setLevel(logging.DEBUG)
outHandler.addFilter(LessThanFilter(logging.WARNING))
outHandler.setFormatter(plainFormatter)
rootLogger.addHandler(outHandler)
errHandler = logging.StreamHandler(sys.stderr)
errHandler.setLevel(logging.WARNING)
errHandler.setFormatter(plainFormatter)
rootLogger.addHandler(errHandler)
# Squelch requests and urllib messages.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
if not c.is_valid():
logging.error('Invalid configuration. Fix the following environment variables:')
for var in c.missing():
logging.error(" " + var)
for problem in c.problems:
logging.error(" " + problem)
sys.exit(1)
start = datetime.utcnow()
result = submit(c)
finish = datetime.utcnow()
pattern = 'Submitted {asset_uploaded} / {asset_total} assets and ' \
'{envelope_uploaded} / {envelope_total} envelopes in {duration}.'
summary = pattern.format(
asset_uploaded=result.asset_result.uploaded,
asset_total=len(result.asset_result.asset_set),
envelope_uploaded=result.envelope_result.uploaded,
envelope_total=len(result.envelope_result.envelope_set),
duration=finish - start
)
logging.info(summary)
if result.state is SUCCESS:
sys.exit(0)
elif result.state is NOOP:
# Signal to the Strider plugin that we did nothing.
sys.exit(2)
else:
# FAILURE
logging.error('Failed to upload {} envelopes.'.format(result.envelope_result.failed))
sys.exit(1)
| 2.5 | 2 |
germanetpy/wictionaryparaphrase.py | Blubberli/GermaNetPy | 2 | 12769569 | <reponame>Blubberli/GermaNetPy<filename>germanetpy/wictionaryparaphrase.py
class WiktionaryParaphrase:
def __init__(self, lexunit_id: str, wiktionary_id: str, wiktionary_sense_id: int, wiktionary_sense: str,
edited: bool):
"""
This class holds the Wictionary paraphrase object. A wictionary paraphrase can be part of lexical units. The
contain a definition of the lexical unit which helps to differentiate between different sense of a word.
:param lexunit_id: The lexical unit id, this wictionary entry belongs to
:param wiktionary_id: The corresponding wictionary identifier
:param wiktionary_sense_id: The sense identifier
:param wiktionary_sense: The sense definition
:param edited: If this paraphrase was edited.
"""
self._lexunit_id = lexunit_id
self._wiktionary_id = wiktionary_id
self._wiktionary_sense_id = wiktionary_sense_id
self._wiktionary_sense = wiktionary_sense
self._edited = edited
def __repr__(self):
        return f'Wiktionary(LexUnit ID={self.lexunit_id}, definition={self.wiktionary_sense})'
@property
def lexunit_id(self):
return self._lexunit_id
@property
def wiktionary_id(self):
return self._wiktionary_id
@property
def wiktionary_sense_id(self):
return self._wiktionary_sense_id
@property
def wiktionary_sense(self):
return self._wiktionary_sense
@property
def edited(self):
return self._edited
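# Usage sketch (the identifiers below are illustrative, not real GermaNet data):
#
#     paraphrase = WiktionaryParaphrase(lexunit_id="l123", wiktionary_id="w456",
#                                       wiktionary_sense_id=1,
#                                       wiktionary_sense="a short definition",
#                                       edited=False)
#     print(paraphrase)  # Wiktionary(LexUnit ID=l123, definition=a short definition)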
| 2.890625 | 3 |
modules/base.py | VinhLoiIT/stock-trend-predictions | 3 | 12769570 | <reponame>VinhLoiIT/stock-trend-predictions<gh_stars>1-10
class StockTrendPredictor:
def predict(self, headline: str) -> int:
r"""Predict stock trending from the given headline
Args:
headline: the news headline
Returns:
A prediction, 0 for not-changed or downward trend, 1 for upward trend
"""
pass
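# Minimal illustrative subclass (the keyword rule is a stand-in for a real
# trained model, which this interface deliberately leaves unspecified):
#
#     class KeywordPredictor(StockTrendPredictor):
#         def predict(self, headline: str) -> int:
#             return 1 if "rises" in headline.lower() else 0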
| 2.390625 | 2 |
scripts/rviz_spacenav.py | kalashgera/rviz_spacenav | 0 | 12769571 | <filename>scripts/rviz_spacenav.py<gh_stars>0
#!/usr/bin/env python
import numpy
import tf
import rospy
from geometry_msgs.msg import Twist
from view_controller_msgs.msg import CameraPlacement
class RvizSpacenav:
def __init__(self):
# parameters
        self.hz = 20 # Hz for CameraPlacement topic
self.Kp = 0.3 # Coefficient for position control
self.Kr = 2 # Coefficient for rotation control
# local variable
self.publisher = rospy.Publisher('camera', CameraPlacement, queue_size=10)
self.twist = None # subscribed message
self.lock = False
self.camera = CameraPlacement()
        # First publish
rospy.sleep(0.1) # wait for connection
self.initialize()
# start listening
self.camera.time_from_start = rospy.Duration.from_sec(1/self.hz/2)
self.subscriber = rospy.Subscriber('spacenav_twist', Twist, self.spacenav_callback)
def initialize(self):
# set initial camera pose
self.camera.interpolation_mode = CameraPlacement.LINEAR
self.camera.up.vector.z = 1
self.camera.eye.point.x = 4
self.camera.eye.point.y = 4
self.camera.eye.point.z = 1
wait_duration = rospy.Duration(1)
self.camera.time_from_start = wait_duration
self.publisher.publish(self.camera)
rospy.sleep(wait_duration+rospy.Duration(0.1))
def spacenav_callback(self, msg):
if not self.lock:
self.twist = msg
def get_transformation(self):
'''
        Return the rotation matrix R that maps a vector x expressed in the
        camera frame to the same vector y expressed in the fixed frame:
        y = R*x
'''
camera = numpy.array([self.camera.eye.point.x,\
self.camera.eye.point.y,\
self.camera.eye.point.z])
focus = numpy.array([self.camera.focus.point.x,\
self.camera.focus.point.y,\
self.camera.focus.point.z])
up = numpy.array([self.camera.up.vector.x,\
self.camera.up.vector.y,\
self.camera.up.vector.z])
ex_ = tf.transformations.unit_vector(focus - camera)
ey_ = tf.transformations.unit_vector(numpy.cross(up, ex_))
ez_ = numpy.cross(ex_, ey_)
trans = numpy.array([ex_, ey_, ez_]).transpose()
return trans
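    # The columns of trans are the camera-frame basis vectors expressed in the
    # fixed frame, which is why run() multiplies spacenav twists by this matrix
    # to convert them from the camera frame into the fixed frame.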
def run(self):
rate = rospy.Rate(self.hz)
while not rospy.is_shutdown():
if self.twist is None:
continue
self.lock = True
msg = self.twist
linear_ = numpy.array([[msg.linear.x, msg.linear.y, msg.linear.z]]).transpose()
linear = numpy.dot(self.get_transformation(), linear_)
self.camera.eye.point.x += self.Kp*linear[0][0]
self.camera.eye.point.y += self.Kp*linear[1][0]
self.camera.eye.point.z += self.Kp*linear[2][0]
self.camera.focus.point.x += self.Kp*linear[0][0]
self.camera.focus.point.y += self.Kp*linear[1][0]
self.camera.focus.point.z += self.Kp*linear[2][0]
rot = numpy.dot(self.get_transformation(), numpy.array([[0, 0.1*msg.angular.z, -0.1*msg.angular.y]]).transpose())
self.camera.focus.point.x += self.Kr*rot[0][0]
self.camera.focus.point.y += self.Kr*rot[1][0]
self.camera.focus.point.z += self.Kr*rot[2][0]
self.publisher.publish(self.camera)
self.lock = False
rate.sleep()
if __name__=='__main__':
try:
rospy.init_node('rospy_spacenav', anonymous=True)
runner = RvizSpacenav()
runner.run()
except rospy.ROSInterruptException:
pass
| 2.171875 | 2 |
main.py | 850078897/Project-game- | 0 | 12769572 | <filename>main.py
import pygame
import pygame_textinput
import test_game
from pygame.locals import *
from pygame.compat import unichr_, unicode_
import sys
import locale
from room import Room
pygame.init()
display_width = 800
display_height = 533
clock = pygame.time.Clock()
crashed = False
intro_page = pygame.image.load('space.jpg')
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('lubenwei')
textinput = pygame_textinput.TextInput()
fg = 250,240,230
bg = 5,5,5
font = pygame.font.Font(None, 80)
text = 'Fonty'
size = font.size(text)
black = (0,0,0)
white = (255,255,255)
x = (display_width * 0.45)
y = (display_height * 0.8)
x_change = 0
car_speed = 0
while not crashed:
roomnum=0
events = pygame.event.get()
    for event in events:
if event.type == pygame.QUIT:
crashed = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = -5
elif event.key == pygame.K_RIGHT:
x_change = 5
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
x_change = 0
x += x_change
gameDisplay.fill(white)
this_room=Room(roomnum,gameDisplay)
this_room.showimg()
this_room.showtext()
#gameDisplay.blit(intro_page, (0, 0))
# load font, prepare values
# font = pygame.font.Font(None, 80)
#
# text = test_game.newbee
#
# textlist = wrapline(text,font,20)
#
# size = font.size(text)
# no AA, no transparancy, normal
# ren = font.render(text, 0, fg, bg)
#
# gameDisplay.blit(ren, (10, 10))
# Feed it with events every frame
#textinput.update(events)
# Blit its surface onto the screen
# gameDisplay.blit(textinput.get_surface(), (10, 10))
# a_sys_font = pygame.font.SysFont("BaBa", 60)
# a_sys_font.set_bold(1)
# mytext = textinput.get_text()
# ren = a_sys_font.render(mytext , 1 , fg, bg)
# gameDisplay.blit(ren, (0,60))
# a_sys_font.set_bold
pygame.display.update()
clock.tick(60)
pygame.quit()
quit()
| 3.015625 | 3 |
utils.py | codePerfectPlus/ComputerVision-Essentials | 15 | 12769573 | <gh_stars>10-100
from google_drive_downloader import GoogleDriveDownloader as gdd
download_dict = {
"16gAKScYAW0bZkyRgcLF71x28du_mLY8-": "assets/res10_300x300_ssd_iter_140000.caffemodel",
"1jUIwxXjxz8oC7I2Ta9vtiozsB4i95043": "Media/people-walking.mp4",
"1Q7qfr11olEFguRRkKRnC1Yah3ZnJCUnM": "assets/mask_rcnn_coco.h5"
}
for file_id, dest_path in download_dict.items():
gdd.download_file_from_google_drive(file_id=file_id,
dest_path=dest_path,
unzip=True)
| 2.578125 | 3 |
tests/helpers_for_tests.py | CrazyVideoGamez/auto-backup | 0 | 12769574 | <reponame>CrazyVideoGamez/auto-backup
"""Helpers for my tests"""
import json
from pathlib import Path
from subprocess import Popen, PIPE
from collections import namedtuple
# Runargs function
Output = namedtuple('Output', 'out, err')
def run_args_on_parser(cmd: list):
"""Run arguments to parser (doesn't create side effects).
    Note: output.out contains plain strings rather than pathlib objects, so convert to pathlib.Path if you need to work with the returned file or directory path."""
if not isinstance(cmd, list):
raise TypeError("cmd arg is not a list")
p = Popen("python ./backup/parser.py " + " ".join(cmd), shell=True, stdout=PIPE, stderr=PIPE)
out, err = [byte_str.decode("utf-8") for byte_str in p.communicate()]
if not out == "":
json_loaded_out = json.loads(out)
else:
json_loaded_out = ""
output = Output(json_loaded_out, err)
return output
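# Example (the subcommand is hypothetical; the parser must print JSON on stdout
# for run_args_on_parser to decode it):
#
#     output = run_args_on_parser(["list"])
#     print(output.out, output.err)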
def get_queries() -> list:
try:
return json.loads(Path("./data/db.json").read_text())
except:
return []
# Reset Functions
def rm_default_dir():
try:
Path("./data/dir.txt").unlink()
    except FileNotFoundError:
        pass
def reset_queries():
Path("./data/db.json").write_text("[]")
if __name__ == '__main__':
rm_default_dir()
reset_queries() | 2.734375 | 3 |
mpos/app_upgrade.py | cackharot/ngen-milk-pos | 0 | 12769575 | <filename>mpos/app_upgrade.py
import sys
import os
import re
from datetime import datetime
from glob import glob
import zipfile
from subprocess import call
EXTRACT_PATH = "/home/pi"
USB_DRV_PATH = "/home/pi/usbdrv"
FIRMWARE_PATH = "/home/pi/usbdrv/mpos_firmware"
# USB_DRV_PATH = "/media/usb0"
USB_DRV_MOUNT_PATH = "/dev/sda1"
VERSION_FILE = EXTRACT_PATH + "/mpos/_version.py"
DB_FILE = EXTRACT_PATH + "/mpos/web/app.db"
MIDORI_CONFIG_PATH = "/home/pi/.midori/"
APP_CONFIG_PATH = EXTRACT_PATH + "/mpos/config/"
APP_EXECUTE_FILE = EXTRACT_PATH + "/mpos/web/runprod.sh"
SPLASHSCREEN_FILE = dict(source=FIRMWARE_PATH + '/splash.png', target='/etc/splash.png')
COW_IMAGE_FILE = dict(source=FIRMWARE_PATH+'/cow.png', target=EXTRACT_PATH + '/mpos/web/static/images/cow.png')
FOOTER_IMAGE_FILE = dict(source=FIRMWARE_PATH+'/footer.png', target=EXTRACT_PATH + '/mpos/web/static/images/footer.png')
def is_usb_storage_connected():
return os.path.exists(USB_DRV_MOUNT_PATH) and os.path.ismount(USB_DRV_PATH)
def web_app_upgrade_task():
files = glob(os.path.join(FIRMWARE_PATH, "mpos*.zip"))
if len(files) > 0:
source_filename = files[0]
print("Found upgrade file at %s" % source_filename)
try:
with zipfile.ZipFile(source_filename) as zf:
current_version, upgrade_version = get_versions(zf)
if can_upgrade_version(current_version, upgrade_version):
backup_db(current_version)
zf.extractall(path=EXTRACT_PATH)
link_config_files()
restart_uwsgi()
print('UPGRADE SUCCESS!!! (%s ---> %s)' % (current_version, upgrade_version))
else:
print("FIRMWARE is already upto date (%s == %s)!!!" % (current_version, upgrade_version))
except Exception as e:
print(e)
print("UPGRADE FAILED!!!")
else:
print("No application upgrade file found..")
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def backup_db(version):
if not os.path.exists(DB_FILE):
return
filename = "app_data_%s.db" % (version.replace('.', '_'))
dest_file = os.path.join(FIRMWARE_PATH, 'data_backup', filename)
mkdir(os.path.dirname(dest_file))
call(['cp', DB_FILE, dest_file])
print("Backup database complete, %s" % (dest_file))
def link_config_files():
if not os.path.exists(APP_CONFIG_PATH):
print("No config folder found!")
return
link_xinitrc()
link_midori()
def link_xinitrc():
xinitrc_file = os.path.join(APP_CONFIG_PATH,'.xinitrc')
link_file(xinitrc_file, "/home/pi/.xinitrc")
def link_midori():
config_file = os.path.join(APP_CONFIG_PATH,'midori_config')
accels_file = os.path.join(APP_CONFIG_PATH,'midori_accels')
mkdir(MIDORI_CONFIG_PATH)
link_file(config_file, os.path.join(MIDORI_CONFIG_PATH,'config'))
link_file(accels_file, os.path.join(MIDORI_CONFIG_PATH,'accels'))
def link_file(src, dest):
if not os.path.exists(src):
return
try:
os.remove(dest)
os.symlink(src, dest)
print("Linking config file %s -> %s" % (src, dest))
except Exception as e:
print(e)
def restart_uwsgi():
# killall -9 uwsgi 2>/dev/null;
# sudo uwsgi --ini /home/pi/mpos/web/uwsgi.ini;
call([APP_EXECUTE_FILE])
def get_versions(zf):
current_version = read_version(VERSION_FILE)
upgrade_version = None
for member in zf.infolist():
if not '_version.py' in member.filename:
continue
with zf.open(member) as f:
upgrade_version = parse_version(f.read())
break
return current_version, upgrade_version
def can_upgrade_version(current_version, upgrade_version):
return upgrade_version != None and current_version != None and upgrade_version != current_version
def read_version(filename):
if not os.path.exists(filename):
return None
data = None
with open(VERSION_FILE, 'r') as r:
data = r.read()
return parse_version(data)
def parse_version(text):
s = re.search('__version__ = \'(?P<v>.*)\'', text)
if not s:
return None
return s.group('v')
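# e.g. parse_version("__version__ = '1.2.0'") returns '1.2.0';
# it returns None when no version assignment is found.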
def splash_screen_upgrade_task():
update_image_file(SPLASHSCREEN_FILE['source'], SPLASHSCREEN_FILE['target'])
def web_app_image_upgrade_task():
update_image_file(COW_IMAGE_FILE['source'], COW_IMAGE_FILE['target'])
update_image_file(FOOTER_IMAGE_FILE['source'], FOOTER_IMAGE_FILE['target'])
def update_image_file(src, dest):
if not os.path.exists(src):
return
os.remove(dest)
os.link(src, dest)
tasks = [web_app_upgrade_task, splash_screen_upgrade_task, web_app_image_upgrade_task]
if __name__ == "__main__":
print("MPOS --- Checking for upgrade file in pendrive")
if not is_usb_storage_connected():
print("NO USB PENDRIVE FOUND")
print("Exiting upgrade...")
elif not os.path.exists(FIRMWARE_PATH):
print("No upgrade file found..")
else:
for task in tasks:
task()
| 2.1875 | 2 |
plottrace.py | gvrooyen/SocialLearning | 1 | 12769576 | <reponame>gvrooyen/SocialLearning
# Copyright (c) 2012 Stellenbosch University, 2012
# This source code is released under the Academic Free License 3.0
# See https://github.com/gvrooyen/SocialLearning/blob/master/LICENSE for the full text of the license.
# Author: <NAME> <<EMAIL>>
"""
Script to estimate the rate at which the simulator's estimate of an agent's fitness
converges to its final value. This can be used to decide how many rounds to play during
a genetic programming generation.
"""
from matplotlib.pylab import *
import matplotlib.pyplot as plt
from numpy import *
import pymongo
connection = pymongo.Connection()
db = connection.SocialLearning
# db.authenticate('', '')
dbc = db.trace_payoffs
estimate = []
X = range(0,10000,100)
for (i, record) in enumerate(dbc.find(limit=100)):
trace = 1. * array(record['trace'])
trace /= trace[-1]
trace = trace[0::100]
trace = abs(trace - 1.)
diff = zeros(100)
diff[1:100] = trace[1:100] - trace[0:99]
estimate.append(trace)
trace_mean = mean(estimate,0)
trace_std = std(estimate,0)
print size(X)
print size(trace_mean)
print size(trace_std)
plt.figure()
plt.errorbar(X,trace_mean,trace_std)
plt.figure()
plt.plot(diff)
show()
| 2.65625 | 3 |
python_modules/dagster/dagster/core/types/config_schema.py | vishvananda/dagster | 0 | 12769577 | from dagster import check
from dagster.utils import single_item
from .builtin_enum import BuiltinEnum
from .config import ConfigType, List, Nullable
from .wrapping import WrappingListType, WrappingNullableType
class InputSchema:
@property
def schema_type(self):
check.not_implemented(
'Must override schema_type in {klass}'.format(klass=type(self).__name__)
)
def construct_from_config_value(self, config_value):
return config_value
def resolve_config_cls_arg(config_cls):
if isinstance(config_cls, BuiltinEnum):
return ConfigType.from_builtin_enum(config_cls)
elif isinstance(config_cls, WrappingListType):
return List(resolve_config_cls_arg(config_cls.inner_type))
elif isinstance(config_cls, WrappingNullableType):
return Nullable(resolve_config_cls_arg(config_cls.inner_type))
else:
check.type_param(config_cls, 'config_cls')
check.param_invariant(issubclass(config_cls, ConfigType), 'config_cls')
return config_cls.inst()
def make_bare_input_schema(config_cls):
config_type = resolve_config_cls_arg(config_cls)
class _InputSchema(InputSchema):
@property
def schema_type(self):
return config_type
return _InputSchema()
class OutputSchema:
@property
def schema_type(self):
check.not_implemented(
'Must override schema_type in {klass}'.format(klass=type(self).__name__)
)
def materialize_runtime_value(self, _config_value, _runtime_value):
check.not_implemented('Must implement')
def _create_input_schema(config_type, func):
class _InputSchema(InputSchema):
@property
def schema_type(self):
return config_type
def construct_from_config_value(self, config_value):
return func(config_value)
return _InputSchema()
def input_schema(config_cls):
config_type = resolve_config_cls_arg(config_cls)
return lambda func: _create_input_schema(config_type, func)
def input_selector_schema(config_cls):
config_type = resolve_config_cls_arg(config_cls)
check.param_invariant(config_type.is_selector, 'config_cls')
def _wrap(func):
def _selector(config_value):
selector_key, selector_value = single_item(config_value)
return func(selector_key, selector_value)
return _create_input_schema(config_type, _selector)
return _wrap
def _create_output_schema(config_type, func):
class _OutputSchema(OutputSchema):
@property
def schema_type(self):
return config_type
def materialize_runtime_value(self, config_value, runtime_value):
return func(config_value, runtime_value)
return _OutputSchema()
def output_schema(config_cls):
config_type = resolve_config_cls_arg(config_cls)
return lambda func: _create_input_schema(config_type, func)
def output_selector_schema(config_cls):
config_type = resolve_config_cls_arg(config_cls)
check.param_invariant(config_type.is_selector, 'config_cls')
def _wrap(func):
def _selector(config_value, runtime_value):
selector_key, selector_value = single_item(config_value)
return func(selector_key, selector_value, runtime_value)
return _create_output_schema(config_type, _selector)
return _wrap
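# Usage sketch (the selector config type and function bodies are illustrative):
#
#     @input_selector_schema(SomeSelectorConfigType)
#     def load_input(selector_key, selector_value):
#         ...  # build and return the input value for the chosen selector
#
#     @output_selector_schema(SomeSelectorConfigType)
#     def write_output(selector_key, selector_value, runtime_value):
#         ...  # materialize runtime_value according to the chosen selector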
| 2.09375 | 2 |
examples/Skype_model/agent_skypeClient.py | Juliet-Chunli/cnss | 14 | 12769578 | <reponame>Juliet-Chunli/cnss<gh_stars>10-100
'''Implementation of a Skype-like Peer-2-Peer network agent
@author: <NAME> <<EMAIL>>
'''
from ComplexNetworkSim import NetworkAgent, Sim
#states:
DISABLED = 0
ENABLED = 1
ENABLED_S = 2
DISABLED_S = 3
DEAD = 4
#type:
NODE = 0
SUPERNODE = 1
class Skype(NetworkAgent):
""" agent behaviour for Skype-like Peer-2-Peer network simulation"""
#class constants (shared between all instances)
TIME = 1.0
def __init__(self, state, initialiser):
NetworkAgent.__init__(self, state, initialiser)
self.threshold = self.globalSharedParameters['threshold']
self.currentSupernodeID = -1
self.connected = False
#note the "eval(" - parameter has to be a string of executable Python code
self.restartTime = eval(self.globalSharedParameters['restart_time'])
self.knownSupernodes = []
self.knownSupernodesDistribute = []
self.supers = []
self.connections = 0
def Run(self):
self.setInitialNodeType()
while True:
#regular node behaviour
if self.nodeType == NODE:
if not self.connected or self.getAgent(self.currentSupernodeID).state >= DISABLED_S:
self.state = DISABLED
self.reconnect()
#supernode behaviour
elif self.nodeType == SUPERNODE:
if self.state == DEAD:
yield Sim.passivate, self
if not self.knownSupernodes:
self.computeKnownSupernodes()
if self.tooManyConnections() or self.state==DISABLED_S:
self.deactivate()
yield Sim.hold, self, self.restartTime
self.state = ENABLED_S
else:
pass
#in any case:
yield Sim.hold, self, NetworkAgent.TIMESTEP_DEFAULT
    def computeKnownSupernodes(self):
self.knownSupernodes = self.globalSharedParameters['supernodes']
self.knownSupernodesDistribute = self.knownSupernodes[:] #a copy
def deactivate(self):
self.state = DISABLED_S
self.connections = 0
def deactivate_permanently(self):
self.deactivate()
self.state = DEAD
def reconnect(self):
if self.connected:
self.globalTopology.remove_edge(self.id, self.currentSupernodeID)
self.connected = False
if self.supers:
self.currentSupernodeID = Skype.r.choice(self.supers)
superagent = self.getAgent(self.currentSupernodeID)
if superagent.state == ENABLED_S:
superagent.connections += 1
if superagent.connections > self.globalSharedParameters['threshold']:
superagent.deactivate()
self.globalTopology.add_edge(self.id, self.currentSupernodeID)
self.state = ENABLED
self.connected = True
else:
self.queryNode(self.globalSharedParameters['permanent_supernodes'][0])
def queryNode(self, id):
i = 0
while i < self.globalSharedParameters['cache_size']:
agent = self.getAgent(id)
if agent.state >= DISABLED_S:
return
if agent.knownSupernodesDistribute:
self.supers.append(agent.knownSupernodesDistribute.pop())
else:
self.r.shuffle(agent.knownSupernodes)
agent.knownSupernodesDistribute = agent.knownSupernodes[:] #a copy
self.supers.append(agent.knownSupernodesDistribute.pop())
i += 1
def setInitialNodeType(self):
if len(self.getNeighbouringNodes()) > 2:
self.nodeType = SUPERNODE
self.state = ENABLED_S
else:
self.nodeType = NODE
def tooManyConnections(self):
''' return true if node has too many connections,
false otherwise'''
return len(self.getNeighbouringNodes()) > \
(self.globalSharedParameters['threshold'] + len(self.knownSupernodes)) | 2.90625 | 3 |
blog/urls.py | boost-entropy-repos-org/ojas | 0 | 12769579 | from django.conf.urls import url
from django.urls import path,include
from . import views
from .feeds import LatestPostsFeed
from .views import search, PostViewSet
from rest_framework import routers
from django.views.generic import TemplateView
router = routers.DefaultRouter()
router.register(r'api', PostViewSet)
app_name = 'blog'
urlpatterns = [
path('', views.most_viewed, name='most_viewed'),
path('article/', views.post_list, name='post_list'),
url(r'^tag/(?P<tag_slug>[-\w]+)/$', views.post_list, name='post_list_by_tag'),
url(r'^blog/(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})/(?P<post>[-\w]+)/$', views.post_detail, name='post_detail'),
path('feed/', LatestPostsFeed(), name='post_feed'),
path('about/', views.about_page, name='about'),
path('contact/', views.contact_page, name='contact_page'),
url(r'^author/(?P<post_author>[-\w]+)/$',views.post_author, name='post_author'),
path('search', search, name='search'),
path('me/', views.me, name='me'),
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls')),
path('offline/', views.offline, name='offline'),
path('fill-dynamic-cache/<int:id>', views.fill_dynamic_cache, name='fill_dynamic_cache'),
path('must-not-cache', views.must_not_cache, name='must_not_cache'),
path(
'sw.js',
views.ServiceWorkerView.as_view(),
name=views.ServiceWorkerView.name,
),
] | 1.992188 | 2 |
src/electionguard/utils.py | ajweiss/electionguard-python | 1 | 12769580 | <reponame>ajweiss/electionguard-python<gh_stars>1-10
from datetime import datetime
from typing import Callable, Optional, TypeVar
T = TypeVar("T")
U = TypeVar("U")
def get_optional(optional: Optional[T]) -> T:
"""
General-purpose unwrapping function to handle `Optional`.
Raises an exception if it's actually `None`, otherwise
returns the internal type.
"""
assert optional is not None, "Unwrap called on None"
return optional
def match_optional(
optional: Optional[T], none_func: Callable[[], U], some_func: Callable[[T], U]
) -> U:
"""
General-purpose pattern-matching function to handle `Optional`.
If it's actually `None`, the `none_func` lambda is called.
Otherwise, the `some_func` lambda is called with the value.
"""
if optional is None:
return none_func()
else:
return some_func(optional)
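# e.g. match_optional(None, lambda: 0, lambda v: v + 1) == 0
#      match_optional(41, lambda: 0, lambda v: v + 1) == 42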
def get_or_else_optional(optional: Optional[T], alt_value: T) -> T:
"""
General-purpose getter for `Optional`. If it's `None`, returns the `alt_value`.
Otherwise, returns the contents of `optional`.
"""
if optional is None:
return alt_value
else:
return optional
def get_or_else_optional_func(optional: Optional[T], func: Callable[[], T]) -> T:
"""
General-purpose getter for `Optional`. If it's `None`, calls the lambda `func`
and returns its value. Otherwise, returns the contents of `optional`.
"""
if optional is None:
return func()
else:
return optional
def flatmap_optional(optional: Optional[T], mapper: Callable[[T], U]) -> Optional[U]:
"""
General-purpose flatmapping on `Optional`. If it's `None`, returns `None` as well,
otherwise returns the lambda applied to the contents.
"""
if optional is None:
return None
else:
return mapper(optional)
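# e.g. flatmap_optional(None, str) is None, while flatmap_optional(3, str) == '3'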
def to_ticks(date_time: datetime) -> int:
"""
Return the number of ticks for a date time
:param date_time: Date time to convert
:return: number of ticks
"""
t0 = datetime(1, 1, 1)
seconds = int((date_time - t0).total_seconds())
ticks = seconds * 10 ** 7
return ticks
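# e.g. to_ticks(datetime(1, 1, 2)) == 864_000_000_000
# (one day = 86400 seconds at 10**7 ticks per second)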
| 2.953125 | 3 |
tests/test_dependencies/test_overrides.py | adriangb/xpresso | 75 | 12769581 | <filename>tests/test_dependencies/test_overrides.py
from dataclasses import dataclass
from di.dependant import Marker
from xpresso import App, Depends, Path
from xpresso.dependencies import Injectable
from xpresso.testclient import TestClient
from xpresso.typing import Annotated
def test_override_with_marker() -> None:
def dep() -> int:
...
async def endpoint(v: Annotated[int, Depends(dep)]) -> int:
return v
app = App([Path("/", get=endpoint)])
app.dependency_overrides[dep] = lambda: 2
client = TestClient(app)
resp = client.get("/")
assert resp.status_code == 200, resp.content
assert resp.json() == 2
def test_override_with_non_xpresso_marker() -> None:
def dep() -> int:
...
async def endpoint(v: Annotated[int, Marker(dep, scope="endpoint")]) -> int:
return v
app = App([Path("/", get=endpoint)])
app.dependency_overrides[dep] = lambda: 2
client = TestClient(app)
resp = client.get("/")
assert resp.status_code == 200, resp.content
assert resp.json() == 2
def test_override_match_by_annotation() -> None:
@dataclass
class Foo:
bar: str = "bar"
async def endpoint(foo: Foo) -> str:
return foo.bar
app = App([Path("/", get=endpoint)])
app.dependency_overrides[Foo] = lambda: Foo(bar="baz")
client = TestClient(app)
resp = client.get("/")
assert resp.status_code == 200, resp.content
assert resp.json() == "baz"
def test_override_injectable_cls() -> None:
@dataclass
class Foo(Injectable):
bar: str = "bar"
async def endpoint(foo: Foo) -> str:
return foo.bar
app = App([Path("/", get=endpoint)])
app.dependency_overrides[Foo] = lambda: Foo(bar="baz")
client = TestClient(app)
resp = client.get("/")
assert resp.status_code == 200, resp.content
assert resp.json() == "baz"
| 2.171875 | 2 |
eclipse-mosquitto/test/broker/09-plugin-auth-acl-pub.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 2 | 12769582 | <gh_stars>1-10
#!/usr/bin/env python3
# Bug specific test - if a QoS2 publish is denied, then we publish again with
# the same mid to a topic that is allowed, does it work properly?
from mosq_test_helper import *
def write_config(filename, port, plugin_ver):
with open(filename, 'w') as f:
f.write("port %d\n" % (port))
f.write("auth_plugin c/auth_plugin_v%d.so\n" % (plugin_ver))
f.write("allow_anonymous false\n")
def do_test(plugin_ver):
port = mosq_test.get_port()
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port, plugin_ver)
rc = 1
keepalive = 10
connect1_packet = mosq_test.gen_connect("connect-uname-pwd-test", keepalive=keepalive, username="readwrite", clean_session=False)
connack1_packet = mosq_test.gen_connack(rc=0)
connect2_packet = mosq_test.gen_connect("connect-uname-pwd-test", keepalive=keepalive, username="readwrite", clean_session=False)
connack2_packet = mosq_test.gen_connack(rc=0,flags=1)
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "readonly", 2)
suback_packet = mosq_test.gen_suback(mid, 2)
mid = 2
publish1_packet = mosq_test.gen_publish("readonly", qos=2, mid=mid, payload="message")
pubrec1_packet = mosq_test.gen_pubrec(mid)
pubrel1_packet = mosq_test.gen_pubrel(mid)
pubcomp1_packet = mosq_test.gen_pubcomp(mid)
mid = 2
publish2_packet = mosq_test.gen_publish("writeable", qos=1, mid=mid, payload="message")
puback2_packet = mosq_test.gen_puback(mid)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)
try:
sock = mosq_test.do_client_connect(connect1_packet, connack1_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, publish1_packet, pubrec1_packet, "pubrec1")
sock.close()
sock = mosq_test.do_client_connect(connect2_packet, connack2_packet, timeout=20, port=port)
mosq_test.do_send_receive(sock, publish2_packet, puback2_packet, "puback2")
mosq_test.do_ping(sock)
rc = 0
sock.close()
except mosq_test.TestError:
pass
finally:
os.remove(conf_file)
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
do_test(4)
do_test(5)
| 2.1875 | 2 |
mopidy_sevensegmentdisplay/actor.py | JuMalIO/mopidy-sevensegmentdisplay | 0 | 12769583 | <reponame>JuMalIO/mopidy-sevensegmentdisplay
from __future__ import unicode_literals
import pykka
import logging
from mopidy.core import CoreListener
from worker import Worker
logger = logging.getLogger('Frontend')
class Frontend(pykka.ThreadingActor, CoreListener):
worker = Worker()
def __init__(self, config, core):
self.config = config['sevensegmentdisplay']
self.core = core
super(Frontend, self).__init__()
def on_start(self):
logger.warning('started')
self.worker.start(self.config, self.core)
def on_stop(self):
logger.warning('stopped')
self.worker.stop()
def on_failure(self, exception_type, exception_value, traceback):
logger.warning('failing')
self.worker.stop()
def on_event(self, event, **kwargs):
if (event == 'stream_title_changed'):
self.worker.on_seeked()
return CoreListener.on_event(self, event, **kwargs)
def playback_state_changed(self, old_state, new_state):
self.worker.on_playback_state_changed(old_state, new_state)
# def track_playback_started(self, tl_track):
# logger.warning('playback_started!')
# self.worker.on_playing()
# def track_playback_paused(self, tl_track, time_position):
# logger.warning('playback_paused!')
# self.worker.on_paused()
# def track_playback_resumed(self, tl_track, time_position):
# logger.warning('playback_resumed!')
# self.worker.on_playing()
# def track_playback_ended(self, tl_track, time_position):
# logger.warning('playback_ended!')
# self.worker.on_stopped()
def volume_changed(self, volume):
self.worker.on_volume_changed(volume)
def mute_changed(self, mute):
self.worker.on_mute(mute)
def seeked(self, time_position):
self.worker.on_seeked()
# def playlists_loaded(self):
# logger.warning('Received playlists_loaded event')
# def playlist_changed(self, playlist):
# logger.warning('Received playlist_changed event')
| 2.03125 | 2 |
src/secondaires/navigation/equipage/postes/__init__.py | stormi/tsunami | 0 | 12769584 | <filename>src/secondaires/navigation/equipage/postes/__init__.py
# -*-coding:Utf-8 -*
# Copyright (c) 2012 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package postes contenant les différents postes.
Chaque poste est dans un fichier distinct.
La définition d'un poste se fait dans la classe Poste, détaillée plus bas.
Ce fichier contient également la métaclasse des postes, MetaPoste.
"""
postes = {}
class MetaPoste(type):
"""Métaclasse des postes disponibles pour un membre d'équipage.
Elle ajoute le poste dans le dictionnaire 'postes' si il possède
un nom.
"""
def __init__(cls, nom, bases, contenu):
"""Constructeur de la métaclasse"""
type.__init__(cls, nom, bases, contenu)
if cls.nom:
postes[cls.nom] = cls()
class Poste(metaclass=MetaPoste):
"""Classe définissant un poste occupé par un membre d'équipage.
Les attributs d'un poste sont :
nom -- le nom du poste
autorite -- un entier définissant l'autorité du poste
nom_parent -- le nom du poste parent
"""
nom = ""
points = 0
def __init__(self):
"""Constructeur du poste."""
self.autorite = 0
self.nom_parent = ""
@property
def parent(self):
"""Retourne le poste parent si existe ou None sinon."""
return postes.get(self.nom_parent)
def __repr__(self):
return "<poste {}>".format(repr(self.nom))
def __str__(self):
return self.nom
from . import capitaine
from . import second
from . import maitre_equipage
from . import officier
from . import matelot
from . import artilleur
from . import voilier
from . import charpentier
from . import vigie
from . import rameur
from . import sabreur
from . import chirurgien
from . import maitre_cuisinier
| 1.210938 | 1 |
manage.py | Mmlh1/effbot | 3 | 12769585 | import subprocess
import os, signal, time
def run_as_test():
proc = subprocess.Popen('python ./bot.py')
time.sleep(10)
if getattr(signal, 'SIGKILL', None):
os.kill(proc.pid, signal.SIGKILL)
else:
os.kill(proc.pid, signal.SIGTERM)
return True
# def run_as_live():
# proc = subprocess.Popen('python bot.py')
# try:
# while True:
# time.sleep(10)
#     except KeyboardInterrupt:
# os.kill(proc.pid, signal.SIGKILL)
| 2.421875 | 2 |
presidio-analyzer/presidio_analyzer/batch_analyzer_engine.py | presid-io/presidio | 0 | 12769586 | import logging
from typing import List, Iterable, Dict, Union, Any, Optional, Iterator, Tuple
from presidio_analyzer import DictAnalyzerResult, RecognizerResult, AnalyzerEngine
from presidio_analyzer.nlp_engine import NlpArtifacts
logger = logging.getLogger("presidio-analyzer")
class BatchAnalyzerEngine:
"""
Batch analysis of documents (tables, lists, dicts).
Wrapper class to run Presidio Analyzer Engine on multiple values,
either lists/iterators of strings, or dictionaries.
:param: analyzer_engine: AnalyzerEngine instance to use
for handling the values in those collections.
"""
def __init__(self, analyzer_engine: Optional[AnalyzerEngine] = None):
self.analyzer_engine = analyzer_engine
if not analyzer_engine:
self.analyzer_engine = AnalyzerEngine()
def analyze_iterator(
self,
texts: Iterable[Union[str, bool, float, int]],
language: str,
**kwargs,
) -> List[List[RecognizerResult]]:
"""
Analyze an iterable of strings.
:param texts: An list containing strings to be analyzed.
:param language: Input language
:param kwargs: Additional parameters for the `AnalyzerEngine.analyze` method.
"""
# validate types
texts = self._validate_types(texts)
# Process the texts as batch for improved performance
nlp_artifacts_batch: Iterator[
Tuple[str, NlpArtifacts]
] = self.analyzer_engine.nlp_engine.process_batch(
texts=texts, language=language
)
list_results = []
for text, nlp_artifacts in nlp_artifacts_batch:
results = self.analyzer_engine.analyze(
text=str(text), nlp_artifacts=nlp_artifacts, language=language, **kwargs
)
list_results.append(results)
return list_results
def analyze_dict(
self,
input_dict: Dict[str, Union[Any, Iterable[Any]]],
language: str,
keys_to_skip: Optional[List[str]] = None,
**kwargs,
) -> Iterator[DictAnalyzerResult]:
"""
Analyze a dictionary of keys (strings) and values/iterable of values.
Non-string values are returned as is.
:param input_dict: The input dictionary for analysis
:param language: Input language
:param keys_to_skip: Keys to ignore during analysis
:param kwargs: Additional keyword arguments
for the `AnalyzerEngine.analyze` method.
Use this to pass arguments to the analyze method,
such as `ad_hoc_recognizers`, `context`, `return_decision_process`.
See `AnalyzerEngine.analyze` for the full list.
"""
context = []
if "context" in kwargs:
context = kwargs["context"]
del kwargs["context"]
if not keys_to_skip:
keys_to_skip = []
for key, value in input_dict.items():
if not value or key in keys_to_skip:
yield DictAnalyzerResult(key=key, value=value, recognizer_results=[])
continue # skip this key as requested
# Add the key as an additional context
specific_context = context[:]
specific_context.append(key)
if type(value) in (str, int, bool, float):
results: List[RecognizerResult] = self.analyzer_engine.analyze(
text=str(value), language=language, context=[key], **kwargs
)
elif isinstance(value, dict):
new_keys_to_skip = self._get_nested_keys_to_skip(key, keys_to_skip)
results = self.analyze_dict(
input_dict=value,
language=language,
context=specific_context,
keys_to_skip=new_keys_to_skip,
**kwargs,
)
elif isinstance(value, Iterable):
# Recursively iterate nested dicts
results: List[List[RecognizerResult]] = self.analyze_iterator(
texts=value,
language=language,
context=specific_context,
**kwargs,
)
else:
raise ValueError(f"type {type(value)} is unsupported.")
yield DictAnalyzerResult(key=key, value=value, recognizer_results=results)
@staticmethod
def _validate_types(value_iterator: Iterable[Any]) -> Iterator[Any]:
for val in value_iterator:
if not type(val) in (int, float, bool, str):
err_msg = (
"Analyzer.analyze_iterator only works "
"on primitive types (int, float, bool, str). "
"Lists of objects are not yet supported."
)
logger.error(err_msg)
raise ValueError(err_msg)
yield val
@staticmethod
def _get_nested_keys_to_skip(key, keys_to_skip):
new_keys_to_skip = [
k.replace(f"{key}.", "") for k in keys_to_skip if k.startswith(key)
]
return new_keys_to_skip
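# Usage sketch (values are illustrative; assumes an NLP model for "en" is available):
#
#     batch_analyzer = BatchAnalyzerEngine()
#     for item in batch_analyzer.analyze_dict(
#         {"name": "John Smith", "notes": ["call me tomorrow"]}, language="en"
#     ):
#         print(item.key, item.recognizer_results)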
| 2.609375 | 3 |
2020/day3/day3-toboggan_trajectory.py | calvinatian/Advent-of-Code | 0 | 12769587 | # Part 1: 187
# Part 2: 4723283400
class Slope():
def __init__(self, path):
"""
        Input: path to a text file.
        The text file should contain map data consisting of "." for open spaces and "#" for trees.
"""
# put file data into 2D array of characters "." and "#"
with open(path, "r") as f:
self._data = [line.strip() for line in f.readlines()]
self._loop_length = len(self._data[0])
def __len__(self):
"""
returns number of rows in the data map
"""
return len(self._data)
def __repr__(self):
return str(self._data)
def __iter__(self):
"""
generator for each row as a list of characters
"""
for i in self._data:
yield i
def slopecheck(self, right, down=1):
"""
right = number of spaces to move right (positive integer), can be 0, no default value
down = number of spaces to move down (positive integer). cannot be 0, default = 1
"""
if down < 1 or right < 0:
raise ValueError("Right slope cannot be below 0. Down slope cannot be below 1")
position = trees = skip = 0
for line in self:
if skip > 1:
skip -= 1
continue # skip this loop iteration
if line[position] == "#": # increase tree count
trees += 1
position = (position + right) % self._loop_length # move position to the right
skip = down # set number of lines to skip
return trees
if __name__ == "__main__":
test = Slope(r"2020\day3\input.txt")
t1 = test.slopecheck(3,1) # 187
t2 = test.slopecheck(1,1) # 86
t3 = test.slopecheck(5,1) # 75
t4 = test.slopecheck(7,1) # 89
t5 = test.slopecheck(1,2) # 44
print(t1)
print(t1 * t2 * t3 * t4 *t5)
| 3.984375 | 4 |
faceai/opencv/trackbar.py | xurohanmm/faceai | 9,944 | 12769588 | #coding=utf-8
#调色板
import cv2
import numpy as np
img = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('image')
def callback(x):
pass
# arg 1: trackbar name; arg 2: target window; args 3 and 4: min and max values; arg 5: on-change callback
cv2.createTrackbar('R', 'image', 0, 255, callback)
cv2.createTrackbar('G', 'image', 0, 255, callback)
cv2.createTrackbar('B', 'image', 0, 255, callback)
while (1):
cv2.imshow('image', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
r = cv2.getTrackbarPos('R', 'image')
g = cv2.getTrackbarPos('G', 'image')
b = cv2.getTrackbarPos('B', 'image')
img[:] = [b, g, r]
cv2.destroyAllWindows() | 3.234375 | 3 |
baseOptimizer.py | DanielMckenzie/HadRGD | 0 | 12769589 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Base class. Eventually all optimizers will be implemented as classes with this
as the base class.
"""
class BaseOptimizer(object):
'''
Placeholder for now. Will eventually convert all methods to
instances of this class.
'''
pass | 2.40625 | 2 |
Flooder/SyncFlood.py | FalseG0d/BasicHacking | 0 | 12769590 | <gh_stars>0
#!/usr/bin/python
from scapy.all import *
def syncFlood(src,tgt,message):
for dport in range(1024,65535):
IPLayer=IP(src=src,dst=tgt)
TCPlayer=TCP(sport=4444,dport=dport)
RawLayer=Raw(load=message)
pkt=IPLayer/TCPlayer/RawLayer
send(pkt)
source="8.8.8.8" #raw_input("[*] Enetr Source IP to fake:")
target="192.168.1.1" #raw_input("[*] Enter Target IP Address:")
message="Hello World" #raw_input("[*] Enter message for TCP Payload:")
while True:
syncFlood(source,target,message)
| 2.828125 | 3 |
threatconnect-sdk-addons/app-package-plugin/src/test/resources/package/python/__main__.py | ThreatConnect-Inc/threatconnect-java | 4 | 12769591 | import os
import sys
# import inspect
def main():
lib_directory = None
# All Python Version that will be searched
lib_major_version = 'lib_{}'.format(sys.version_info.major)
lib_minor_version = '{}.{}'.format(lib_major_version, sys.version_info.minor)
lib_micro_version = '{}.{}'.format(lib_minor_version, sys.version_info.micro)
# get all "lib" directories
app_path = os.getcwd()
contents = os.listdir(app_path)
lib_directories = []
for c in contents:
# ensure content starts with lib, is directory, and is readable
if c.startswith('lib') and os.path.isdir(c) and (os.access(c, os.R_OK)):
lib_directories.append(c)
# find most appropriate FULL version
if lib_micro_version in lib_directories:
lib_directory = lib_micro_version
elif lib_minor_version in lib_directories:
lib_directory = lib_minor_version
elif lib_major_version in lib_directories:
lib_directory = lib_major_version
else:
        # find most appropriate PARTIAL version
for ld in lib_directories:
if lib_micro_version in ld:
lib_directory = ld
elif lib_minor_version in ld:
lib_directory = ld
elif lib_major_version in ld:
lib_directory = ld
if lib_directory is None:
print('Failed to find lib directory ({}).'.format(lib_directories))
sys.exit(1)
# use this if you want to include modules from a subfolder
# lib_path = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], lib_directory)))
lib_path = os.path.join(app_path, lib_directory)
for root, directories, files in os.walk(lib_path):
while len(directories) > 0:
module = os.path.join(root, directories.pop(0))
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = '{}{}{}'.format(module, os.pathsep, os.environ['PYTHONPATH'])
else:
os.environ['PYTHONPATH'] = '{}'.format(module)
# os.environ['LD_LIBRARY_PATH'] = ''
sys.argv[0] = sys.executable
sys.argv[1] = '{}.py'.format(sys.argv[1])
os.execv('{}'.format(sys.executable), sys.argv)
if __name__ == '__main__':
main() | 2.71875 | 3 |
app/main.py | huangstan1215/IRCPokerDatabase | 1 | 12769592 | import os
from irc_poker_data_set import IrcPokerData as ipd
basedir = os.path.abspath(os.path.dirname(__file__))
irc_poker_data = ipd()
irc_poker_data.open()
from irc_poker_db import db_session, PlayerRanking
def player_ranking():
if db_session.query(PlayerRanking).count() >= db_session.query(ipd.Player).count():
return
    ### traverse all players
i = 0
players = ipd.db_session.query(ipd.Player).all()
for player in players:
i += 1
if i%100 == 0:
db_session.commit()
p = db_session.query(PlayerRanking).filter(PlayerRanking.player_id==player.id).first()
if p:
print(p.__repr__())
continue
total_pay = 0
total_win = 0
round_num = 0
rounds = ipd.db_session.query(ipd.Round).filter(ipd.Round.player_id==player.id).all()
for round in rounds:
total_pay += round.pay
total_win += round.win
round_num += 1
average_points = (total_win-total_pay)/round_num
player_ranking = PlayerRanking(player_id=player.id,
player_name=player.player_name,
average_points=average_points,
total_pay=total_pay,
total_win=total_win,
round_num=round_num)
print(player_ranking.__repr__())
db_session.add(player_ranking)
db_session.commit()
player_ranking()
| 2.6875 | 3 |
modules/manip_info_formater.py | ANSSI-FR/Faults_analyzer | 6 | 12769593 | import os, sys
from importlib import import_module
sys.path += [os.getcwd()]
def get_params(params_file, import_path=""):
"""Extract the params object from a given Python file.
:param str params_file: the Python file to get the params object from.
:returns: the params object from the given Python file.
"""
sys.path += [import_path]
module_name = params_file.replace(".py", "").replace("/", ".")
module = import_module(module_name)
return module.params
def format_manip_info(manip_info):
"""Format the ANSSI/LSC manip_info dictionary format into a more generic format compliant with the Manip class.
:param dict manip_info: the information about the a manip to format.
:returns: a dictionary containing all the attributes for creating a Manip class.
"""
result_file = manip_info["base_dir"] + "/devices/" + manip_info["device"] + "/manips/" + manip_info["manip_name"] + "/results/" + manip_info["result_name"] + "/main.csv"
analysis_params = get_params(manip_info["params_file"])
id_name = manip_info["id_name"]
ret = {
"result_file": result_file,
"analysis_params": analysis_params,
"id_name": id_name
}
return ret
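# Illustrative mapping (the manip_info values here are hypothetical):
#     {"base_dir": "/data", "device": "dev_a", "manip_name": "m1",
#      "result_name": "r1", ...}
# yields result_file == "/data/devices/dev_a/manips/m1/results/r1/main.csv"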
| 3.4375 | 3 |
buildbuild/properties/tests/test_available_language.py | buildbuild/buildbuild | 5 | 12769594 | from properties.models import AvailableLanguage
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
class TestLanguage(TestCase):
fixtures = ['properties_data.yaml']
def setUp(self):
self.non_exist_lang = "never_exist_lang"
self.lang_python = "Python"
self.lang_ruby = "Ruby"
def test_get_all_available_language(self):
self.assertIsNotNone(AvailableLanguage.objects.all())
def test_get_python(self):
self.assertIsNotNone(AvailableLanguage.objects.get(lang=self.lang_python))
def test_get_python_value_must_be_equal_to_python(self):
language_object = AvailableLanguage.objects.get(lang=self.lang_python)
self.assertEqual(self.lang_python, language_object.lang)
def test_get_ruby(self):
self.assertIsNotNone(AvailableLanguage.objects.get(lang=self.lang_ruby))
def test_get_ruby_value_must_be_equal_to_ruby(self):
language_object = AvailableLanguage.objects.get(lang=self.lang_ruby)
self.assertEqual(self.lang_ruby, language_object.lang)
def test_get_non_exist_language_must_be_fail(self):
self.assertRaises(
ObjectDoesNotExist,
AvailableLanguage.objects.get,
lang = self.non_exist_lang,
)
| 2.328125 | 2 |
pyorb/diagram/orbitallevels.py | sunxb05/aichem | 1 | 12769595 | # -*- coding: utf-8 -*-
from .energydiagram import ED
import matplotlib.pyplot as plt
import re
ADJUSTEDCOEFFICIENT=0.02
def GetFrontIndex(orbSign):
# In: HOMO/LUMO/HOMO-1/LUMO+1
# Out: {'hoLu': 'HOMO', 'num': -1}
for matchString in [r'HOMO(.*)', r'LUMO(.*)']:
matchObj = re.match(matchString, orbSign)
if matchObj:
hoLu = re.sub(r'(.[0-9]+)', "", matchObj.group())
num = re.sub(r'([a-zA-Z]+)', "", matchObj.group())
if num:
return {'hoLu': hoLu, 'num': num}
else:
return {'hoLu': hoLu, 'num': 0}
def AdjustEnergy(orbList, orbEnergylistsorted):
def ModifyMOName(MO):
        # Map an MO label (HOMO, LUMO, HOMO-1, LUMO+1, ...) to a sortable numeric key
if MO == 'HOMO':
newMO = -0.5
elif MO == 'LUMO':
# "L need a extra empty space, which is weired"
newMO = 0.5
else:
newMO = int(MO[-2:])
return newMO
orbListnew = sorted(orbList, key=ModifyMOName)
sortedEnergylist = []
for orb in orbList:
for orbSort, energy in zip(orbListnew, orbEnergylistsorted):
if orbSort == orb:
sortedEnergylist.append(energy)
return sortedEnergylist
def ModifyOrbLevel(orbEnergylist, energyGap, orbList):
orbEnergylistsorted = sorted(orbEnergylist)
# adjust the energy of the orbital levels
for index, i in enumerate(orbEnergylistsorted):
if index != 0:
for j in orbEnergylistsorted[:index]:
if abs(orbEnergylistsorted[index] - j) < energyGap:
orbEnergylistsorted[index] = orbEnergylistsorted[index] + energyGap
sortedEnergylist = AdjustEnergy(orbList, orbEnergylistsorted)
return sortedEnergylist
def NewdataSummarylist(dataSummarylist, energyGap=0.1):
frag1Energylist = []
frag1Orblist = []
frag2Energylist = []
frag2Orblist = []
complexEnergylist = []
complexOrblist = []
frag1Countertotal = 0
frag2Countertotal = 0
complexCountertotal = 0
for orbList in dataSummarylist:
for key, val in list(orbList['compOrb1'].items()):
if val['fragType'] == 'frag1':
frag1Energylist.append(val['fragEnergy'])
frag1Orblist.append(val['fragOrb'])
else:
frag2Energylist.append(val['fragEnergy'])
frag2Orblist.append(val['fragOrb'])
for key, val in list(orbList.items()):
complexEnergylist.append(val['fragOrb1']['compEnergy'])
complexOrblist.append(val['fragOrb1']['compOrb'])
frag1EnergylistModified = ModifyOrbLevel(frag1Energylist, energyGap, frag1Orblist)
frag2EnergylistModified = ModifyOrbLevel(frag2Energylist, energyGap, frag2Orblist)
complexEnergylistModified = ModifyOrbLevel(complexEnergylist, energyGap, complexOrblist)
for orbList in dataSummarylist:
for i in orbList:
frag1Counter = 0
frag2Counter = 0
for j in orbList[i]:
orbList[i][j]["complexModifiedenergy"]=complexEnergylistModified[complexCountertotal]
if orbList[i][j]['fragType'] == "frag1":
orbList[i][j]["fragModifiedenergy"]=frag1EnergylistModified[frag1Countertotal]
frag1Countertotal += 1
frag1Counter += 1
else:
orbList[i][j]["fragModifiedenergy"]=frag2EnergylistModified[frag2Countertotal]
frag2Countertotal += 1
frag2Counter += 1
frag1Countertotal = frag1Countertotal - frag1Counter
frag2Countertotal = frag2Countertotal - frag2Counter
complexCountertotal += 1
frag1Countertotal = frag1Countertotal + frag1Counter
frag2Countertotal = frag2Countertotal + frag2Counter
return dataSummarylist
def OrbMixing(orbital, mixlist, color='b', times=0):
def ModifyMOName(MO):
        # Convert an MO label such as HOMO, LUMO, HOMO-1, LUMO+1 into H, L, H-1, L+1
if MO == 'HOMO':
newMO = 'H' + ' '
elif MO == 'LUMO':
# "L need a extra empty space, which is weired"
newMO = 'L' + ' '
else:
newMO = MO[0:1] + MO[-2:]
return newMO
# add levels
message = ''
orbNumber = 1
mix1Frag1number = 0
# add empty space
orbital.AddLevel(-2, color='w')
# add frag1 level
# check if there are no frag1 orbital invloved
frag1Orbnum = 0
for key, val in list(mixlist['compOrb1'].items()):
if val['fragType'] == 'frag1':
frag1Orbnum = frag1Orbnum+1
if frag1Orbnum == 0:
orbital.AddLevel(-2, color='w')
orbNumber = 2
else:
i = 0
for key, val in list(mixlist['compOrb1'].items()):
if val['fragType'] == 'frag1':
mix1Frag1number+=1
label = val['fragOrb']
energy = val['fragEnergy']
population= val['population']
name = val['fragName']
message = '{0:.3f}'.format(population) + 'e'+ ' ' + '{0:.1f}'.format(energy) + ' ' + str(name)+ ' ' + str(ModifyMOName(val['fragOrb']))
# modify the degenerated levels to the slightly different energy to show on the diagram
energy = val['fragModifiedenergy']
if i == 0:
orbital.AddLevel(energy, leftText=message, color=color)
else:
orbital.AddLevel(energy, position='last', leftText=message, color=color)
i+=1
if GetFrontIndex(label)['hoLu'] == 'HOMO':
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=2)
else:
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=0)
orbNumber+=1
# add complex level
i = 0
for key, val in list(mixlist.items()):
label = val['fragOrb1']['compOrb']
energy = val['fragOrb1']['compEnergy']
name = val['fragOrb1']['compName']
message = str(ModifyMOName(val['fragOrb1']['compOrb'])) + ' ' + str(name) + ' ' + '{0:.1f}'.format(energy)
# modify the degenerated levels to the slightly different energy to show on the diagram
energy = val['fragOrb1']['complexModifiedenergy']
if times == 0:
if i == 0:
orbital.AddLevel(energy, rightText=message, color=color)
else:
orbital.AddLevel(energy, position='last', rightText=message, color=color)
else:
if i == 0:
orbital.AddLevel(energy, leftText=message, color=color)
else:
orbital.AddLevel(energy, position='last', leftText=message, color=color)
i+=1
if GetFrontIndex(label)['hoLu'] == 'HOMO':
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=2)
else:
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=0)
orbNumber+=1
# add frag2 levels
i = 0
for key, val in list(mixlist['compOrb1'].items()):
if val['fragType'] == 'frag2':
label = val['fragOrb']
energy = val['fragEnergy']
population= val['population']
name = val['fragName']
message = str(ModifyMOName(val['fragOrb'])) + ' ' + str(name) + ' ' + '{0:.1f}'.format(energy) + ' ' + '{0:.3f}'.format(population) + 'e'
# modify the degenerated levels to the slightly different energy to show on the diagram
energy = val['fragModifiedenergy']
# move one step right
if i == 0:
orbital.AddLevel(energy, rightText=message, color=color)
else:
orbital.AddLevel(energy, position='last', rightText=message, color=color)
i+=1
if GetFrontIndex(label)['hoLu'] == 'HOMO':
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=2)
else:
orbital.AddElectronBox(levelId=orbNumber,boxes=1, electrons=0)
orbNumber+=1
# add frag1 and complex link
i = 1
for frag1Orb, frag1Val in list(mixlist['compOrb1'].items()):
if frag1Val['fragType'] == 'frag1':
k = 1
for compOrb, compVal in list(mixlist.items()):
coefi = compVal[frag1Orb]['fragCoef']
orbital.AddLink(i, k+mix1Frag1number, color=color, coeficient=coefi, reverse=True)
k+=1
i+=1
# add complex and frag2 link
if frag1Orbnum == 0:
mix1Frag1number = 1
i = 1
for compOrb, compVal in list(mixlist.items()):
k = 1
for frag2Orb, frag2Val in list(mixlist['compOrb1'].items()):
if frag2Val['fragType'] == 'frag2':
coefi = compVal[frag2Orb]['fragCoef']
orbital.AddLink(i+mix1Frag1number, k+mix1Frag1number+len(mixlist), color=color, coeficient=coefi, reverse=True, fragment2complex=False)
k+=1
i+=1
orbital.AddLevel(-2, color='w')
return orbital
def DrawOrbLevels(dataSummarylist, fileName):
energyList = []
for orbList in dataSummarylist:
for key, val in list(orbList['compOrb1'].items()):
energyList.append(val['fragEnergy'])
for key, val in list(orbList.items()):
energyList.append(val['fragOrb1']['compEnergy'])
energyGap = abs(max(energyList) - min(energyList))
dataSummarylistNew = NewdataSummarylist(dataSummarylist, energyGap=energyGap*ADJUSTEDCOEFFICIENT)
orbital = ED(energyGap=energyGap*0.8)
colorList = ['red', 'green', 'black', 'tan', 'red', 'black', 'red', 'green', 'darkorange', 'tan', 'black' ]
orbital = OrbMixing(orbital, dataSummarylistNew[0], color='b', times = 0)
orbital.Plot(showIDs=True)
i = 1
for orbList in dataSummarylistNew[1:]:
orbital.ReInit()
evenOdd = i % 2  # alternate the side on which complex-level labels are drawn
orbital = OrbMixing(orbital, orbList, color=colorList[i-1], times = evenOdd)
orbital.Plot(showIDs=True)
i = i+1
plt.savefig(fileName+'.png',dpi=300)
# plt.show()
plt.close()
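# Hedged illustration (added): ModifyMOName above is local to OrbMixing, so
# the standalone copy below only demonstrates the label shortening it performs.
def _mo_label_examples():  # illustrative only, never called on import
    def shorten(mo):
        if mo == 'HOMO':
            return 'H '
        elif mo == 'LUMO':
            return 'L '
        return mo[0:1] + mo[-2:]
    assert shorten('HOMO-1') == 'H-1'
    assert shorten('LUMO+2') == 'L+2'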
| 2.6875 | 3 |
questioneer/questions/__init__.py | markdentoom/Questioneer | 0 | 12769596 | # https://docs.djangoproject.com/en/4.0/ref/applications/#for-application-authors
# Shouldn't be needed for Django >= 3.2
default_app_config = "questions.apps.QuestionsConfig"
| 1.164063 | 1 |
project/settings.py | AnttiKoistinen431a/parking-permits | 0 | 12769597 | from os import getenv
from pathlib import Path
import dj_database_url
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = getenv("DJANGO_SECRET_KEY")
DEBUG = getenv("DEBUG") == "true"
ALLOWED_HOSTS = ["*"]
AUTH_USER_MODEL = "users_app.CustomUser"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
# disable Django’s static file handling during development so that whitenoise can take over
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"parking_permits_app",
"users_app",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
# WhiteNoiseMiddleware should be above all and just below SecurityMiddleware
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "project.wsgi.application"
DATABASE_URL = getenv("DATABASE_URL")
DATABASES = {"default": dj_database_url.parse(DATABASE_URL)}
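# Hedged example (added): dj_database_url expects a URL-shaped value such as
# DATABASE_URL="postgres://user:password@localhost:5432/parking_permits"
# (placeholder credentials, not this project's real configuration).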
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "static-files"
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
| 1.835938 | 2 |
ci/deployment-tests/runtime_config_deploymenttest.py | RedisLabs/tile-generator | 0 | 12769598 | <reponame>RedisLabs/tile-generator
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
import requests
from tile_generator import opsmgr
class VerifyRuntimeConfig(unittest.TestCase):
def setUp(self):
self.cfinfo = opsmgr.get_cfinfo()
self.schema_version = self.cfinfo['schema_version']
def test_yes(self):
if re.match(r'1\.+', self.schema_version):
print("OpsMan is too old to support runtime config, skipping test")
return
hostname = 'runtime-conf-yes.' + self.cfinfo['system_domain']
url = 'http://' + hostname
response = requests.get(url)
self.assertEqual(response.status_code, 200)
self.assertRegexpMatches(response.text, r'.*Runtime Test Release: Success.*')
def test_no(self):
if re.match(r'1\.+', self.schema_version):
print("OpsMan is too old to support runtime config, skipping test")
return
hostname = 'runtime-conf-no.' + self.cfinfo['system_domain']
url = 'http://' + hostname
response = requests.get(url)
self.assertEqual(response.status_code, 502)
| 2.03125 | 2 |
flink-python/pyflink/table/tests/test_environment_settings.py | imaffe/flink | 1 | 12769599 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.common import Configuration
from pyflink.table import EnvironmentSettings
from pyflink.testing.test_case_utils import PyFlinkTestCase, get_private_field
class EnvironmentSettingsTests(PyFlinkTestCase):
def test_planner_selection(self):
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.check_blink_planner(environment_settings)
# test use_blink_planner
environment_settings = EnvironmentSettings.new_instance().use_blink_planner().build()
self.check_blink_planner(environment_settings)
# test use_any_planner
environment_settings = builder.use_any_planner().build()
self.check_any_planner(environment_settings)
def test_mode_selection(self):
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_streaming_mode
environment_settings = builder.in_streaming_mode().build()
self.assertTrue(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_streaming_mode()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_batch_mode
environment_settings = builder.in_batch_mode().build()
self.assertFalse(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_batch_mode()
self.assertFalse(environment_settings.is_streaming_mode())
def test_with_built_in_catalog_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_CATALOG = gateway.jvm.EnvironmentSettings.DEFAULT_BUILTIN_CATALOG
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), DEFAULT_BUILTIN_CATALOG)
environment_settings = builder.with_built_in_catalog_name("my_catalog").build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), "my_catalog")
def test_with_built_in_database_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_DATABASE = gateway.jvm.EnvironmentSettings.DEFAULT_BUILTIN_DATABASE
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_database_name(),
DEFAULT_BUILTIN_DATABASE)
environment_settings = builder.with_built_in_database_name("my_database").build()
self.assertEqual(environment_settings.get_built_in_database_name(), "my_database")
def test_to_configuration(self):
expected_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
config = expected_settings.to_configuration()
self.assertEqual("BATCH", config.get_string("execution.runtime-mode", "stream"))
def test_from_configuration(self):
config = Configuration()
config.set_string("execution.runtime-mode", "batch")
actual_setting = EnvironmentSettings.from_configuration(config)
self.assertFalse(actual_setting.is_streaming_mode(), "Use batch mode.")
def check_blink_planner(self, settings: EnvironmentSettings):
gateway = get_gateway()
CLASS_NAME = gateway.jvm.EnvironmentSettings.CLASS_NAME
builder = EnvironmentSettings.new_instance()
BLINK_PLANNER_FACTORY = get_private_field(builder._j_builder, "BLINK_PLANNER_FACTORY")
self.assertEqual(
settings._j_environment_settings.toPlannerProperties()[CLASS_NAME],
BLINK_PLANNER_FACTORY)
def check_any_planner(self, settings: EnvironmentSettings):
gateway = get_gateway()
CLASS_NAME = gateway.jvm.EnvironmentSettings.CLASS_NAME
self.assertTrue(
CLASS_NAME not in settings._j_environment_settings.toPlannerProperties())
| 1.703125 | 2 |
crypto/Residues/chal.py | Enigmatrix/hats-ctf-2019 | 5 | 12769600 | from secret import flag
from random import randint
from Crypto.Util.number import isPrime, GCD
def next_prime(s):
if s%6 == 0:
s += 1
elif s%6 != 1:
s = s - s%6 + 5
while True:
if isPrime(s) == True:
break
s+=2
if isPrime(s) == True:
break
s+=4
return s
def genrsa(s):
e = 65537
while True:
r = randint(0, 4**s)
p = next_prime(r)
q = next_prime(r + randint(1, 2**s))
if GCD(p-1, e) == GCD(q-1, e) == 1:
return p * q, 2*e
def enc(m, e, n):
c = hex(pow(m, e, n))[2:].rstrip('L')
return c
def main():
n, e = genrsa(512)
print '''--------*Residue*-------
The modulus is hidden
You only have 6 encryptions, use them wisely
Here is the encrypted flag: ''' + enc(flag, e, n)
for _ in range(6): # exactly six oracle queries, matching the banner
print "Enter a value to be encrypted:"
try:
m = int(raw_input(),16)
except:
print "Nope"
return -1
c = enc(m, e, n)
print "Cipher: " + c
if __name__ == '__main__':
main()
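# Hedged addition (not part of the challenge source): genrsa() above picks q
# as the next prime within 2**s of r while p = next_prime(r), so p and q sit
# close together and n yields to Fermat factorization in a handful of steps.
# The sketch below is a self-contained illustration of that classical idea.
def _isqrt(n):
    # integer square root via Newton's method; exact for arbitrarily large ints
    if n < 2:
        return n
    x, y = n, (n + 1) // 2
    while y < x:
        x, y = y, (y + n // y) // 2
    return x

def _fermat_factor(n):
    # write n = a*a - b*b = (a - b)*(a + b); converges fast when the factors are close
    a = _isqrt(n)
    if a * a < n:
        a += 1
    while True:
        b2 = a * a - n
        b = _isqrt(b2)
        if b * b == b2:
            return a - b, a + b
        a += 1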
| 3.5 | 4 |
pygeocom.py | siyka-au/pygeocom | 4 | 12769601 | from typing import Tuple, Any
from enum import Enum, IntFlag
from datetime import datetime
from collections import namedtuple
from collections.abc import Callable
GRC_TPS = 0x0000 # main return codes (identical to RC_SUP!!)
GRC_SUP = 0x0000 # supervisor task (identical to RCBETA!!)
GRC_ANG = 0x0100 # angle- and inclination
GRC_ATA = 0x0200 # automatic target acquisition
GRC_EDM = 0x0300 # electronic distance meter
GRC_GMF = 0x0400 # geodesy mathematics & formulas
GRC_TMC = 0x0500 # measurement & calc
GRC_MEM = 0x0600 # memory management
GRC_MOT = 0x0700 # motorization
GRC_LDR = 0x0800 # program loader
GRC_BMM = 0x0900 # basics of man machine interface
GRC_TXT = 0x0A00 # text management
GRC_MMI = 0x0B00 # man machine interface
GRC_COM = 0x0C00 # communication
GRC_DBM = 0x0D00 # data base management
GRC_DEL = 0x0E00 # dynamic event logging
GRC_FIL = 0x0F00 # file system
GRC_CSV = 0x1000 # central services
GRC_CTL = 0x1100 # controlling task
GRC_STP = 0x1200 # start + stop task
GRC_DPL = 0x1300 # data pool
GRC_WIR = 0x1400 # wi registration
GRC_USR = 0x2000 # user task
GRC_ALT = 0x2100 # alternate user task
GRC_AUT = 0x2200 # automatization
GRC_AUS = 0x2300 # alternative user
GRC_BAP = 0x2400 # basic applications
GRC_SAP = 0x2500 # system applications
GRC_COD = 0x2600 # standard code function
GRC_BAS = 0x2700 # GeoBasic interpreter
GRC_IOS = 0x2800 # Input-/ output- system
GRC_CNF = 0x2900 # configuration facilities
GRC_XIT = 0x2E00 # XIT subsystem (Excite-Level LIS)
GRC_DNA = 0x2F00 # DNA2 subsystem
GRC_ICD = 0x3000 # cal data management
GRC_KDM = 0x3100 # keyboard display module
GRC_LOD = 0x3200 # firmware loader
GRC_FTR = 0x3300 # file transfer
GRC_VNF = 0x3F00 # reserved for new TPS1200 subsystem
GRC_GPS = 0x4000 # GPS subsystem
GRC_TST = 0x4100 # Test subsystem
GRC_PTF = 0x4F00 # reserved for new GPS1200 subsystem
GRC_APP = 0x5000 # offset for all applications
GRC_RES = 0x7000 # reserved code range
class ReturnCode(Enum):
GRC_OK = GRC_TPS + 0 # Function successfully completed.
GRC_UNDEFINED = GRC_TPS + 1 # Unknown error result unspecified.
GRC_IVPARAM = GRC_TPS + 2 # Invalid parameter detected.\nResult unspecified.
GRC_IVRESULT = GRC_TPS + 3 # Invalid result.
GRC_FATAL = GRC_TPS + 4 # Fatal error.
GRC_NOT_IMPL = GRC_TPS + 5 # Not implemented yet.
GRC_TIME_OUT = GRC_TPS + 6 # Function execution timed out.\nResult unspecified.
GRC_SET_INCOMPL = GRC_TPS + 7 # Parameter setup for subsystem is incomplete.
GRC_ABORT = GRC_TPS + 8 # Function execution has been aborted.
GRC_NOMEMORY = GRC_TPS + 9 # Fatal error - not enough memory.
GRC_NOTINIT = GRC_TPS + 10 # Fatal error - subsystem not initialized.
GRC_SHUT_DOWN = GRC_TPS + 12 # Subsystem is down.
GRC_SYSBUSY = GRC_TPS + 13 # System busy/already in use by another process.\nCannot execute function.
GRC_HWFAILURE = GRC_TPS + 14 # Fatal error - hardware failure.
GRC_ABORT_APPL = GRC_TPS + 15 # Execution of application has been aborted (SHIFT-ESC).
GRC_LOW_POWER = GRC_TPS + 16 # Operation aborted - insufficient power supply level.
GRC_IVVERSION = GRC_TPS + 17 # Invalid version of file ...
GRC_BATT_EMPTY = GRC_TPS + 18 # Battery empty
GRC_NO_EVENT = GRC_TPS + 20 # no event pending.
GRC_OUT_OF_TEMP = GRC_TPS + 21 # out of temperature range
GRC_INSTRUMENT_TILT = GRC_TPS + 22 # instrument tilt out of range
GRC_COM_SETTING = GRC_TPS + 23 # communication error
GRC_NO_ACTION = GRC_TPS + 24 # GRC_TYPE Input 'do no action'
GRC_SLEEP_MODE = GRC_TPS + 25 # Instr. run into the sleep mode
GRC_NOTOK = GRC_TPS + 26 # Function not successfully completed.
GRC_NA = GRC_TPS + 27 # Not available
GRC_OVERFLOW = GRC_TPS + 28 # Overflow error
GRC_STOPPED = GRC_TPS + 29 # System or subsystem has been stopped
GRC_COM_ERO = GRC_COM + 0 # Initiate Extended Runtime Operation (ERO).
GRC_COM_CANT_ENCODE = GRC_COM + 1 # Cannot encode arguments in client.
GRC_COM_CANT_DECODE = GRC_COM + 2 # Cannot decode results in client.
GRC_COM_CANT_SEND = GRC_COM + 3 # Hardware error while sending.
GRC_COM_CANT_RECV = GRC_COM + 4 # Hardware error while receiving.
GRC_COM_TIMEDOUT = GRC_COM + 5 # Request timed out.
GRC_COM_WRONG_FORMAT = GRC_COM + 6 # Packet format error.
GRC_COM_VER_MISMATCH = GRC_COM + 7 # Version mismatch between client and server.
GRC_COM_CANT_DECODE_REQ = GRC_COM + 8 # Cannot decode arguments in server.
GRC_COM_PROC_UNAVAIL = GRC_COM + 9 # Unknown RPC, procedure ID invalid.
GRC_COM_CANT_ENCODE_REP = GRC_COM + 10 # Cannot encode results in server.
GRC_COM_SYSTEM_ERR = GRC_COM + 11 # Unspecified generic system error.
GRC_COM_UNKNOWN_HOST = GRC_COM + 12 # (Unused error code)
GRC_COM_FAILED = GRC_COM + 13 # Unspecified error.
GRC_COM_NO_BINARY = GRC_COM + 14 # Binary protocol not available.
GRC_COM_INTR = GRC_COM + 15 # Call interrupted.
GRC_COM_UNKNOWN_ADDR = GRC_COM + 16 # (Unused error code)
GRC_COM_NO_BROADCAST = GRC_COM + 17 # (Unused error code)
GRC_COM_REQUIRES_8DBITS = GRC_COM + 18 # Protocol needs 8bit encoded chararacters.
GRC_COM_UD_ERROR = GRC_COM + 19 # (Unused error code)
GRC_COM_LOST_REQ = GRC_COM + 20 # (Unused error code)
GRC_COM_TR_ID_MISMATCH = GRC_COM + 21 # Transacation ID mismatch error.
GRC_COM_NOT_GEOCOM = GRC_COM + 22 # Protocol not recognizeable.
GRC_COM_UNKNOWN_PORT = GRC_COM + 23 # (WIN) Invalid port address.
GRC_COM_ILLEGAL_TRPT_SELECTOR = GRC_COM + 24 # (Unused error code)
GRC_COM_TRPT_SELECTOR_IN_USE = GRC_COM + 25 # (Unused error code)
GRC_COM_INACTIVE_TRPT_SELECTOR = GRC_COM + 26 # (Unused error code)
GRC_COM_ERO_END = GRC_COM + 27 # ERO is terminating.
GRC_COM_OVERRUN = GRC_COM + 28 # Internal error: data buffer overflow.
GRC_COM_SRVR_RX_CHECKSUM_ERROR = GRC_COM + 29 # Invalid checksum on server side received.
GRC_COM_CLNT_RX_CHECKSUM_ERROR = GRC_COM + 30 # Invalid checksum on client side received.
GRC_COM_PORT_NOT_AVAILABLE = GRC_COM + 31 # (WIN) Port not available.
GRC_COM_PORT_NOT_OPEN = GRC_COM + 32 # (WIN) Port not opened.
GRC_COM_NO_PARTNER = GRC_COM + 33 # (WIN) Unable to find TPS.
GRC_COM_ERO_NOT_STARTED = GRC_COM + 34 # Extended Runtime Operation could not be started.
GRC_COM_CONS_REQ = GRC_COM + 35 # Att to send cons reqs
GRC_COM_SRVR_IS_SLEEPING = GRC_COM + 36 # TPS has gone to sleep. Wait and try again.
GRC_COM_SRVR_IS_OFF = GRC_COM + 37 # TPS has shut down. Wait and try again.
GRC_EDM_SYSTEM_ERR = GRC_EDM + 1 # Fatal EDM sensor error. See the original EDM sensor error number for the exact reason; in most cases a service problem.
# Sensor user errors
GRC_EDM_INVALID_COMMAND = GRC_EDM + 2 # Invalid command or unknown command, see command syntax.
GRC_EDM_BOOM_ERR = GRC_EDM + 3 # Boomerang error.
GRC_EDM_SIGN_LOW_ERR = GRC_EDM + 4 # Received signal too low, prism too far away, or natural barrier, bad environment, etc.
GRC_EDM_DIL_ERR = GRC_EDM + 5 # Obsolete
GRC_EDM_SIGN_HIGH_ERR = GRC_EDM + 6 # Received signal too strong, prism too near, stray light effect.
# New TPS1200 sensor user errors
GRC_EDM_TIMEOUT = GRC_EDM + 7 # Timeout, measuring time exceeded (signal too weak, beam interrupted,..)
GRC_EDM_FLUKT_ERR = GRC_EDM + 8 # Too much turbulences or distractions
GRC_EDM_FMOT_ERR = GRC_EDM + 9 # Filter motor defective
# Subsystem errors
GRC_EDM_DEV_NOT_INSTALLED = GRC_EDM + 10 # Device like EGL, DL is not installed.
GRC_EDM_NOT_FOUND = GRC_EDM + 11 # Search result invalid. For the exact explanation \nsee in the description of the called function.
GRC_EDM_ERROR_RECEIVED = GRC_EDM + 12 # Communication ok, but an error\nreported from the EDM sensor.
GRC_EDM_MISSING_SRVPWD = GRC_EDM + 13 # No service password is set.
GRC_EDM_INVALID_ANSWER = GRC_EDM + 14 # Communication ok, but an unexpected\nanswer received.
GRC_EDM_SEND_ERR = GRC_EDM + 15 # Data send error, sending buffer is full.
GRC_EDM_RECEIVE_ERR = GRC_EDM + 16 # Data receive error, like\nparity buffer overflow.
GRC_EDM_INTERNAL_ERR = GRC_EDM + 17 # Internal EDM subsystem error.
GRC_EDM_BUSY = GRC_EDM + 18 # Sensor is working already,\nabort current measuring first.
GRC_EDM_NO_MEASACTIVITY = GRC_EDM + 19 # No measurement activity started.
GRC_EDM_CHKSUM_ERR = GRC_EDM + 20 # Calculated checksum, resp. received data wrong\n(only in binary communication mode possible).
GRC_EDM_INIT_OR_STOP_ERR = GRC_EDM + 21 # During start up or shut down phase an\nerror occured. It is saved in the DEL buffer.
GRC_EDM_SRL_NOT_AVAILABLE = GRC_EDM + 22 # Red laser not available on this sensor HW.
GRC_EDM_MEAS_ABORTED = GRC_EDM + 23 # Measurement will be aborted (will be used for the lasersecurity)
# New TPS1200 sensor user error
GRC_EDM_SLDR_TRANSFER_PENDING = GRC_EDM + 30 # Multiple OpenTransfer calls.
GRC_EDM_SLDR_TRANSFER_ILLEGAL = GRC_EDM + 31 # No opentransfer happened.
GRC_EDM_SLDR_DATA_ERROR = GRC_EDM + 32 # Unexpected data format received.
GRC_EDM_SLDR_CHK_SUM_ERROR = GRC_EDM + 33 # Checksum error in transmitted data.
GRC_EDM_SLDR_ADDR_ERROR = GRC_EDM + 34 # Address out of valid range.
GRC_EDM_SLDR_INV_LOADFILE = GRC_EDM + 35 # Firmware file has invalid format.
GRC_EDM_SLDR_UNSUPPORTED = GRC_EDM + 36 # Current (loaded) firmware doesn't support upload.
GRC_EDM_UNKNOW_ERR = GRC_EDM + 40 # Undocumented error from the\nEDM sensor, should not occur.
GRC_EDM_DISTRANGE_ERR = GRC_EDM + 50 # Out of distance range (dist too small or large)
GRC_EDM_SIGNTONOISE_ERR = GRC_EDM + 51 # Signal to noise ratio too small
GRC_EDM_NOISEHIGH_ERR = GRC_EDM + 52 # Noise to high
GRC_EDM_PWD_NOTSET = GRC_EDM + 53 # Password is not set
GRC_EDM_ACTION_NO_MORE_VALID = GRC_EDM + 54 # Elapsed time between prepare and start of fast measurement for ATR too long
GRC_EDM_MULTRG_ERR = GRC_EDM + 55 # Possibly more than one target (also a sensor error)
GRC_MOT_UNREADY = GRC_MOT + 0 # motorization is not ready (1792)
GRC_MOT_BUSY = GRC_MOT + 1 # motorization is handling another task (1793)
GRC_MOT_NOT_OCONST = GRC_MOT + 2 # motorization is not in velocity mode (1794)
GRC_MOT_NOT_CONFIG = GRC_MOT + 3 # motorization is in the wrong mode or busy (1795)
GRC_MOT_NOT_POSIT = GRC_MOT + 4 # motorization is not in posit mode (1796)
GRC_MOT_NOT_SERVICE = GRC_MOT + 5 # motorization is not in service mode (1797)
GRC_MOT_NOT_BUSY = GRC_MOT + 6 # motorization is handling no task (1798)
GRC_MOT_NOT_LOCK = GRC_MOT + 7 # motorization is not in tracking mode (1799)
GRC_MOT_NOT_SPIRAL = GRC_MOT + 8 # motorization is not in spiral mode (1800)
GRC_TMC_NO_FULL_CORRECTION = GRC_TMC + 3 # Warning: measurment without full correction
GRC_TMC_ACCURACY_GUARANTEE = GRC_TMC + 4 # Info : accuracy can not be guarantee
GRC_TMC_ANGLE_OK = GRC_TMC + 5 # Warning: only angle measurement valid
GRC_TMC_ANGLE_NOT_FULL_CORR = GRC_TMC + 8 # Warning: only angle measurement valid but without full correction
GRC_TMC_ANGLE_NO_ACC_GUARANTY = GRC_TMC + 9 # Info : only angle measurement valid but accuracy can not be guarantee
GRC_TMC_ANGLE_ERROR = GRC_TMC + 10 # Error : no angle measurement
GRC_TMC_DIST_PPM = GRC_TMC + 11 # Error : wrong setting of PPM or MM on EDM
GRC_TMC_DIST_ERROR = GRC_TMC + 12 # Error : distance measurement not done (no aim, etc.)
GRC_TMC_BUSY = GRC_TMC + 13 # Error : system is busy (no measurement done)
GRC_TMC_SIGNAL_ERROR = GRC_TMC + 14 # Error : no signal on EDM (only in signal mode)
GRC_BMM_XFER_PENDING = GRC_BMM + 1 # Loading process already opened
GRC_BMM_NO_XFER_OPEN = GRC_BMM + 2 # Transfer not opened
GRC_BMM_UNKNOWN_CHARSET = GRC_BMM + 3 # Unknown character set
GRC_BMM_NOT_INSTALLED = GRC_BMM + 4 # Display module not present
GRC_BMM_ALREADY_EXIST = GRC_BMM + 5 # Character set already exists
GRC_BMM_CANT_DELETE = GRC_BMM + 6 # Character set cannot be deleted
GRC_BMM_MEM_ERROR = GRC_BMM + 7 # Memory cannot be allocated
GRC_BMM_CHARSET_USED = GRC_BMM + 8 # Character set still used
GRC_BMM_CHARSET_SAVED = GRC_BMM + 9 # Charset cannot be deleted or is protected
GRC_BMM_INVALID_ADR = GRC_BMM + 10 # Attempt to copy a character block\noutside the allocated memory
GRC_BMM_CANCELANDADR_ERROR = GRC_BMM + 11 # Error during release of allocated memory
GRC_BMM_INVALID_SIZE = GRC_BMM + 12 # Number of bytes specified in header\ndoes not match the bytes read
GRC_BMM_CANCELANDINVSIZE_ERROR = GRC_BMM + 13 # Allocated memory could not be released
GRC_BMM_ALL_GROUP_OCC = GRC_BMM + 14 # Max. number of character sets already loaded
GRC_BMM_CANT_DEL_LAYERS = GRC_BMM + 15 # Layer cannot be deleted
GRC_BMM_UNKNOWN_LAYER = GRC_BMM + 16 # Required layer does not exist
GRC_BMM_INVALID_LAYERLEN = GRC_BMM + 17 # Layer length exceeds maximum
AUT_RC_TIMEOUT = GRC_AUT + 4 # Timeout, no target found
AUT_RC_DETENT_ERROR = GRC_AUT + 5 #
AUT_RC_ANGLE_ERROR = GRC_AUT + 6 #
AUT_RC_MOTOR_ERROR = GRC_AUT + 7 # Motorisation error
AUT_RC_INCACC = GRC_AUT + 8 #
AUT_RC_DEV_ERROR = GRC_AUT + 9 # Deviation measurement error
AUT_RC_NO_TARGET = GRC_AUT + 10 # No target detected
AUT_RC_MULTIPLE_TARGETS = GRC_AUT + 11 # Multiple targets detected
AUT_RC_BAD_ENVIRONMENT = GRC_AUT + 12 # Bad environment conditions
AUT_RC_DETECTOR_ERROR = GRC_AUT + 13 #
AUT_RC_NOT_ENABLED = GRC_AUT + 14 #
AUT_RC_CALACC = GRC_AUT + 15 #
AUT_RC_ACCURACY = GRC_AUT + 16 # Position not exactly reached
class byte(int):
def __new__(cls, value, *args, **kwargs):
if (type(value) == str):
value = int(value.strip("'"), 16)
elif (type(value) == bytes):
value = int(value.strip(b"'"), 16)
if value < 0:
raise ValueError("byte types must not be less than zero")
if value > 255:
raise ValueError("byte types must not be more than 255 (0xff)")
return super().__new__(cls, value)
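# Hedged note (added, not original): GeoCom replies encode byte parameters as
# quoted hex such as b"'1F'"; the self-checks below illustrate the accepted
# input forms.
def _byte_examples():  # illustrative only, never called on import
    assert byte("'1F'") == 0x1F   # quoted-hex string
    assert byte(b"'FF'") == 255   # quoted-hex bytes
    assert byte(7) == 7           # plain ints only get range-checked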
class PrismType(Enum):
LEICA_ROUND = 0 # Prism type: Leica circular prism
LEICA_MINI = 1 # Prism type: Leica mini prism
LEICA_TAPE = 2 # Prism type: Leica reflective tape
LEICA_360 = 3 # Prism type: Leica 360° prism
USER1 = 4 # Prism type: User defined 1
USER2 = 5 # Prism type: User defined 2
USER3 = 6 # Prism type: User defined 3
LEICA_360_MINI = 7 # Prism type: Leica 360° mini
LEICA_MINI_ZERO = 8 # Prism type: Leica mini zero
LEICA_USER = 9 # Prism type: user???
LEICA_HDS_TAPE = 10 # Prism type: tape cyra???
LEICA_GRZ121_ROUND = 11 # Prism type: Leica GRZ121 round for machine guidance
class ReflectorType(Enum):
UNDEFINED = 0
PRISM = 1
TAPE = 2
class TargetType(Enum):
REFLECTOR = 0
REFLECTORLESS = 1
class InclinationSensorProgram(Enum):
TMC_MEA_INC = 0 # Use sensor (apriori sigma)
TMC_AUTO_INC = 1 # Automatic mode (sensor/plane)
TMC_PLANE_INC = 2 # Use plane (apriori sigma)
class EDMMode(Enum):
EDM_MODE_NOT_USED = 0 # Init value
EDM_SINGLE_TAPE = 1 # Single measurement with tape
EDM_SINGLE_STANDARD = 2 # Standard single measurement
EDM_SINGLE_FAST = 3 # Fast single measurement
EDM_SINGLE_LRANGE = 4 # Long range single measurement
EDM_SINGLE_SRANGE = 5 # Short range single measurement
EDM_CONT_STANDARD = 6 # Standard repeated measurement
EDM_CONT_DYNAMIC = 7 # Dynamic repeated measurement
EDM_CONT_REFLESS = 8 # Reflectorless repeated measurement
EDM_CONT_FAST = 9 # Fast repeated measurement
EDM_AVERAGE_IR = 10 # Standard average measurement
EDM_AVERAGE_SR = 11 # Short range average measurement
EDM_AVERAGE_LR = 12 # Long range average measurement
class DeviceClass(Enum):
# TPS1000 Family ------------------------ accuracy
TPS_CLASS_1100 = 0 # TPS1000 family member, 1 mgon, 3"
TPS_CLASS_1700 = 1 # TPS1000 family member, 0.5 mgon, 1.5"
TPS_CLASS_1800 = 2 # TPS1000 family member, 0.3 mgon, 1"
TPS_CLASS_5000 = 3 # TPS2000 family member
TPS_CLASS_6000 = 4 # TPS2000 family member
TPS_CLASS_1500 = 5 # TPS1000 family member
TPS_CLASS_2003 = 6 # TPS2000 family member
TPS_CLASS_5005 = 7 # TPS5000 "
TPS_CLASS_5100 = 8 # TPS5000 "
# TPS1100 Family ------------------------ accuracy
TPS_CLASS_1102 = 100 # TPS1000 family member, 2"
TPS_CLASS_1103 = 101 # TPS1000 family member, 3"
TPS_CLASS_1105 = 102 # TPS1000 family member, 5"
TPS_CLASS_1101 = 103 # TPS1000 family member, 1."
# TPS1200 Family ------------------------ accuracy
TPS_CLASS_1202 = 200 # TPS1200 family member, 2"
TPS_CLASS_1203 = 201 # TPS1200 family member, 3"
TPS_CLASS_1205 = 202 # TPS1200 family member, 5"
TPS_CLASS_1201 = 203 # TPS1200 family member, 1"
class DeviceType(IntFlag):
# TPS1x00 common
TPS_DEVICE_T = 0x00000 # Theodolite without built-in EDM
TPS_DEVICE_MOT = 0x00004 # Motorized device
TPS_DEVICE_ATR = 0x00008 # Automatic Target Recognition
TPS_DEVICE_EGL = 0x00010 # Electronic Guide Light
TPS_DEVICE_DB = 0x00020 # reserved (Database, not GSI)
TPS_DEVICE_DL = 0x00040 # Diode laser
TPS_DEVICE_LP = 0x00080 # Laser plumbed
# TPS1000 specific
TPS_DEVICE_TC1 = 0x00001 # tachymeter (TCW1)
TPS_DEVICE_TC2 = 0x00002 # tachymeter (TCW2)
# TPS1100/TPS1200 specific
TPS_DEVICE_TC = 0x00001 # tachymeter (TCW3)
TPS_DEVICE_TCR = 0x00002 # tachymeter (TCW3 with red laser)
TPS_DEVICE_ATC = 0x00100 # Autocollimation lamp (used only PMU)
TPS_DEVICE_LPNT = 0x00200 # Laserpointer
TPS_DEVICE_RL_EXT = 0x00400 # Reflectorless EDM with extended range (Pinpoint R100,R300)
TPS_DEVICE_PS = 0x00800 # Power Search
# TPSSim specific
TPS_DEVICE_SIM = 0x04000 # runs on Simulation, no Hardware
class PowerPath(Enum):
CURRENT_POWER = 0
EXTERNAL_POWER = 1
INTERNAL_POWER = 2
class RecordFormat(Enum):
GSI_8 = 0
GSI_16 = 1
class TPSStatus(Enum):
OFF = 0
SLEEPING = 1
ONLINE = 2
LOCAL = 3
UNKNOWN = 4
class OnOff(Enum):
OFF = 0
ON = 1
class EGLIntensity(Enum):
OFF = 0
LOW = 1
MID = 2
HIGH = 3
class ControllerMode(Enum):
RELATIVE_POSITIONING = 0
CONSTANT_SPEED = 1
MANUAL_POSITIONING = 2
LOCK_IN = 3
BRAKE = 4
TERMINATE = 7
class ControllerStopMode(Enum):
NORMAL = 0
SHUTDOWN = 1
class LockInStatus(Enum):
LOCKED_OUT = 0
LOCKED_IN = 1
PREDICTION = 2
class MeasurementMode(Enum):
NO_MEASUREMENTS = 0 # No measurements, take last one
NO_DISTANCE = 1 # No distance measurement, angles only
DEFAULT_DISTANCE = 2 # Default distance measurements, pre-defined using MeasurementProgram
CLEAR_DISTANCE = 5 # Clear distances
STOP_TRACKING = 6 # Stop tracking laser
class MeasurementProgram(Enum):
SINGLE_REF_STANDARD = 0 # standard single IR distance with reflector
SINGLE_REF_FAST = 1 # fast single IR distance with reflector
SINGLE_REF_VISIBLE = 2 # long range distance with reflector (red laser)
SINGLE_RLESS_VISIBLE = 3 # single RL distance reflector free (red laser)
CONT_REF_STANDARD = 4 # tracking IR distance with reflector
CONT_REF_FAST = 5 # fast tracking IR distance with reflector
CONT_RLESS_VISIBLE = 6 # fast tracking RL distance reflector free (red)
AVG_REF_STANDARD = 7 # Average IR distance with reflector
AVG_REF_VISIBLE = 8 # Average long range dist. with reflector (red)
AVG_RLESS_VISIBLE = 9 # Average RL distance reflector free (red laser)
class PositionMode(Enum):
NORMAL = 0
PRECISE = 1
class FineAdjustPositionMode(Enum):
NORM = 0 # Angle tolerance
POINT = 1 # Point tolerance
DEFINE = 2 # System independent positioning tolerance; set with PyGeoCom.set_tolerance()
class ATRRecognitionMode(Enum):
POSITION = 0 # Positioning to the horizontal and vertical angle
TARGET = 1 # Positioning to a target in the environment of the horizontal and vertical angle
class TMCInclinationMode(Enum):
USE_SENSOR = 0
AUTOMATIC = 1
USE_PLANE = 2
class TMCMeasurementMode(Enum):
STOP = 0 # Stop measurement program
DEFAULT_DISTANCE = 1 # Default DIST-measurement program
DISTANCE_TRACKING = 2 # Distance-TRK measurement program
STOP_AND_CLEAR = 3 # TMC_STOP and clear data
SIGNAL = 4 # Signal measurement (test function)
RESTART = 6 # (Re)start measurement task
DISTANCE_RAPID_TRACKING = 8 # Distance-TRK measurement program
RED_LASER_TRACKING = 10 # Red laser tracking
TESTING_FREQUENCY = 11 # Frequency measurement (test)
class EDMMeasurementMode(Enum):
MODE_NOT_USER = 0
SINGLE_TAPE = 1
SINGLE_STANDARD = 2
SINGLE_FAST = 3
SINGLE_LRANGE = 4
SINGLE_SRANGE = 5
CONTINUOUS_STANDARD = 6
CONTINUOUS_DYNAMIC = 7
CONTINUOUS_REFLECTORLESS = 8
CONTINUOUS_FAST = 9
AVERAGE_IR = 10
AVERAGE_SR = 11
AVERAGE_LR = 12
class FacePosition(Enum):
NORMAL = 0
TURNED = 1
class ActualFace(Enum):
FACE_1 = 0
FACE_2 = 1
Coordinate = namedtuple('Coordinate', 'east north head')
Angles = namedtuple('Angles', 'hz, v')
def decode_string(data: bytes) -> str:
return data.decode('unicode_escape').strip('"')
def default_return_code_handler(return_code: ReturnCode):
if (return_code != ReturnCode.GRC_OK):
raise Exception(return_code)
def noop_return_code_handler(return_code: ReturnCode):
return
class PyGeoCom:
def __init__(self, stream, debug: bool = False):
self._stream = stream
self._stream.write(b'\n')
self._debug = debug
def _request(self, rpc_id: int, args: Tuple[Any, ...] = (), return_code_handler: Callable[[ReturnCode], None] = default_return_code_handler) -> Tuple[Any, ...]:
def encode(arg) -> str:
if (type(arg) == str):
return '"{}"'.format(arg)
elif (type(arg) == int):
return '{}'.format(arg)
elif (type(arg) == float):
return '{}'.format(arg)
elif (type(arg) == bool):
return '1' if arg == True else '0'
elif (type(arg) == byte):
return "'{:02X}'".format(arg)
d = '\n%R1Q,{}:{}\r\n'.format(rpc_id, ','.join([encode(a) for a in args])).encode('ascii')
if self._debug: print(b'>> ' + d)
self._stream.write(d)
d = self._stream.readline()
if self._debug: print(b'<< ' + d)
header, parameters = d.split(b':', 1)
reply_type, geocom_return_code, transaction_id = header.split(b',')
assert reply_type == b'%R1P'
geocom_return_code = int(geocom_return_code)
transaction_id = int(transaction_id)
parameters = parameters.rstrip()
rpc_return_code, *p = parameters.split(b',')
rpc_return_code = ReturnCode(int(rpc_return_code))
return_code_handler(rpc_return_code)
return (geocom_return_code, rpc_return_code) + tuple(p)
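# Wire-format illustration (added, hedged): one exchange as framed by
# _request() above, e.g. for RPC 5004 (get_instrument_name):
#   >> %R1Q,5004:
#   << %R1P,0,0:0,"TPS1100"   (comm return code, transaction id : GRC_OK, value)
# The instrument name shown is a placeholder, not a guaranteed reply.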
def get_instrument_number(self) -> int:
_, _, instrument_number, = self._request(5003)
return int(instrument_number)
def get_instrument_name(self) -> str:
_, _, instrument_name = self._request(5004)
return decode_string(instrument_name)
def get_device_config(self) -> Tuple[DeviceClass, DeviceType]:
_, _, device_class, device_type = self._request(5035)
return DeviceClass(int(device_class)), DeviceType(int(device_type))
def get_date_time(self) -> datetime:
_, _, year, month, day, hour, minute, second = self._request(5008)
year = int(year)
month = byte(month)
day = byte(day)
hour = byte(hour)
minute = byte(minute)
second = byte(second)
return datetime(year, month, day, hour, minute, second)
def set_date_time(self, dt: datetime):
self._request(5007, (dt.year, byte(dt.month), byte(dt.day), byte(dt.hour), byte(dt.minute), byte(dt.second)))
def get_software_version(self) -> Tuple[int, int, int]:
_, _, release, version, subversion = self._request(5034)
return int(release), int(version), int(subversion)
def check_power(self) -> Tuple[int, PowerPath, PowerPath]:
_, _, capacity, active_power, power_suggest = self._request(5039)
return int(capacity), PowerPath(int(active_power)), PowerPath(int(power_suggest))
def get_memory_voltage(self) -> float:
_, _, memory_voltage = self._request(5010)
return float(memory_voltage)
def get_internal_temperature(self) -> float:
_, _, internal_temperature = self._request(5011)
return float(internal_temperature)
def get_up_counter(self) -> Tuple[int, int]:
_, _, power_on, wake_up = self._request(12003)
return int(power_on), int(wake_up)
def get_binary_available(self) -> bool:
_, _, binary_available, = self._request(113)
return bool(int(binary_available))  # raw bytes like b'0' are truthy, so convert via int
def get_record_format(self) -> RecordFormat:
_, _, record_format, = self._request(8011)
return RecordFormat(int(record_format))
def set_record_format(self, record_format: RecordFormat):
self._request(8012, (record_format.value,))
def get_double_precision_setting(self) -> int:
_, _, number_of_digits, = self._request(108)
return int(number_of_digits)
def set_double_precision_setting(self, number_of_digits: int):
if number_of_digits < 0:
raise ValueError("Number of digits must be greater than or equal to 0")
if number_of_digits > 15:
raise ValueError("Number of digits must be lesser than or equal to 15")
self._request(107, (number_of_digits,))
def laser_pointer(self, state: OnOff):
self._request(1004, (state.value,))
def laser_pointer_on(self):
self.laser_pointer(OnOff.ON)
def laser_pointer_off(self):
self.laser_pointer(OnOff.OFF)
# Not tested as I don't have a device with an EGL
def get_egl_intensity(self) -> EGLIntensity:
_, _, intensity, = self._request(1058)
return EGLIntensity(int(intensity))
# Not tested as I don't have a device with an EGL
def set_egl_intensity(self, intensity: EGLIntensity):
self._request(1059, (intensity.value,))
def get_motor_lock_status(self) -> LockInStatus:
_, _, motor_lock_status, = self._request(6021)
return LockInStatus(int(motor_lock_status))
def start_controller(self, controller_mode: ControllerMode):
self._request(6001, (controller_mode.value,))
def stop_controller(self, controller_stop_mode: ControllerStopMode):
self._request(6002, (controller_stop_mode.value,))
# Speed is in radians/second, with a maximum of ±0.79rad/s each
def set_velocity(self, horizontal_speed: float, vertical_speed: float):
MAX_SPEED = 0.79 # rad/s
if abs(horizontal_speed) > MAX_SPEED:
raise ValueError("Horizontal speed exceeds the ±0.79 rad/s range")
if abs(vertical_speed) > MAX_SPEED:
raise ValueError("Vertical speed exceeds the ±0.79 rad/s range")
self._request(6004, (horizontal_speed, vertical_speed))
def get_target_type(self) -> TargetType:
_, _, target_type, = self._request(17022)
return TargetType(int(target_type))
def set_target_type(self, target_type: TargetType):
self._request(17021, (target_type.value,))
def get_prism_type(self) -> PrismType:
_, _, prism_type, = self._request(17009)
return PrismType(int(prism_type))
def set_prism_type(self, prism_type: PrismType):
self._request(17008, (prism_type.value,))
def get_prism_definition(self, prism_type: PrismType) -> Tuple[str, float, ReflectorType]:
_, _, name, correction, reflector_type = self._request(17023, (prism_type.value,))
name = decode_string(name)
correction = float(correction)
reflector_type = ReflectorType(int(reflector_type))
return name, correction, reflector_type
def set_prism_definition(self, prism_type: PrismType, name: str, correction: float, reflector_type: ReflectorType):
self._request(17024, (prism_type.value, name, correction, reflector_type.value))
def get_measurement_program(self) -> MeasurementProgram:
_, _, measurement_program, = self._request(17018)
return MeasurementProgram(int(measurement_program))
def set_measurement_program(self, measurement_program: MeasurementProgram):
self._request(17019, (measurement_program.value,))
def measure_distance_and_angles(self, measurement_mode: MeasurementMode) -> Tuple[MeasurementMode, float, float, float]:
_, _, horizontal, vertical, distance, measurement_mode = self._request(17017, (measurement_mode.value,))
horizontal = float(horizontal)
vertical = float(vertical)
distance = float(distance)
measurement_mode = MeasurementMode(int(measurement_mode))
return measurement_mode, horizontal, vertical, distance
def search_target(self):
self._request(17020, (0,))
def get_server_software_version(self) -> Tuple[int, int, int]:
_, _, release, version, subversion = self._request(110)
return int(release), int(version), int(subversion)
def set_send_delay(self, delay_ms: int):
self._request(109, (delay_ms,))
def local_mode(self):
self._request(1)
def get_user_atr_state(self) -> OnOff:
_, _, atr_state, = self._request(18006)
return OnOff(int(atr_state))
def set_user_atr_state(self, atr_state: OnOff):
self._request(18005, (atr_state.value,))
def user_atr_state_on(self):
self.set_user_atr_state(OnOff.ON)
def user_atr_state_off(self):
self.set_user_atr_state(OnOff.OFF)
def get_user_lock_state(self) -> OnOff:
_, _, lock_state, = self._request(18008)
return OnOff(int(lock_state))
def set_user_lock_state(self, lock_state: OnOff):
self._request(18007, (lock_state.value,))
def user_lock_state_on(self):
self.set_user_lock_state(OnOff.ON)
def user_lock_state_off(self):
self.set_user_lock_state(OnOff.OFF)
def get_rcs_search_switch(self) -> OnOff:
"""This command gets the current RCS-Searching mode switch. If RCS style searching
is enabled, then the extended searching for BAP_SearchTarget or after a loss of
lock is activated. This command is valid for TCA instruments only.
:returns: state of the RCS searching switch
:rtype: OnOff
"""
_, _, search_switch, = self._request(18010)
return OnOff(int(search_switch))
def switch_rcs_search(self, search_switch: OnOff):
self._request(18009, (search_switch.value,))
def get_tolerance(self) -> Tuple[float, float]:
_, _, horizontal_tolerance, vertical_tolerance = self._request(9008)
return float(horizontal_tolerance), float(vertical_tolerance)
def set_tolerance(self, horizontal_tolerance: float, vertical_tolerance: float):
self._request(9007, (horizontal_tolerance, vertical_tolerance))
def get_positioning_timeout(self) -> Tuple[float, float]:
_, _, horizontal_timeout, vertical_timeout = self._request(9012)
return float(horizontal_timeout), float(vertical_timeout)
def set_positioning_timeout(self, horizontal_timeout: float, vertical_timeout: float):
self._request(9011, (horizontal_timeout, vertical_timeout))
def position(self, horizontal: float, vertical: float, position_mode: PositionMode = PositionMode.NORMAL, atr_mode: ATRRecognitionMode = ATRRecognitionMode.POSITION):
self._request(9027, (horizontal, vertical, position_mode.value, atr_mode.value, False))
def change_face(self, position_mode: PositionMode = PositionMode.NORMAL, atr_mode: ATRRecognitionMode = ATRRecognitionMode.POSITION):
self._request(9028, (position_mode.value, atr_mode.value, False))
def fine_adjust(self, horizontal_search_range: float, vertical_search_range: float):
self._request(9037, (horizontal_search_range, vertical_search_range, False))
def search(self, horizontal_search_range: float, vertical_search_range: float):
self._request(9029, (horizontal_search_range, vertical_search_range, False))
def get_fine_adjust_mode(self) -> FineAdjustPositionMode:
_, _, fine_adjust_mode, = self._request(9030)
return FineAdjustPositionMode(int(fine_adjust_mode))
def set_fine_adjust_mode(self, fine_adjust_mode: FineAdjustPositionMode):
self._request(9031, (fine_adjust_mode.value,))
def lock_in(self):
self._request(9013)
def get_search_area(self) -> Tuple[float, float, float, float, bool]:
_, _, horizontal_centre, vertical_centre, horizontal_range, vertical_range, enabled = self._request(9042)
horizontal_centre = float(horizontal_centre)
vertical_centre = float(vertical_centre)
horizontal_range = float(horizontal_range)
vertical_range = float(vertical_range)
enabled = bool(int(enabled))  # raw bytes like b'0' are truthy, so convert via int
return horizontal_centre, vertical_centre, horizontal_range, vertical_range, enabled
def set_search_area(self, horizontal_centre: float, vertical_centre: float, horizontal_range: float, vertical_range: float, enabled: bool):
self._request(9043, (horizontal_centre, vertical_centre, horizontal_range, vertical_range, enabled))
def get_search_spiral(self) -> Tuple[float, float]:
_, _, horizontal_range, vertical_range = self._request(9040)
return float(horizontal_range), float(vertical_range)
def set_search_spiral(self, horizontal_range: float, vertical_range: float):
self._request(9041, (horizontal_range, vertical_range))
def get_coordinate(self, inclination_mode: TMCInclinationMode, wait_time: int = 1000) -> Tuple[Coordinate, int, Coordinate, int]:
_, _, e, n, h, measure_time, e_cont, n_cont, h_cont, measure_time_cont = self._request(2082, (wait_time, inclination_mode.value), return_code_handler=noop_return_code_handler)
coordinate = Coordinate(float(e), float(n), float(h))
coordinate_cont = Coordinate(float(e_cont), float(n_cont), float(h_cont))
measure_time = int(measure_time)
measure_time_cont = int(measure_time_cont)
return coordinate, measure_time, coordinate_cont, measure_time_cont
def get_simple_measurement(self, inclination_mode: TMCInclinationMode, wait_time: int = 1000) -> Tuple[Angles, float]:
_, _, horizontal, vertical, slope_distance = self._request(2108, (wait_time, inclination_mode.value,))
angles = Angles(float(horizontal), float(vertical))
slope_distance = float(slope_distance)
return angles, slope_distance
def get_angles_simple(self, inclination_mode: TMCInclinationMode) -> Angles:
_, _, horizontal, vertical = self._request(2107, (inclination_mode.value,))
return Angles(float(horizontal), float(vertical))
def get_angles_complete(self, inclination_mode: TMCInclinationMode) -> Tuple[Angles, float, float, float, float, float, int, FacePosition]:
_, _, horizontal, vertical, angle_accuracy, angle_measure_time, cross_inclination, length_inclination, incline_accuracy, incline_measurement_time, face_position = self._request(2003, (inclination_mode.value,))
angles = Angles(float(horizontal), float(vertical))
angle_accuracy = float(angle_accuracy)
angle_measure_time = float(angle_measure_time)
cross_inclination = float(cross_inclination)
length_inclination = float(length_inclination)
incline_accuracy = float(incline_accuracy)
incline_measurement_time = int(incline_measurement_time)
face_position = FacePosition(int(face_position))
return angles, angle_accuracy, angle_measure_time, cross_inclination, length_inclination, incline_accuracy, incline_measurement_time, face_position
def do_measure(self, measurement_mode: TMCMeasurementMode, inclination_mode: TMCInclinationMode):
self._request(2008, (measurement_mode.value, inclination_mode.value,))
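# Hedged usage sketch (added; not part of the original module). The device
# path and baud rate are placeholders -- any object exposing write() and
# readline(), e.g. a pyserial port, satisfies what PyGeoCom needs.
def _example_session():  # illustrative only, never called on import
    import serial  # assumption: pyserial is installed
    with serial.Serial("/dev/ttyUSB0", baudrate=115200, timeout=5) as port:
        tps = PyGeoCom(port, debug=True)
        print(tps.get_instrument_name())
        tps.laser_pointer_on()
        angles = tps.get_angles_simple(TMCInclinationMode.AUTOMATIC)
        print(angles.hz, angles.v)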
| 2.09375 | 2 |
nixtract/cli/cifti.py | htwangtw/nixtract | 4 | 12769602 | <gh_stars>1-10
"""Functions for command line interface
"""
import argparse
import os
import shutil
from nilearn.signal import clean
from nixtract.cli.base import (base_cli, handle_base_args, replace_file_ext,
make_param_file, check_glob, run_extraction)
from nixtract.extractors import CiftiExtractor
def _cli_parser():
"""Reads CIFTI CLI arguments and returns input specifications combined with
those from the general CLI
"""
parser = argparse.ArgumentParser()
# input files
parser.add_argument('--input_files', nargs='+', type=str,
help='One or more input CIFTI dtseries files '
'(.dtseries.nii). Can also be a single string '
'with wildcards (*) to specify all files matching '
'the file pattern. If so, these files are '
'naturally sorted by file name prior to '
'extraction')
parser.add_argument('--roi_file', type=str,
help='CIFTI dlabel file (.dlabel.nii) with one or more '
'labels')
# other
parser.add_argument('--as_vertices', default=False,
action='store_true',
help='Extract the timeseries of each vertex in a '
'region rather than the mean timeseries. This is '
'only available when `roi_file` is a single '
'region, i.e. a binary mask. Default: False')
parser.add_argument('--denoise-pre-extract', default=False,
action='store_true',
help='Denoise data (e.g., filtering, confound '
'regression) before timeseries extraction. '
'Otherwise, denoising is done on the extracted '
'timeseries, which is consistent with nilearn and '
'is more computationally efficient. Default: False')
parser = base_cli(parser)
return parser.parse_args()
def _check_cifti_params(params):
"""Ensure that required fields are included and correctly formatted"""
params = handle_base_args(params)
if params['input_files'] is None:
raise ValueError('Missing input_files. Check files')
else:
params['input_files'] = check_glob(params['input_files'])
# glob returned nothing
if not params['input_files']:
raise ValueError('Missing input files. Check files')
if not params['roi_file']:
raise ValueError('Missing roi_file input.')
return params
def extract_cifti(input_file, roi_file, regressor_file, params):
"""Extract timeseries from a CIFTI image
Parameters
----------
input_files : str
File path of the input .dtseries.nii file
roi_file : str
File path of the input .dlabel.nii file.
regressor_file : str
File path of regressor file
params : dict
Parameter dictionary for extraction
"""
extractor = CiftiExtractor(
fname=input_file,
roi_file=roi_file,
as_vertices=params['as_vertices'],
verbose=params['verbose'],
pre_clean=params['denoise_pre_extract'],
standardize=params['standardize'],
t_r=params['t_r'],
high_pass=params['high_pass'],
low_pass=params['low_pass'],
detrend=params['detrend']
)
if regressor_file is not None:
extractor.set_regressors(regressor_file, params['regressors'],
params["load_confounds_kwargs"])
if (params['discard_scans'] is not None) and (params['discard_scans'] > 0):
extractor.discard_scans(params['discard_scans'])
extractor.extract()
out = os.path.join(params['out_dir'], replace_file_ext(input_file))
extractor.save(out, params['n_decimals'])
return out, extractor
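# Hedged usage sketch (added): the flow below mirrors main(); the input index
# and regressor choice are placeholders, and `params` is assumed to have
# passed _check_cifti_params() already.
def _example_extract(params):  # illustrative only
    dtseries = params['input_files'][0]
    out_path, extractor = extract_cifti(dtseries, params['roi_file'],
                                        regressor_file=None, params=params)
    return out_path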
def main():
params = vars(_cli_parser())
params = _check_cifti_params(params)
metadata_path = make_param_file(params)
shutil.copy2(params['roi_file'], metadata_path)
run_extraction(extract_cifti, params['input_files'], params['roi_file'],
params)
if __name__ == '__main__':
raise RuntimeError("`nixtract/cli/cifti.py` should not be run directly. "
"Please `pip install` nixtract and use the "
"`nixtract-cifti` command.")
| 2.3125 | 2 |
RLcafe/frozen_lake.py | SimonPreissner/machine-learning-potpourri | 3 | 12769603 | '''Code from python notebook by simoninithomas
available at https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/master/Q%20learning/Q%20Learning%20with%20FrozenLake.ipynb
'''
import numpy as np
import gym
import random
env = gym.make("FrozenLake-v0")
action_size = env.action_space.n
state_size = env.observation_space.n
qtable = np.zeros((state_size, action_size))
#print(qtable)
total_episodes = 10000 # Total episodes
learning_rate = 0.5 # Learning rate
max_steps = 50 # Max steps per episode
gamma = 0.95 # Discounting rate
# Exploration parameters
epsilon = 1.0 # Exploration rate
max_epsilon = 1.0 # Exploration probability at start
min_epsilon = 0.01 # Minimum exploration probability
decay_rate = 0.001 # Exponential decay rate for exploration prob
# List of rewards
rewards = []
# For each training episode (or until learning is stopped)
for episode in range(total_episodes):
# Reset the environment
state = env.reset()
step = 0
done = False
total_rewards = 0
print("EPISODE",episode)
for step in range(max_steps):
# 3. Choose an action a in the current world state (s)
## First we draw a random number
exp_exp_tradeoff = random.uniform(0, 1)
## If this number is greater than epsilon --> exploitation (take the action with the biggest Q-value for this state)
if exp_exp_tradeoff > epsilon:
action = np.argmax(qtable[state,:])
#print("Let's exploit.", action)
env.render()
# Otherwise make a random choice --> exploration
else:
action = env.action_space.sample()
#print("Let's explore.",action)
env.render()
# Take the action (a) and observe the outcome state(s') and reward (r)
new_state, reward, done, info = env.step(action)
print("NEW STATE:",new_state,"REWARD:",reward)
# Update Q(s,a):= Q(s,a) + lr [R(s,a) + gamma * max Q(s',a') - Q(s,a)]
# qtable[new_state,:] : all the actions we can take from new state
qtable[state, action] = qtable[state, action] + learning_rate * (reward + gamma * np.max(qtable[new_state, :]) - qtable[state, action])
print("QTABLE AT",state,qtable[state])
total_rewards += reward
# Our new state is state
state = new_state
# If done (if we're dead) : finish episode
if done == True:
print("GAME OVER.\n\n")
break
episode += 1
# Reduce epsilon (because we need less and less exploration)
epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)
print(epsilon)
rewards.append(total_rewards)
print ("Score over time: " + str(sum(rewards)/total_episodes))
print(qtable)
env.reset()
for episode in range(0):  # playback loop is disabled; set a positive count to watch the trained agent play
state = env.reset()
step = 0
done = False
print("****************************************************")
print("EPISODE ", episode)
for step in range(max_steps):
env.render()
# Take the action (index) that have the maximum expected future reward given that state
action = np.argmax(qtable[state,:])
new_state, reward, done, info = env.step(action)
if done:
break
state = new_state
env.close()
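# Hedged addition (not in the original notebook): once training has finished,
# the greedy policy is one argmax per row of the Q-table.
def greedy_policy(q):
    return np.argmax(q, axis=1)  # shape (state_size,): best action per state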
| 3.015625 | 3 |
old/scripts/play.py | raehik/dotfiles | 3 | 12769604 | #!/usr/bin/env python3
#
# Play a game.
#
import raehutils
import sys, os, argparse, logging
class PlayPy(raehutils.RaehBaseClass):
ERR_MATCH = 1
def __init__(self):
retroarch_cores_dir = os.environ.get("HOME") + "/.config/retroarch/cores"
games_dir = os.environ.get("HOME") + "/media/games-local"
self.games = {
"tome4": {
"name": "Tales of Maj'Eyal",
"cmd": ["tome4"]
},
"pokemon-emerald-jp": {
"name": "Pokemon Emerald (JP)",
"cmd": ["retroarch","-L",retroarch_cores_dir+"/vbam_libretro.so",games_dir+"/gba/official/Pocket Monsters - Emerald (Japan).gba"]
},
"melee": {
"name": "Super Smash Bros. Melee (20XX) [UFC]",
"cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.8.7-ucf-v0.73/dolphin-emu"]
},
"melee-no-ufc": {
"name": "Super Smash Bros. Melee (20XX)",
"cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.8.7/dolphin-emu"]
},
"melee-smashladder": {
"name": "Super Smash Bros. Melee [Netplay/Smashladder]",
"cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.9-fresh/dolphin-emu"]
},
"melee-uk-melee": {
"name": "Super Smash Bros. Melee [Netplay/UK Melee]",
"cmd": [os.environ.get("HOME")+"/media/games-etc/platforms/pc/emulators/wii/faster-melee-v5.8.7-fresh-uk-melee-ucf-v0.73/dolphin-emu"]
},
"retroarch": {
"name": "RetroArch (general)",
"cmd": ["retroarch"]
},
"mario-and-luigi-rpg": {
"name": "Mario & Luigi RPG (JP)",
"cmd": ["retroarch","-L",retroarch_cores_dir+"/vbam_libretro.so",games_dir+"/gba/official/mario-and-luigi-rpg-jp/1283 - Mario and Luigi RPG (J)(Rising Sun).gba"]
},
"elite-nes-pal": {
"name": "Elite (NES) (PAL)",
"cmd": ["retroarch","-L",retroarch_cores_dir+"/fceumm_libretro.so",games_dir+"/nes/official/elite/elite-pal.nes"]
},
}
self.workspace_num = "10"
## CLI-related {{{
def _parse_args(self):
self.parser = argparse.ArgumentParser(description="Play a game.")
self.parser.add_argument("-v", "--verbose", help="be verbose", action="count", default=0)
self.parser.add_argument("-q", "--quiet", help="be quiet (overrides -v)", action="count", default=0)
self.parser.add_argument("game", help="unique string of game to play")
self.args = self.parser.parse_args()
self._parse_verbosity()
## }}}
def main(self):
"""Main entrypoint after program initialisation."""
# get all possible matches
matches = [k for k, v in self.games.items() if k.startswith(self.args.game)]
if len(matches) < 1:
self.fail("no matching games for query: {}".format(self.args.game), PlayPy.ERR_MATCH)
if len(matches) > 1:
# if we found an exact match, override
exact_match = list(filter(lambda x: x == self.args.game, matches))
if len(exact_match) == 1:
matches = exact_match
else:
self.fail("query matches multiple games with no exact match: {}".format(", ".join(matches), PlayPy.ERR_MATCH))
game = self.games[matches[0]]
self.logger.info("matched game: {}".format(game["name"]))
self.logger.info("game cmd: {}".format(" ".join(game["cmd"])))
self.start_game(game)
def start_game(self, game):
"""Start a game."""
self.switch_workspace(self.workspace_num)
self.run_game_cmd(game["cmd"])
#self.float_game_window()
def switch_workspace(self, workspace_num):
"""Switch i3 workspace to the given worksapce."""
cmd_switch_workspace = ["i3-msg", "workspace", workspace_num]
raehutils.get_shell(cmd_switch_workspace)
# sleep a TINY bit (Dolphin comes up before we switch, somehow??)
#raehutils.get_shell(["sleep", "0.1"])
def float_game_window(self):
"""Float the game window (i3)."""
cmd_float_window = ["i3-msg", "floating", "enable"]
# sleep for a bit first to wait for the window to come up
raehutils.get_shell(["sleep", "1"])
raehutils.get_shell(cmd_float_window)
def run_game_cmd(self, cmd):
"""Run a shell command to start a game and detach."""
raehutils.run_shell_detached(cmd)
# alternative: don't detach, return return code
# maybe useful as a switch
#return raehutils.drop_to_shell(cmd)
if __name__ == "__main__":
program = PlayPy()
program.run()
| 2.078125 | 2 |
django_query_capture/utils.py | LeeHanYeong/django-query-capture | 62 | 12769605 | <reponame>LeeHanYeong/django-query-capture
import io
import sys
from contextlib import ContextDecorator
from django.utils import termcolors
from django_query_capture.capture import CapturedQuery
from django_query_capture.settings import get_config
class CaptureStdOutToString(ContextDecorator):
def __enter__(self) -> io.StringIO:
self.old_stdout = sys.stdout
new_stdout = io.StringIO()
sys.stdout = new_stdout
return sys.stdout
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self.old_stdout
def colorize(value: str, is_warning: bool) -> str:
"""
Utility to set a color for the output string when it exceeds the threshold.
Args:
value: String to be output.
is_warning: Whether it exceeds the threshold.
Returns:
colorized string output
"""
if is_warning:
return termcolors.make_style(fg=get_config()["PRINT_THRESHOLDS"]["COLOR"])( # type: ignore
value
)
return value
def get_stack_prefix(captured_query: CapturedQuery) -> str:
"""
Utilities that help you output call stacks consistently in [CapturedQuery][capture.CapturedQuery].
Args:
captured_query: [CapturedQuery][capture.CapturedQuery]
"""
return f'[{captured_query["function_name"]}, {captured_query["file_name"]}:{captured_query["line_no"]}]'
def truncate_string(value: str, length: int) -> str:
"""
Args:
value: String to be output.
length: Number of strings to output.
Returns:
truncated string
"""
return (value[:length] + "..") if len(value) > length else value
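# --- Usage sketch (not part of the library): a minimal demonstration of
# CaptureStdOutToString, get_stack_prefix, and truncate_string. colorize() is
# left out because it reads Django settings via get_config(), which needs a
# configured project.
if __name__ == "__main__":
    with CaptureStdOutToString() as captured:
        print("SELECT * FROM auth_user")
    # everything printed inside the block lands in the StringIO buffer
    assert captured.getvalue() == "SELECT * FROM auth_user\n"
    assert truncate_string("SELECT * FROM auth_user", 6) == "SELECT.."
    # get_stack_prefix only reads three keys, so a plain dict stands in for
    # CapturedQuery here:
    assert get_stack_prefix(
        {"function_name": "get_users", "file_name": "views.py", "line_no": 42}
    ) == "[get_users, views.py:42]"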
| 2.4375 | 2 |
tree/bst/find mode in binary search tree.py | windowssocket/py_leetcode | 3 | 12769606 | <reponame>windowssocket/py_leetcode<gh_stars>1-10
class Solution(object):
def findMode(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
global_val = list()
global_num = 0
if root is None:
return global_val
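        # In-order traversal of a BST visits values in sorted order, so equal
        # values end up adjacent and the modes are simply the longest runs
        # found by the two-pointer scan below.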
def in_traverse(root, result):
if root is None:
return
in_traverse(root.left, result)
result.append(root.val)
in_traverse(root.right, result)
result = list()
in_traverse(root, result)
pre_pointer = 0
post_pointer = 1
while post_pointer <= len(result) - 1:
if result[post_pointer] == result[pre_pointer]:
post_pointer += 1
else:
if post_pointer - pre_pointer > global_num:
global_val = [result[pre_pointer]]
global_num = post_pointer - pre_pointer
elif post_pointer - pre_pointer == global_num:
global_val.append(result[pre_pointer])
pre_pointer = post_pointer
post_pointer += 1
if post_pointer - pre_pointer > global_num:
global_val = [result[pre_pointer]]
elif post_pointer - pre_pointer == global_num:
global_val.append(result[pre_pointer])
        return global_val
| 2.9375 | 3 |
diagrams/oci/edge.py | Shimpei-GANGAN/diagrams | 1 | 12769607 | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OCI
class _Edge(_OCI):
_type = "edge"
_icon_dir = "resources/oci/edge"
class CdnGrey(_Edge):
_icon = "cdn-grey.png"
class Cdn(_Edge):
_icon = "cdn.png"
class DnsGrey(_Edge):
_icon = "dns-grey.png"
class Dns(_Edge):
_icon = "dns.png"
class EmaildeliveryGrey(_Edge):
_icon = "emaildelivery-grey.png"
class Emaildelivery(_Edge):
_icon = "emaildelivery.png"
class WafGrey(_Edge):
_icon = "waf-grey.png"
class Waf(_Edge):
_icon = "waf.png"
# Aliases
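# Usage sketch (assumes the `diagrams` package and Graphviz are installed):
# each class above is a drawable node, e.g.
#
#     from diagrams import Diagram
#     from diagrams.oci.edge import Cdn, Dns
#
#     with Diagram("OCI Edge", show=False):
#         Cdn("cdn") >> Dns("dns")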
| 1.546875 | 2 |
didi/deep/SasRecV3/model.py | bhneo/SparsePooling | 0 | 12769608 | import tensorflow as tf
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Layer, Dense, LayerNormalization, Dropout, Embedding, Input, PReLU
from modules import *
from tensorflow import keras
from tensorflow.keras.models import Model
# api functional model
def get_sasrec(maxlen, item_fea_col, embed_dim, embed_reg, dropout, num_heads, blocks, ffn_hidden_unit,
               use_norm=True, causality=True):
# train_inputs = Input(shape=(206,), dtype=tf.float32, name = 'model_inputs') # (None, 206)
user_inputs = Input(shape=(170,), dtype=tf.float32, name='user_inputs') # (None, 170)
item_inputs = Input(shape=(36,), dtype=tf.float32, name='item_inputs') # (None, 36)
# split
tmp = tf.split(user_inputs, axis=1, num_or_size_splits=[20, 150])
    user_feat_inputs, user_seq_inputs = tmp  # (None, 20), (None, 150)
tmp = tf.split(item_inputs, axis=1, num_or_size_splits=[1, 35])
sample_cspuidx_inputs, item_feat_inputs = tmp # (None,1) (None,35)
### ********** ###
# user part
### ********** ###
new_seq_inputs = tf.cast(user_seq_inputs, dtype = tf.int32)
mask = tf.expand_dims(tf.cast(tf.not_equal(new_seq_inputs, 0), dtype=tf.float32), axis=-1) # (None, maxlen, 1)
item_embedding = Embedding(input_dim=item_fea_col['feat_num'],
input_length=1,
output_dim=embed_dim,
mask_zero=True,
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
seq_embed = item_embedding(new_seq_inputs) # (None, 150, dim=50)
pos_embedding = Embedding(input_dim=maxlen,
input_length=1,
output_dim=embed_dim,
mask_zero=False,
embeddings_initializer='random_uniform',
embeddings_regularizer=l2(embed_reg))
pos_encoding = tf.expand_dims(pos_embedding(tf.range(maxlen)), axis=0)
seq_embed += pos_encoding
seq_embed = Dropout(dropout)(seq_embed)
att_outputs = seq_embed # (None, maxlen, dim)
att_outputs *= mask
encoder_layer = [EncoderLayer(embed_dim, num_heads, ffn_hidden_unit,
dropout, use_norm, causality) for _ in range(blocks)]
for block in encoder_layer:
att_outputs = block([att_outputs, mask]) # (None, maxlen, dim)
att_outputs *= mask # (None, maxlen, dim)
seq_outputs = att_outputs[:, -1] # (None, dim) remain the embedding of the last item
# concat
user_feat_vec = tf.concat([user_feat_inputs, seq_outputs], -1) # (None,20+50)
# MLP
ffn_1 = Dense(units=60, activation='relu', use_bias=True, kernel_initializer=keras.initializers.he_uniform())
ffn_2 = Dense(units=50, activation='relu', use_bias=True, kernel_initializer=keras.initializers.he_uniform())
norm1 = LayerNormalization(epsilon=1e-6, trainable=use_norm)
norm2 = LayerNormalization(epsilon=1e-6, trainable=use_norm)
feat_vec = ffn_1(user_feat_vec)
feat_vec = norm1(feat_vec)
feat_vec = ffn_2(feat_vec) # (None,50)
user_vec = norm2(feat_vec)
### ********** ###
# item part
### ********** ###
item_info = item_embedding(sample_cspuidx_inputs) # (None, 1, dim)
item_emb_vec = item_info[:, -1] # (None, dim)
item_feat_vec = tf.concat([item_emb_vec, item_feat_inputs], -1) #(None, dim+35)
ffn_3 = Dense(units=50, activation='relu', use_bias=True, kernel_initializer=keras.initializers.he_uniform())
norm3 = LayerNormalization(epsilon=1e-6, trainable=use_norm)
item_vec = ffn_3(item_feat_vec) #(None, 50)
item_vec = norm3(item_vec)
# compute logits
logits = tf.reduce_sum(user_vec * item_vec, axis=-1, keepdims=True) # (None, 1)
logits = tf.nn.sigmoid(logits)
model = Model(inputs=[user_inputs, item_inputs], outputs=[logits])
model.__setattr__("user_inputs", user_inputs)
model.__setattr__("user_outputs", user_vec)
model.__setattr__("item_inputs", item_inputs)
model.__setattr__("item_outputs", item_vec)
return model
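# --- Usage sketch (illustrative values only, not from the original training
# script; assumes the project's modules.EncoderLayer is importable): builds the
# two-tower model and scores one (user, item) pair with the fixed 170-/36-dim
# inputs. maxlen must be 150 to match the hard-coded sequence split above, and
# embed_dim=50 matches the inline shape comments.
if __name__ == "__main__":
    import numpy as np
    demo_item_fea_col = {'feat_num': 10000}  # hypothetical item-vocabulary size
    model = get_sasrec(maxlen=150, item_fea_col=demo_item_fea_col, embed_dim=50,
                       embed_reg=1e-6, dropout=0.2, num_heads=2, blocks=2,
                       ffn_hidden_unit=64)
    user = np.zeros((1, 170), dtype='float32')
    user[0, 20:] = np.arange(1, 151)  # dummy interaction sequence (non-zero ids)
    item = np.zeros((1, 36), dtype='float32')
    item[0, 0] = 42.0  # dummy sample cspu index
    score = model.predict([user, item], verbose=0)
    print(score.shape)  # (1, 1) sigmoid score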
| 2.4375 | 2 |
tests/python/generated_typing_t.py | mychiux413/ConstConv | 0 | 12769609 | from .generated_typing import MultiLangs, ZH_TW, EN
def test_generated():
ml = MultiLangs(ZH_TW)
assert ml.hello == "您好,歡迎"
assert ml.login == "登入"
assert ml.select_lang == "繁體中文"
ml.set_lang(EN)
assert ml.hello == "Hello,Welcome"
assert ml.login == "Login"
assert ml.select_lang == "English"
| 2.265625 | 2 |
tests/hikari/utilities/test_event_stream.py | tomxey/hikari | 0 | 12769610 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import logging
import unittest
import weakref
import mock
import pytest
from hikari import events
from hikari import iterators
from hikari.impl import bot
from hikari.utilities import event_stream
from tests.hikari import hikari_test_helpers
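# These tests exercise hikari's internal Streamer/EventStream utilities; a
# typical invocation from the repository root would be:
#   pytest tests/hikari/utilities/test_event_stream.py -q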
class TestStreamer:
@pytest.fixture(scope="module")
def stub_streamer(self):
return hikari_test_helpers.mock_class_namespace(event_stream.Streamer)
@pytest.mark.asyncio
async def test___aenter___and___aexit__(self, stub_streamer):
async with stub_streamer():
stub_streamer.open.assert_awaited_once()
stub_streamer.close.assert_not_called()
stub_streamer.open.assert_awaited_once()
stub_streamer.close.assert_awaited_once()
def test___enter__(self, stub_streamer):
# flake8 gets annoyed if we use "with" here so here's a hacky alternative
with pytest.raises(TypeError, match=" is async-only, did you mean 'async with'?"):
stub_streamer().__enter__()
def test___exit__(self, stub_streamer):
try:
stub_streamer().__exit__(None, None, None)
except AttributeError as exc:
            pytest.fail(str(exc))
@pytest.fixture()
def mock_app():
return mock.Mock(bot.BotApp)
class TestEventStream:
@pytest.mark.asyncio
async def test__listener_when_filter_returns_false(self, mock_app):
stream = event_stream.EventStream(mock_app, events.Event, timeout=None)
stream.filter(lambda _: False)
mock_event = object()
assert await stream._listener(mock_event) is None
assert stream._queue.qsize() == 0
@pytest.mark.asyncio
    async def test__listener_when_filter_passes_and_queue_full(self, mock_app):
stream = event_stream.EventStream(mock_app, events.Event, timeout=None, limit=2)
stream._queue.put_nowait(object())
stream._queue.put_nowait(object())
stream.filter(lambda _: True)
mock_event = object()
assert await stream._listener(mock_event) is None
assert stream._queue.qsize() == 2
assert stream._queue.get_nowait() is not mock_event
assert stream._queue.get_nowait() is not mock_event
@pytest.mark.asyncio
    async def test__listener_when_filter_passes_and_queue_not_full(self, mock_app):
stream = event_stream.EventStream(mock_app, events.Event, timeout=None, limit=None)
stream._queue.put_nowait(object())
stream._queue.put_nowait(object())
stream.filter(lambda _: True)
mock_event = object()
assert await stream._listener(mock_event) is None
assert stream._queue.qsize() == 3
assert stream._queue.get_nowait() is not mock_event
assert stream._queue.get_nowait() is not mock_event
assert stream._queue.get_nowait() is mock_event
@pytest.mark.asyncio
async def test___anext___when_stream_closed(self):
streamer = hikari_test_helpers.stub_class(event_stream.EventStream, _active=False)
# flake8 gets annoyed if we use "with" here so here's a hacky alternative
with pytest.raises(TypeError):
await streamer.__anext__()
@pytest.mark.asyncio
@hikari_test_helpers.timeout()
async def test___anext___times_out(self):
streamer = hikari_test_helpers.stub_class(
event_stream.EventStream,
_active=True,
_queue=asyncio.Queue(),
_timeout=hikari_test_helpers.REASONABLE_QUICK_RESPONSE_TIME,
)
async for _ in streamer:
pytest.fail("streamer shouldn't have yielded anything")
else:
# Ensure we don't get a warning or error on del
streamer._active = False
@pytest.mark.asyncio
@hikari_test_helpers.timeout()
async def test___anext___waits_for_next_event(self):
mock_event = object()
streamer = hikari_test_helpers.stub_class(
event_stream.EventStream,
_active=True,
_queue=asyncio.Queue(),
_timeout=hikari_test_helpers.REASONABLE_SLEEP_TIME * 3,
)
async def add_event():
await asyncio.sleep(hikari_test_helpers.REASONABLE_SLEEP_TIME)
streamer._queue.put_nowait(mock_event)
asyncio.create_task(add_event())
async for event in streamer:
assert event is mock_event
# Ensure we don't get a warning or error on del
streamer._active = False
return
pytest.fail("streamer should've yielded something")
@pytest.mark.asyncio
@hikari_test_helpers.timeout()
async def test___anext__(self):
mock_event = object()
streamer = hikari_test_helpers.stub_class(
event_stream.EventStream,
_active=True,
_queue=asyncio.Queue(),
_timeout=hikari_test_helpers.REASONABLE_QUICK_RESPONSE_TIME,
)
streamer._queue.put_nowait(mock_event)
async for event in streamer:
assert event is mock_event
# Ensure we don't get a warning or error on del
streamer._active = False
return
pytest.fail("streamer should've yielded something")
@pytest.mark.asyncio
async def test___await__(self):
mock_event_0 = object()
mock_event_1 = object()
mock_event_2 = object()
streamer = hikari_test_helpers.mock_class_namespace(
event_stream.EventStream,
close=mock.AsyncMock(),
open=mock.AsyncMock(),
init=False,
__anext__=mock.AsyncMock(side_effect=[mock_event_0, mock_event_1, mock_event_2]),
)()
streamer._active = False
assert await streamer == [mock_event_0, mock_event_1, mock_event_2]
streamer.open.assert_awaited_once()
streamer.close.assert_awaited_once()
def test___del___for_active_stream(self):
mock_coroutine = object()
close_method = mock.Mock(return_value=mock_coroutine)
streamer = hikari_test_helpers.mock_class_namespace(event_stream.EventStream, close=close_method, init=False)()
streamer._event_type = events.Event
streamer._active = True
with mock.patch.object(asyncio, "ensure_future", return_value=mock_coroutine):
with unittest.TestCase().assertLogs("hikari", level=logging.WARNING) as logging_watcher:
del streamer
assert logging_watcher.output == [
"WARNING:hikari:active 'Event' streamer fell out of scope before being closed"
]
asyncio.ensure_future.assert_called_once_with(mock_coroutine)
close_method.assert_called_once_with()
def test___del___for_inactive_stream(self):
close_method = mock.Mock()
streamer = hikari_test_helpers.mock_class_namespace(event_stream.EventStream, close=close_method, init=False)()
streamer._event_type = events.Event
streamer._active = False
with mock.patch.object(asyncio, "ensure_future"):
del streamer
asyncio.ensure_future.assert_not_called()
close_method.assert_not_called()
@pytest.mark.asyncio
async def test_close_for_inactive_stream(self, mock_app):
stream = event_stream.EventStream(mock_app, events.Event, timeout=None, limit=None)
await stream.close()
mock_app.dispatcher.unsubscribe.assert_not_called()
@pytest.mark.asyncio
async def test_close_for_active_stream(self, mock_app):
mock_registered_listener = object()
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_app=mock_app,
_event_type=events.Event,
_active=True,
_registered_listener=mock_registered_listener,
)
await stream.close()
mock_app.dispatcher.unsubscribe.assert_called_once_with(events.Event, mock_registered_listener)
assert stream._active is False
assert stream._registered_listener is None
@pytest.mark.asyncio
async def test_close_for_active_stream_handles_value_error(self, mock_app):
mock_registered_listener = object()
mock_app.dispatcher.unsubscribe.side_effect = ValueError
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_app=mock_app,
_event_type=events.Event,
_active=True,
_registered_listener=mock_registered_listener,
)
await stream.close()
mock_app.dispatcher.unsubscribe.assert_called_once_with(events.Event, mock_registered_listener)
assert stream._active is False
assert stream._registered_listener is None
def test_filter_for_inactive_stream(self):
stream = hikari_test_helpers.stub_class(event_stream.EventStream, _filters=iterators.All(()), _active=False)
first_pass = mock.Mock(attr=True)
second_pass = mock.Mock(attr=True)
first_fails = mock.Mock(attr=True)
second_fail = mock.Mock(attr=False)
def predicate(obj):
return obj in (first_pass, second_pass)
stream.filter(predicate, attr=True)
assert stream._filters(first_pass) is True
assert stream._filters(first_fails) is False
assert stream._filters(second_pass) is True
assert stream._filters(second_fail) is False
@pytest.mark.asyncio
async def test_filter_for_active_stream(self):
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_active=True,
)
mock_wrapping_iterator = object()
predicate = object()
with mock.patch.object(iterators.LazyIterator, "filter", return_value=mock_wrapping_iterator):
assert stream.filter(predicate, name="OK") is mock_wrapping_iterator
iterators.LazyIterator.filter.assert_called_once_with(predicate, name="OK")
# Ensure we don't get a warning or error on del
stream._active = False
@pytest.mark.asyncio
async def test_open_for_inactive_stream(self, mock_app):
mock_listener = object()
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_app=mock_app,
_event_type=events.Event,
_active=True,
_registered_listener=mock_listener,
)
with mock.patch.object(event_stream, "_generate_weak_listener"):
with mock.patch.object(weakref, "WeakMethod"):
await stream.open()
weakref.WeakMethod.assert_not_called()
event_stream._generate_weak_listener.assert_not_called()
mock_app.dispatcher.subscribe.assert_not_called()
assert stream._active is True
assert stream._registered_listener is mock_listener
# Ensure we don't get a warning or error on del
stream._active = False
@pytest.mark.asyncio
async def test_open_for_active_stream(self, mock_app):
stream = hikari_test_helpers.stub_class(
event_stream.EventStream,
_app=mock_app,
_event_type=events.Event,
_active=False,
)
mock_listener = object()
mock_listener_ref = object()
with mock.patch.object(event_stream, "_generate_weak_listener", return_value=mock_listener):
with mock.patch.object(weakref, "WeakMethod", return_value=mock_listener_ref):
await stream.open()
weakref.WeakMethod.assert_called_once_with(stream._listener)
event_stream._generate_weak_listener.assert_called_once_with(mock_listener_ref)
mock_app.dispatcher.subscribe.assert_called_once_with(events.Event, mock_listener)
assert stream._active is True
assert stream._registered_listener is mock_listener
# Ensure we don't get a warning or error on del
stream._active = False
| 1.695313 | 2 |
UTest/dataset_test.py | rzumer/VideoLowLevelVision | 3 | 12769611 | <gh_stars>1-10
import os
if not os.getcwd().endswith('UTest'):
os.chdir('UTest')
from VLLV.DataLoader.Dataset import _glob_absolute_pattern, load_datasets
DATASETS = load_datasets('./data/fake_datasets.yml')
def test_glob_absolute_pattern():
URL = './data/set5_x2'
node = _glob_absolute_pattern(URL)
assert len(node) == 5
assert node[0].match('img_001_SRF_2_LR.png')
assert node[1].match('img_002_SRF_2_LR.png')
assert node[2].match('img_003_SRF_2_LR.png')
assert node[3].match('img_004_SRF_2_LR.png')
assert node[4].match('img_005_SRF_2_LR.png')
URL = './data/flying_chair/**/*.flo'
node = _glob_absolute_pattern(URL)
assert len(node) == 1
assert node[0].match('0000.flo')
URL = './data/**/*.png'
node = _glob_absolute_pattern(URL)
assert len(node) == 10
def test_existence():
_K = DATASETS.keys()
for k in _K:
print('==== [', k, '] ====')
_V = []
try:
_V = DATASETS[k].train
except ValueError:
if not _V:
print('[Warning] Train set of', k, 'doesn\'t exist.')
finally:
_V = []
try:
_V = DATASETS[k].val
except ValueError:
if not _V:
print('[Warning] Val set of', k, 'doesn\'t exist.')
finally:
_V = []
try:
_V = DATASETS[k].test
except ValueError:
if not _V:
print('[Warning] Test set of', k, 'doesn\'t exist.')
print('=========================', flush=True)
| 2.28125 | 2 |
setup.py | vsevolod-kolchinsky/py-ugs3client | 1 | 12769612 | <gh_stars>1-10
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='py-ugs3client',
version='0.9.1',
description='UGS3 Python client',
long_description=readme(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
url='https://github.com/vsevolod-kolchinsky/py-ugs3client',
author='<NAME>',
author_email='<EMAIL>',
license='Apache Software License',
packages=['ugs3client'],
install_requires=[
          'requests', 'cached-property', 'pymemcache',
],
)
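# Installation sketch (from a source checkout):
#   pip install .
# Note the distribution name is py-ugs3client, while the importable package is
# ugs3client.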
| 1.484375 | 1 |
dnazip/burros_wheeler.py | dabane-ghassan/dnazip | 5 | 12769613 | # -*- coding: utf-8 -*-
"""
Burrows-Wheeler algorithm class, containing both the naive and the advanced
construction methods. The static methods are written as explicitly as possible
and factored to keep the algorithms readable.
The yielding in some functions sacrifices space complexity, but it is done this
way to stream intermediate states into the View class (the GUI).
"""
from __future__ import absolute_import
from typing import List, Tuple
class BurrosWheeler:
"""A class to represent the Burrows-Wheeler algorithm, all methods are
static for ease of reading and outside usability.
"""
@staticmethod
def pprint(mat: List[str]) -> None:
"""Pretty print, this method prints a burrows wheeler matrix
beautifully (without lists and strings).
Parameters
----------
mat : List[str]
            The Burrows-Wheeler matrix, i.e., a list of strings.
Returns
-------
None
Prints the matrix.
"""
for line in mat:
print(*line, sep="") # scatter operator to print all elements
# of a line
@staticmethod
def string_rotations(seq: str) -> List[str]:
"""Returns all string rotations of a sequence.
Parameters
----------
seq : str
            The sequence to be rotated.
Returns
-------
List[str]
            Returns a list of strings, i.e., a BW-matrix-like object.
"""
seq += '$'
double_seq = seq * 2
all_rotations = []
for i in range(0, len(seq), 1):
rot = double_seq[i:i+len(seq)]
all_rotations.append(rot)
yield [rot for rot in all_rotations]
@staticmethod
def construct_bwm(rotations: List[str]) -> List[str]:
"""This method constructs the Burrows-Wheeler Matrix from a list of
string rotations.
Parameters
----------
rotations : List[str]
            A list of strings, i.e., a BW-matrix-like object.
Returns
-------
List[str]
A list of strings or a Burrows-Wheeler Matrix.
"""
sorted_rotations = sorted(rotations)
return sorted_rotations
@staticmethod
def encode_bwt(matrix: List[str]) -> str:
"""Returns the Burrows-Wheeler Transform from a given Burros-Wheeler
Matrix. the Burros-Wheeler Transform corresponds to the last column
of the matrix.
Parameters
----------
matrix : List[str]
A Burrows-Wheeler Matrix.
Returns
-------
str
The Burrows-Wheeler Transform.
"""
last_column = []
for line in matrix:
last_char = line[-1]
last_column.append(last_char)
transformed_seq = ''.join(last_column)
return transformed_seq
@staticmethod
def reconstruct_bwm(bwt: str) -> List[str]:
"""This method reconstructs the Burrows-Wheeler Matrix given the
corresponding Burros-Wheeler Transform. The naive algorithm for
constructing the matrix given the transform is going to iteratively
add the transform as a left column, then sorts lexicographically
the columns.
Parameters
----------
bwt : str
The Burrows-Wheeler Transform.
Returns
-------
List[str]
A Burrows-Wheeler Matrix.
"""
bwm = []
# first loop to create seeds for lines O(n)
for _ in range(0, len(bwt), 1):
bwm.append('')
for _ in range(0, len(bwt), 1):
for i in range(0, len(bwt), 1):
bwm[i] = bwt[i] + bwm[i]
yield [line for line in bwm]
bwm.sort()
yield [line for line in bwm]
@staticmethod
def decode_bwt(matrix: List[str]) -> str:
"""This method returns the original sequence from a given
Burrows-Wheeler Matrix, the original sequence is the line that ends
with the character '$'.
Parameters
----------
matrix : List[str]
A Burrows-Wheeler Matrix.
Returns
-------
str
The original sequence.
"""
seq = ""
for line in matrix: # search for the line that ends with '$'
if line[-1] == "$":
seq += line
return seq[:-1] # return the sequence without the '$' sign
@staticmethod
def suffix_array(sequence: str) -> List[Tuple[str, int]]:
"""Builds a suffix-array from a given sequence of characters.
        - Complexity of the construction: O(n^2 log(n))
        - Sorting performs O(n log(n)) comparisons, each comparison costing O(n)
Parameters
----------
sequence : str
The given sequence of characters.
Returns
-------
List[Tuple[str, int]]
The suffix array of the sequence; a list of tuples.
"""
sequence += '$'
suff_arr = []
for i in range(0, len(sequence), 1):
suff_arr.append((sequence[i:], i))
return sorted(suff_arr)
@staticmethod
def bwt_advanced(sequence: str) -> str:
"""Generates a Burrows-Wheeler Transfrom from a suffix array, advanced
construction of BWT. Better algorithmic complexity.
Parameters
----------
sequence : str
            The sequence to be transformed.
Returns
-------
str
The Burrows-Wheeler Transform.
"""
bwt = []
for suff in BurrosWheeler.suffix_array(sequence):
i = suff[1] # The suffix's index is the 2nd element in the tuple
if i == 0:
bwt.append('$')
else:
bwt.append(sequence[i - 1])
return ''.join(bwt)
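# --- Usage sketch (not part of the class): a BWT round trip on a toy sequence.
# string_rotations and reconstruct_bwm are generators (they yield intermediate
# states for the GUI), so we exhaust them and keep only the final value.
if __name__ == "__main__":
    seq = "BANANA"
    rotations = list(BurrosWheeler.string_rotations(seq))[-1]
    bwm = BurrosWheeler.construct_bwm(rotations)
    bwt = BurrosWheeler.encode_bwt(bwm)
    assert bwt == BurrosWheeler.bwt_advanced(seq)  # both constructions agree
    rebuilt = list(BurrosWheeler.reconstruct_bwm(bwt))[-1]
    assert BurrosWheeler.decode_bwt(rebuilt) == seq  # lossless round trip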
| 3.609375 | 4 |
meerschaum/Pipe/_fetch.py | bmeares/Meerschaum | 32 | 12769614 | <gh_stars>10-100
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Functions for fetching new data into the Pipe
"""
from __future__ import annotations
from meerschaum.utils.typing import Optional, Any
def fetch(
self,
begin : Optional[datetime.datetime] = None,
end : Optional[datetime.datetime] = None,
sync_chunks : bool = False,
deactivate_plugin_venv: bool = True,
debug : bool = False,
**kw : Any
) -> 'pd.DataFrame or None':
"""
Fetch a Pipe's latest data from its connector.
    returns : pd.DataFrame of the newest unseen data, or True when syncing chunks in parallel
"""
if 'fetch' not in dir(self.connector):
from meerschaum.utils.warnings import warn
warn(f"No `fetch()` function defined for connector '{self.connector}'")
return None
from meerschaum.utils.debug import dprint
if self.connector.type == 'plugin':
from meerschaum.utils.packages import activate_venv, deactivate_venv
activate_venv(self.connector.label, debug=debug)
    _chunk_hook = kw.pop('chunk_hook', None)
df = self.connector.fetch(
self,
begin = begin,
end = end,
chunk_hook = (
self.sync if sync_chunks and _chunk_hook is None
else _chunk_hook
),
debug = debug,
**kw
)
if self.connector.type == 'plugin' and deactivate_plugin_venv:
deactivate_venv(self.connector.label, debug=debug)
### Return True if we're syncing in parallel, else continue as usual.
if sync_chunks:
return True
return df
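# Usage sketch (hypothetical pipe keys; requires a registered connector):
#
#     import meerschaum as mrsm
#     pipe = mrsm.Pipe('sql:main', 'demo_metric')
#     df = pipe.fetch(debug=True)   # newest unseen rows, or None on failure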
| 2.375 | 2 |
sdk/python/pulumi_azure_native/servicefabricmesh/v20180701preview/_inputs.py | sebtelko/pulumi-azure-native | 0 | 12769615 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AzureInternalMonitoringPipelineSinkDescriptionArgs',
'ContainerCodePackagePropertiesArgs',
'ContainerLabelArgs',
'ContainerVolumeArgs',
'DiagnosticsDescriptionArgs',
'DiagnosticsRefArgs',
'EndpointPropertiesArgs',
'EnvironmentVariableArgs',
'ImageRegistryCredentialArgs',
'IngressConfigArgs',
'Layer4IngressConfigArgs',
'NetworkRefArgs',
'ResourceLimitsArgs',
'ResourceRequestsArgs',
'ResourceRequirementsArgs',
'ServiceResourceDescriptionArgs',
'SettingArgs',
'VolumeProviderParametersAzureFileArgs',
]
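# Usage sketch (hypothetical values): these input types nest when declaring a
# service's resource requirements, e.g.
#
#     requirements = ResourceRequirementsArgs(
#         requests=ResourceRequestsArgs(cpu=1, memory_in_gb=1.5),
#     )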
@pulumi.input_type
class AzureInternalMonitoringPipelineSinkDescriptionArgs:
def __init__(__self__, *,
kind: pulumi.Input[str],
account_name: Optional[pulumi.Input[str]] = None,
auto_key_config_url: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
fluentd_config_url: Optional[Any] = None,
ma_config_url: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None):
"""
Diagnostics settings for Geneva.
:param pulumi.Input[str] kind: The kind of DiagnosticsSink.
Expected value is 'AzureInternalMonitoringPipeline'.
:param pulumi.Input[str] account_name: Azure Internal monitoring pipeline account.
:param pulumi.Input[str] auto_key_config_url: Azure Internal monitoring pipeline autokey associated with the certificate.
:param pulumi.Input[str] description: A description of the sink.
:param Any fluentd_config_url: Azure Internal monitoring agent fluentd configuration.
:param pulumi.Input[str] ma_config_url: Azure Internal monitoring agent configuration.
:param pulumi.Input[str] name: Name of the sink. This value is referenced by DiagnosticsReferenceDescription
:param pulumi.Input[str] namespace: Azure Internal monitoring pipeline account namespace.
"""
pulumi.set(__self__, "kind", 'AzureInternalMonitoringPipeline')
if account_name is not None:
pulumi.set(__self__, "account_name", account_name)
if auto_key_config_url is not None:
pulumi.set(__self__, "auto_key_config_url", auto_key_config_url)
if description is not None:
pulumi.set(__self__, "description", description)
if fluentd_config_url is not None:
pulumi.set(__self__, "fluentd_config_url", fluentd_config_url)
if ma_config_url is not None:
pulumi.set(__self__, "ma_config_url", ma_config_url)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
The kind of DiagnosticsSink.
Expected value is 'AzureInternalMonitoringPipeline'.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> Optional[pulumi.Input[str]]:
"""
Azure Internal monitoring pipeline account.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="autoKeyConfigUrl")
def auto_key_config_url(self) -> Optional[pulumi.Input[str]]:
"""
Azure Internal monitoring pipeline autokey associated with the certificate.
"""
return pulumi.get(self, "auto_key_config_url")
@auto_key_config_url.setter
def auto_key_config_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auto_key_config_url", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A description of the sink.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="fluentdConfigUrl")
def fluentd_config_url(self) -> Optional[Any]:
"""
Azure Internal monitoring agent fluentd configuration.
"""
return pulumi.get(self, "fluentd_config_url")
@fluentd_config_url.setter
def fluentd_config_url(self, value: Optional[Any]):
pulumi.set(self, "fluentd_config_url", value)
@property
@pulumi.getter(name="maConfigUrl")
def ma_config_url(self) -> Optional[pulumi.Input[str]]:
"""
Azure Internal monitoring agent configuration.
"""
return pulumi.get(self, "ma_config_url")
@ma_config_url.setter
def ma_config_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ma_config_url", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the sink. This value is referenced by DiagnosticsReferenceDescription
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Azure Internal monitoring pipeline account namespace.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@pulumi.input_type
class ContainerCodePackagePropertiesArgs:
def __init__(__self__, *,
image: pulumi.Input[str],
name: pulumi.Input[str],
resources: pulumi.Input['ResourceRequirementsArgs'],
commands: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
diagnostics: Optional[pulumi.Input['DiagnosticsRefArgs']] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]]] = None,
entrypoint: Optional[pulumi.Input[str]] = None,
environment_variables: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]]] = None,
image_registry_credential: Optional[pulumi.Input['ImageRegistryCredentialArgs']] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]]] = None,
settings: Optional[pulumi.Input[Sequence[pulumi.Input['SettingArgs']]]] = None,
volume_refs: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]]] = None):
"""
Describes a container and its runtime properties.
:param pulumi.Input[str] image: The Container image to use.
:param pulumi.Input[str] name: The name of the code package.
:param pulumi.Input['ResourceRequirementsArgs'] resources: This type describes the resource requirements for a container or a service.
:param pulumi.Input[Sequence[pulumi.Input[str]]] commands: Command array to execute within the container in exec form.
:param pulumi.Input['DiagnosticsRefArgs'] diagnostics: Reference to sinks in DiagnosticsDescription.
:param pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]] endpoints: The endpoints exposed by this container.
:param pulumi.Input[str] entrypoint: Override for the default entry point in the container.
:param pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]] environment_variables: The environment variables to set in this container
:param pulumi.Input['ImageRegistryCredentialArgs'] image_registry_credential: Image registry credential.
:param pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]] labels: The labels to set in this container.
:param pulumi.Input[Sequence[pulumi.Input['SettingArgs']]] settings: The settings to set in this container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\\secrets". The path for Linux container is "/var/secrets".
:param pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]] volume_refs: The volumes to be attached to the container.
"""
pulumi.set(__self__, "image", image)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resources", resources)
if commands is not None:
pulumi.set(__self__, "commands", commands)
if diagnostics is not None:
pulumi.set(__self__, "diagnostics", diagnostics)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if entrypoint is not None:
pulumi.set(__self__, "entrypoint", entrypoint)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if image_registry_credential is not None:
pulumi.set(__self__, "image_registry_credential", image_registry_credential)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if settings is not None:
pulumi.set(__self__, "settings", settings)
if volume_refs is not None:
pulumi.set(__self__, "volume_refs", volume_refs)
@property
@pulumi.getter
def image(self) -> pulumi.Input[str]:
"""
The Container image to use.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: pulumi.Input[str]):
pulumi.set(self, "image", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the code package.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def resources(self) -> pulumi.Input['ResourceRequirementsArgs']:
"""
This type describes the resource requirements for a container or a service.
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: pulumi.Input['ResourceRequirementsArgs']):
pulumi.set(self, "resources", value)
@property
@pulumi.getter
def commands(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command array to execute within the container in exec form.
"""
return pulumi.get(self, "commands")
@commands.setter
def commands(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "commands", value)
@property
@pulumi.getter
def diagnostics(self) -> Optional[pulumi.Input['DiagnosticsRefArgs']]:
"""
Reference to sinks in DiagnosticsDescription.
"""
return pulumi.get(self, "diagnostics")
@diagnostics.setter
def diagnostics(self, value: Optional[pulumi.Input['DiagnosticsRefArgs']]):
pulumi.set(self, "diagnostics", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]]]:
"""
The endpoints exposed by this container.
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EndpointPropertiesArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter
def entrypoint(self) -> Optional[pulumi.Input[str]]:
"""
Override for the default entry point in the container.
"""
return pulumi.get(self, "entrypoint")
@entrypoint.setter
def entrypoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "entrypoint", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]]]:
"""
The environment variables to set in this container
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentVariableArgs']]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter(name="imageRegistryCredential")
def image_registry_credential(self) -> Optional[pulumi.Input['ImageRegistryCredentialArgs']]:
"""
Image registry credential.
"""
return pulumi.get(self, "image_registry_credential")
@image_registry_credential.setter
def image_registry_credential(self, value: Optional[pulumi.Input['ImageRegistryCredentialArgs']]):
pulumi.set(self, "image_registry_credential", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]]]:
"""
The labels to set in this container.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerLabelArgs']]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SettingArgs']]]]:
"""
The settings to set in this container. The setting file path can be fetched from environment variable "Fabric_SettingPath". The path for Windows container is "C:\\secrets". The path for Linux container is "/var/secrets".
"""
return pulumi.get(self, "settings")
@settings.setter
def settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SettingArgs']]]]):
pulumi.set(self, "settings", value)
@property
@pulumi.getter(name="volumeRefs")
def volume_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]]]:
"""
The volumes to be attached to the container.
"""
return pulumi.get(self, "volume_refs")
@volume_refs.setter
def volume_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ContainerVolumeArgs']]]]):
pulumi.set(self, "volume_refs", value)
@pulumi.input_type
class ContainerLabelArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
Describes a container label.
:param pulumi.Input[str] name: The name of the container label.
:param pulumi.Input[str] value: The value of the container label.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the container label.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the container label.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ContainerVolumeArgs:
def __init__(__self__, *,
destination_path: pulumi.Input[str],
name: pulumi.Input[str],
read_only: Optional[pulumi.Input[bool]] = None):
"""
Describes how a volume is attached to a container.
:param pulumi.Input[str] destination_path: The path within the container at which the volume should be mounted. Only valid path characters are allowed.
:param pulumi.Input[str] name: Name of the volume.
:param pulumi.Input[bool] read_only: The flag indicating whether the volume is read only. Default is 'false'.
"""
pulumi.set(__self__, "destination_path", destination_path)
pulumi.set(__self__, "name", name)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
@property
@pulumi.getter(name="destinationPath")
def destination_path(self) -> pulumi.Input[str]:
"""
The path within the container at which the volume should be mounted. Only valid path characters are allowed.
"""
return pulumi.get(self, "destination_path")
@destination_path.setter
def destination_path(self, value: pulumi.Input[str]):
pulumi.set(self, "destination_path", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the volume.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[pulumi.Input[bool]]:
"""
The flag indicating whether the volume is read only. Default is 'false'.
"""
return pulumi.get(self, "read_only")
@read_only.setter
def read_only(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "read_only", value)
@pulumi.input_type
class DiagnosticsDescriptionArgs:
def __init__(__self__, *,
default_sink_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
sinks: Optional[pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]]] = None):
"""
Describes the diagnostics options available
:param pulumi.Input[Sequence[pulumi.Input[str]]] default_sink_refs: The sinks to be used if diagnostics is enabled. Sink choices can be overridden at the service and code package level.
:param pulumi.Input[bool] enabled: Status of whether or not sinks are enabled.
:param pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]] sinks: List of supported sinks that can be referenced.
"""
if default_sink_refs is not None:
pulumi.set(__self__, "default_sink_refs", default_sink_refs)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if sinks is not None:
pulumi.set(__self__, "sinks", sinks)
@property
@pulumi.getter(name="defaultSinkRefs")
def default_sink_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The sinks to be used if diagnostics is enabled. Sink choices can be overridden at the service and code package level.
"""
return pulumi.get(self, "default_sink_refs")
@default_sink_refs.setter
def default_sink_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "default_sink_refs", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Status of whether or not sinks are enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def sinks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]]]:
"""
List of supported sinks that can be referenced.
"""
return pulumi.get(self, "sinks")
@sinks.setter
def sinks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AzureInternalMonitoringPipelineSinkDescriptionArgs']]]]):
pulumi.set(self, "sinks", value)
@pulumi.input_type
class DiagnosticsRefArgs:
def __init__(__self__, *,
enabled: Optional[pulumi.Input[bool]] = None,
sink_refs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Reference to sinks in DiagnosticsDescription.
:param pulumi.Input[bool] enabled: Status of whether or not sinks are enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] sink_refs: List of sinks to be used if enabled. References the list of sinks in DiagnosticsDescription.
"""
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if sink_refs is not None:
pulumi.set(__self__, "sink_refs", sink_refs)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Status of whether or not sinks are enabled.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="sinkRefs")
def sink_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of sinks to be used if enabled. References the list of sinks in DiagnosticsDescription.
"""
return pulumi.get(self, "sink_refs")
@sink_refs.setter
def sink_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "sink_refs", value)
@pulumi.input_type
class EndpointPropertiesArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
port: Optional[pulumi.Input[int]] = None):
"""
Describes a container endpoint.
:param pulumi.Input[str] name: The name of the endpoint.
:param pulumi.Input[int] port: Port used by the container.
"""
pulumi.set(__self__, "name", name)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the endpoint.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
Port used by the container.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@pulumi.input_type
class EnvironmentVariableArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Describes an environment variable for the container.
:param pulumi.Input[str] name: The name of the environment variable.
:param pulumi.Input[str] value: The value of the environment variable.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the environment variable.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the environment variable.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ImageRegistryCredentialArgs:
def __init__(__self__, *,
server: pulumi.Input[str],
username: pulumi.Input[str],
password: Optional[pulumi.Input[str]] = None):
"""
Image registry credential.
:param pulumi.Input[str] server: Docker image registry server, without protocol such as `http` and `https`.
:param pulumi.Input[str] username: The username for the private registry.
:param pulumi.Input[str] password: The password for the private registry.
"""
pulumi.set(__self__, "server", server)
pulumi.set(__self__, "username", username)
if password is not None:
pulumi.set(__self__, "password", password)
@property
@pulumi.getter
def server(self) -> pulumi.Input[str]:
"""
Docker image registry server, without protocol such as `http` and `https`.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: pulumi.Input[str]):
pulumi.set(self, "server", value)
@property
@pulumi.getter
def username(self) -> pulumi.Input[str]:
"""
The username for the private registry.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: pulumi.Input[str]):
pulumi.set(self, "username", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password for the private registry.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@pulumi.input_type
class IngressConfigArgs:
def __init__(__self__, *,
layer4: Optional[pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]]] = None,
qos_level: Optional[pulumi.Input[Union[str, 'IngressQoSLevel']]] = None):
"""
Describes public connectivity configuration for the network.
:param pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]] layer4: Configuration for layer4 public connectivity for this network.
:param pulumi.Input[Union[str, 'IngressQoSLevel']] qos_level: The QoS tier for ingress.
"""
if layer4 is not None:
pulumi.set(__self__, "layer4", layer4)
if qos_level is not None:
pulumi.set(__self__, "qos_level", qos_level)
@property
@pulumi.getter
def layer4(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]]]:
"""
Configuration for layer4 public connectivity for this network.
"""
return pulumi.get(self, "layer4")
@layer4.setter
def layer4(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['Layer4IngressConfigArgs']]]]):
pulumi.set(self, "layer4", value)
@property
@pulumi.getter(name="qosLevel")
def qos_level(self) -> Optional[pulumi.Input[Union[str, 'IngressQoSLevel']]]:
"""
The QoS tier for ingress.
"""
return pulumi.get(self, "qos_level")
@qos_level.setter
def qos_level(self, value: Optional[pulumi.Input[Union[str, 'IngressQoSLevel']]]):
pulumi.set(self, "qos_level", value)
@pulumi.input_type
class Layer4IngressConfigArgs:
def __init__(__self__, *,
application_name: Optional[pulumi.Input[str]] = None,
endpoint_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
public_port: Optional[pulumi.Input[int]] = None,
service_name: Optional[pulumi.Input[str]] = None):
"""
Describes the layer4 configuration for public connectivity for this network.
:param pulumi.Input[str] application_name: The application name which contains the service to be exposed.
:param pulumi.Input[str] endpoint_name: The service endpoint that needs to be exposed.
:param pulumi.Input[str] name: Layer4 ingress config name.
:param pulumi.Input[int] public_port: Specifies the public port at which the service endpoint below needs to be exposed.
:param pulumi.Input[str] service_name: The service whose endpoint needs to be exposed at the public port.
"""
if application_name is not None:
pulumi.set(__self__, "application_name", application_name)
if endpoint_name is not None:
pulumi.set(__self__, "endpoint_name", endpoint_name)
if name is not None:
pulumi.set(__self__, "name", name)
if public_port is not None:
pulumi.set(__self__, "public_port", public_port)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter(name="applicationName")
def application_name(self) -> Optional[pulumi.Input[str]]:
"""
The application name which contains the service to be exposed.
"""
return pulumi.get(self, "application_name")
@application_name.setter
def application_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_name", value)
@property
@pulumi.getter(name="endpointName")
def endpoint_name(self) -> Optional[pulumi.Input[str]]:
"""
The service endpoint that needs to be exposed.
"""
return pulumi.get(self, "endpoint_name")
@endpoint_name.setter
def endpoint_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Layer4 ingress config name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="publicPort")
def public_port(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the public port at which the service endpoint below needs to be exposed.
"""
return pulumi.get(self, "public_port")
@public_port.setter
def public_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "public_port", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
The service whose endpoint needs to be exposed at the public port.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@pulumi.input_type
class NetworkRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
Describes a network reference in a service.
:param pulumi.Input[str] name: Name of the network.
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the network.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class ResourceLimitsArgs:
def __init__(__self__, *,
cpu: Optional[pulumi.Input[float]] = None,
memory_in_gb: Optional[pulumi.Input[float]] = None):
"""
This type describes the resource limits for a given container. It describes the most amount of resources a container is allowed to use before being restarted.
:param pulumi.Input[float] cpu: CPU limits in cores. At present, only full cores are supported.
:param pulumi.Input[float] memory_in_gb: The memory limit in GB.
"""
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if memory_in_gb is not None:
pulumi.set(__self__, "memory_in_gb", memory_in_gb)
@property
@pulumi.getter
def cpu(self) -> Optional[pulumi.Input[float]]:
"""
CPU limits in cores. At present, only full cores are supported.
"""
return pulumi.get(self, "cpu")
@cpu.setter
def cpu(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "cpu", value)
@property
@pulumi.getter(name="memoryInGB")
def memory_in_gb(self) -> Optional[pulumi.Input[float]]:
"""
The memory limit in GB.
"""
return pulumi.get(self, "memory_in_gb")
@memory_in_gb.setter
def memory_in_gb(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "memory_in_gb", value)
@pulumi.input_type
class ResourceRequestsArgs:
def __init__(__self__, *,
cpu: pulumi.Input[float],
memory_in_gb: pulumi.Input[float]):
"""
This type describes the requested resources for a given container. It describes the least amount of resources required for the container. A container can consume more than requested resources up to the specified limits before being restarted. Currently, the requested resources are treated as limits.
:param pulumi.Input[float] cpu: Requested number of CPU cores. At present, only full cores are supported.
:param pulumi.Input[float] memory_in_gb: The memory request in GB for this container.
"""
pulumi.set(__self__, "cpu", cpu)
pulumi.set(__self__, "memory_in_gb", memory_in_gb)
@property
@pulumi.getter
def cpu(self) -> pulumi.Input[float]:
"""
Requested number of CPU cores. At present, only full cores are supported.
"""
return pulumi.get(self, "cpu")
@cpu.setter
def cpu(self, value: pulumi.Input[float]):
pulumi.set(self, "cpu", value)
@property
@pulumi.getter(name="memoryInGB")
def memory_in_gb(self) -> pulumi.Input[float]:
"""
The memory request in GB for this container.
"""
return pulumi.get(self, "memory_in_gb")
@memory_in_gb.setter
def memory_in_gb(self, value: pulumi.Input[float]):
pulumi.set(self, "memory_in_gb", value)
@pulumi.input_type
class ResourceRequirementsArgs:
def __init__(__self__, *,
requests: pulumi.Input['ResourceRequestsArgs'],
limits: Optional[pulumi.Input['ResourceLimitsArgs']] = None):
"""
This type describes the resource requirements for a container or a service.
:param pulumi.Input['ResourceRequestsArgs'] requests: Describes the requested resources for a given container.
:param pulumi.Input['ResourceLimitsArgs'] limits: Describes the maximum limits on the resources for a given container.
"""
pulumi.set(__self__, "requests", requests)
if limits is not None:
pulumi.set(__self__, "limits", limits)
@property
@pulumi.getter
def requests(self) -> pulumi.Input['ResourceRequestsArgs']:
"""
Describes the requested resources for a given container.
"""
return pulumi.get(self, "requests")
@requests.setter
def requests(self, value: pulumi.Input['ResourceRequestsArgs']):
pulumi.set(self, "requests", value)
@property
@pulumi.getter
def limits(self) -> Optional[pulumi.Input['ResourceLimitsArgs']]:
"""
Describes the maximum limits on the resources for a given container.
"""
return pulumi.get(self, "limits")
@limits.setter
def limits(self, value: Optional[pulumi.Input['ResourceLimitsArgs']]):
pulumi.set(self, "limits", value)
@pulumi.input_type
class ServiceResourceDescriptionArgs:
def __init__(__self__, *,
code_packages: pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]],
os_type: pulumi.Input[Union[str, 'OperatingSystemTypes']],
description: Optional[pulumi.Input[str]] = None,
diagnostics: Optional[pulumi.Input['DiagnosticsRefArgs']] = None,
health_state: Optional[pulumi.Input[Union[str, 'HealthState']]] = None,
name: Optional[pulumi.Input[str]] = None,
network_refs: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]]] = None,
replica_count: Optional[pulumi.Input[int]] = None):
"""
This type describes a service resource.
:param pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]] code_packages: Describes the set of code packages that forms the service. A code package describes the container and the properties for running it. All the code packages are started together on the same host and share the same context (network, process etc.).
:param pulumi.Input[Union[str, 'OperatingSystemTypes']] os_type: The Operating system type required by the code in service.
:param pulumi.Input[str] description: User readable description of the service.
:param pulumi.Input['DiagnosticsRefArgs'] diagnostics: Reference to sinks in DiagnosticsDescription.
:param pulumi.Input[Union[str, 'HealthState']] health_state: The health state of a resource such as Application, Service, or Network.
:param pulumi.Input[str] name: The name of the resource
:param pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]] network_refs: The names of the private networks that this service needs to be part of.
:param pulumi.Input[int] replica_count: The number of replicas of the service to create. Defaults to 1 if not specified.
"""
pulumi.set(__self__, "code_packages", code_packages)
pulumi.set(__self__, "os_type", os_type)
if description is not None:
pulumi.set(__self__, "description", description)
if diagnostics is not None:
pulumi.set(__self__, "diagnostics", diagnostics)
if health_state is not None:
pulumi.set(__self__, "health_state", health_state)
if name is not None:
pulumi.set(__self__, "name", name)
if network_refs is not None:
pulumi.set(__self__, "network_refs", network_refs)
if replica_count is not None:
pulumi.set(__self__, "replica_count", replica_count)
@property
@pulumi.getter(name="codePackages")
def code_packages(self) -> pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]]:
"""
        Describes the set of code packages that form the service. A code package describes the container and the properties for running it. All the code packages are started together on the same host and share the same context (network, process, etc.).
"""
return pulumi.get(self, "code_packages")
@code_packages.setter
def code_packages(self, value: pulumi.Input[Sequence[pulumi.Input['ContainerCodePackagePropertiesArgs']]]):
pulumi.set(self, "code_packages", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Input[Union[str, 'OperatingSystemTypes']]:
"""
        The operating system type required by the code in the service.
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: pulumi.Input[Union[str, 'OperatingSystemTypes']]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
        User-readable description of the service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def diagnostics(self) -> Optional[pulumi.Input['DiagnosticsRefArgs']]:
"""
Reference to sinks in DiagnosticsDescription.
"""
return pulumi.get(self, "diagnostics")
@diagnostics.setter
def diagnostics(self, value: Optional[pulumi.Input['DiagnosticsRefArgs']]):
pulumi.set(self, "diagnostics", value)
@property
@pulumi.getter(name="healthState")
def health_state(self) -> Optional[pulumi.Input[Union[str, 'HealthState']]]:
"""
The health state of a resource such as Application, Service, or Network.
"""
return pulumi.get(self, "health_state")
@health_state.setter
def health_state(self, value: Optional[pulumi.Input[Union[str, 'HealthState']]]):
pulumi.set(self, "health_state", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkRefs")
def network_refs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]]]:
"""
The names of the private networks that this service needs to be part of.
"""
return pulumi.get(self, "network_refs")
@network_refs.setter
def network_refs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkRefArgs']]]]):
pulumi.set(self, "network_refs", value)
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of replicas of the service to create. Defaults to 1 if not specified.
"""
return pulumi.get(self, "replica_count")
@replica_count.setter
def replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replica_count", value)
@pulumi.input_type
class SettingArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Describes a setting for the container.
:param pulumi.Input[str] name: The name of the setting.
:param pulumi.Input[str] value: The value of the setting.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the setting.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
The value of the setting.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class VolumeProviderParametersAzureFileArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
share_name: pulumi.Input[str],
account_key: Optional[pulumi.Input[str]] = None):
"""
This type describes a volume provided by an Azure Files file share.
:param pulumi.Input[str] account_name: Name of the Azure storage account for the File Share.
:param pulumi.Input[str] share_name: Name of the Azure Files file share that provides storage for the volume.
:param pulumi.Input[str] account_key: Access key of the Azure storage account for the File Share.
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "share_name", share_name)
if account_key is not None:
pulumi.set(__self__, "account_key", account_key)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Name of the Azure storage account for the File Share.
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="shareName")
def share_name(self) -> pulumi.Input[str]:
"""
Name of the Azure Files file share that provides storage for the volume.
"""
return pulumi.get(self, "share_name")
@share_name.setter
def share_name(self, value: pulumi.Input[str]):
pulumi.set(self, "share_name", value)
@property
@pulumi.getter(name="accountKey")
def account_key(self) -> Optional[pulumi.Input[str]]:
"""
Access key of the Azure storage account for the File Share.
"""
return pulumi.get(self, "account_key")
@account_key.setter
def account_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_key", value)
| 1.453125 | 1 |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_tunnel_nve_oper.py | tkamata-test/ydk-py | 0 | 12769616 | """ Cisco_IOS_XR_tunnel_nve_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-nve package operational data.
This module contains definitions
for the following management objects\:
nve\: NVE operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Nve(object):
"""
NVE operational data
.. attribute:: interfaces
Table for NVE interface attributes
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Interfaces>`
.. attribute:: vnis
Table for VNIs
**type**\: :py:class:`Vnis <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Vnis>`
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.interfaces = Nve.Interfaces()
self.interfaces.parent = self
self.vnis = Nve.Vnis()
self.vnis.parent = self
class Vnis(object):
"""
Table for VNIs
.. attribute:: vni
The attributes for a particular VNI
**type**\: list of :py:class:`Vni <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Vnis.Vni>`
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vni = YList()
self.vni.parent = self
self.vni.name = 'vni'
class Vni(object):
"""
The attributes for a particular VNI
.. attribute:: vni <key>
VNI ID
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: bvi_ifh
BVI Interface Handle
**type**\: int
**range:** 0..4294967295
.. attribute:: bvi_mac
BVI MAC address
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: bvi_state
BVI Interface Oper State
**type**\: int
**range:** 0..255
.. attribute:: flags
Flags
**type**\: int
**range:** 0..4294967295
.. attribute:: interface_name
NVE Interface name
**type**\: str
.. attribute:: ipv4_tbl_id
IPv4 Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: ipv6_tbl_id
IPv6 Table ID
**type**\: int
**range:** 0..4294967295
.. attribute:: mcast_flags
McastFlags
**type**\: int
**range:** 0..4294967295
.. attribute:: mcast_ipv4_address
MCAST IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: state
State
**type**\: int
**range:** \-128..127
.. attribute:: topo_id
L2RIB Topology ID
**type**\: int
**range:** 0..4294967295
.. attribute:: topo_name
L2RIB Topology Name
**type**\: str
**length:** 0..50
.. attribute:: topo_valid
TOPO ID valid flag
**type**\: bool
.. attribute:: udp_port
UDP Port
**type**\: int
**range:** 0..4294967295
.. attribute:: vni_max
VNI Max in Range
**type**\: int
**range:** 0..4294967295
.. attribute:: vni_min
VNI Min in Range
**type**\: int
**range:** 0..4294967295
.. attribute:: vni_xr
VNI Number
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_id
L3 VRF ID
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_name
L3 VRF Name
**type**\: str
.. attribute:: vrf_vni
VRF VNI
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.vni = None
self.bvi_ifh = None
self.bvi_mac = None
self.bvi_state = None
self.flags = None
self.interface_name = None
self.ipv4_tbl_id = None
self.ipv6_tbl_id = None
self.mcast_flags = None
self.mcast_ipv4_address = None
self.state = None
self.topo_id = None
self.topo_name = None
self.topo_valid = None
self.udp_port = None
self.vni_max = None
self.vni_min = None
self.vni_xr = None
self.vrf_id = None
self.vrf_name = None
self.vrf_vni = None
@property
def _common_path(self):
if self.vni is None:
raise YPYModelError('Key property vni is None')
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:vnis/Cisco-IOS-XR-tunnel-nve-oper:vni[Cisco-IOS-XR-tunnel-nve-oper:vni = ' + str(self.vni) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.vni is not None:
return True
if self.bvi_ifh is not None:
return True
if self.bvi_mac is not None:
return True
if self.bvi_state is not None:
return True
if self.flags is not None:
return True
if self.interface_name is not None:
return True
if self.ipv4_tbl_id is not None:
return True
if self.ipv6_tbl_id is not None:
return True
if self.mcast_flags is not None:
return True
if self.mcast_ipv4_address is not None:
return True
if self.state is not None:
return True
if self.topo_id is not None:
return True
if self.topo_name is not None:
return True
if self.topo_valid is not None:
return True
if self.udp_port is not None:
return True
if self.vni_max is not None:
return True
if self.vni_min is not None:
return True
if self.vni_xr is not None:
return True
if self.vrf_id is not None:
return True
if self.vrf_name is not None:
return True
if self.vrf_vni is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Vnis.Vni']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:vnis'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.vni is not None:
for child_ref in self.vni:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Vnis']['meta_info']
class Interfaces(object):
"""
Table for NVE interface attributes
.. attribute:: interface
The attributes for a particular interface
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_nve_oper.Nve.Interfaces.Interface>`
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
The attributes for a particular interface
.. attribute:: interface_name <key>
Interface Name
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: admin_state
Admin State
**type**\: int
**range:** \-128..127
.. attribute:: any_cast_source_interface_name
Anycast Source Interface name
**type**\: str
.. attribute:: any_cast_source_ipv4_address
Anycast Source IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: any_cast_source_state
Anycast Source Interface State
**type**\: int
**range:** \-128..127
.. attribute:: encap
Encap
**type**\: int
**range:** \-128..127
.. attribute:: flags
Flags
**type**\: int
**range:** 0..4294967295
.. attribute:: if_handle
NVE IfHandle
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: interface_name_xr
Interface name
**type**\: str
.. attribute:: source_interface_name
Source Interface name
**type**\: str
.. attribute:: source_ipv4_address
Source IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: source_state
Source Intf State
**type**\: int
**range:** \-128..127
.. attribute:: state
State
**type**\: int
**range:** \-128..127
.. attribute:: sync_mcast_flags
Sync McastFlags
**type**\: int
**range:** 0..4294967295
.. attribute:: sync_mcast_ipv4_address
MCAST sync group IPv4 Address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: udp_port
UDP Port
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-nve-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.admin_state = None
self.any_cast_source_interface_name = None
self.any_cast_source_ipv4_address = None
self.any_cast_source_state = None
self.encap = None
self.flags = None
self.if_handle = None
self.interface_name_xr = None
self.source_interface_name = None
self.source_ipv4_address = None
self.source_state = None
self.state = None
self.sync_mcast_flags = None
self.sync_mcast_ipv4_address = None
self.udp_port = None
@property
def _common_path(self):
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:interfaces/Cisco-IOS-XR-tunnel-nve-oper:interface[Cisco-IOS-XR-tunnel-nve-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.admin_state is not None:
return True
if self.any_cast_source_interface_name is not None:
return True
if self.any_cast_source_ipv4_address is not None:
return True
if self.any_cast_source_state is not None:
return True
if self.encap is not None:
return True
if self.flags is not None:
return True
if self.if_handle is not None:
return True
if self.interface_name_xr is not None:
return True
if self.source_interface_name is not None:
return True
if self.source_ipv4_address is not None:
return True
if self.source_state is not None:
return True
if self.state is not None:
return True
if self.sync_mcast_flags is not None:
return True
if self.sync_mcast_ipv4_address is not None:
return True
if self.udp_port is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-nve-oper:nve/Cisco-IOS-XR-tunnel-nve-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve.Interfaces']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-nve-oper:nve'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None and self.interfaces._has_data():
return True
if self.vnis is not None and self.vnis._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_nve_oper as meta
return meta._meta_table['Nve']['meta_info']
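# Illustrative usage (a sketch, not part of the generated bindings; the device
# address and credentials are hypothetical, and the provider/service calls
# follow the usual ydk-py pattern):
#     from ydk.providers import NetconfServiceProvider
#     from ydk.services import CRUDService
#     provider = NetconfServiceProvider(address='10.0.0.1', username='admin', password='admin')
#     nve_oper = CRUDService().read(provider, Nve())  # fetch NVE operational data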
| 1.867188 | 2 |
src/scs_philips_hue/cmd/cmd_mqtt_subscriber.py | south-coast-science/scs_philips_hue | 0 | 12769617 | <reponame>south-coast-science/scs_philips_hue<filename>src/scs_philips_hue/cmd/cmd_mqtt_subscriber.py
"""
Created on 23 Mar 2017
@author: <NAME> (<EMAIL>)
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdMQTTSubscriber(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
        self.__parser = optparse.OptionParser(usage="%prog {-c | -t TOPIC_PATH } [-s UDS_SUB] [-e] [-v]",
version="%prog 1.0")
# compulsory...
self.__parser.add_option("--conf", "-c", action="store_true", dest="use_domain_conf", default=False,
help="get topic path from domain conf")
self.__parser.add_option("--topic", "-t", type="string", nargs=1, action="store", dest="topic_path",
help="use the given topic path")
# optional...
self.__parser.add_option("--sub", "-s", type="string", nargs=1, action="store", dest="uds_sub",
help="write subscribed documents to UDS instead of stdout")
self.__parser.add_option("--echo", "-e", action="store_true", dest="echo", default=False,
help="also write subscribed documents to stderr")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.use_domain_conf and self.topic_path is not None:
return False
if not self.use_domain_conf and self.topic_path is None:
return False
return True
# ----------------------------------------------------------------------------------------------------------------
@property
def use_domain_conf(self):
return self.__opts.use_domain_conf
@property
def topic_path(self):
return self.__opts.topic_path
@property
def uds_sub(self):
return self.__opts.uds_sub
@property
def echo(self):
return self.__opts.echo
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdMQTTSubscriber:{use_domain_conf:%s, topic_path:%s, uds_sub:%s, echo:%s, verbose:%s}" % \
(self.use_domain_conf, self.topic_path, self.uds_sub, self.echo, self.verbose)
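# Illustrative usage (a sketch of the typical scs command-line pattern; assumes
# `import sys`, and error handling is abbreviated):
#     cmd = CmdMQTTSubscriber()
#     if not cmd.is_valid():
#         cmd.print_help(sys.stderr)
#         exit(2)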
| 2.15625 | 2 |
plagiarism-checker.py | scv1702/plagiarism-checker | 0 | 12769618 | import os.path
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from numpy import dot
from numpy.linalg import norm
class NotIntegerError(Exception):
pass
# Load a document, tokenize it into words, store the words in word_list, and return word_list
def doc_tokenize(doc_name):
with open(doc_name, 'rt') as fp:
string = fp.read()
word_list = word_tokenize(string)
    # Remove stopwords (words that carry little meaning) from word_list to improve similarity accuracy
word_list = [word for word in word_list if word not in stop_words]
    # Lowercase every word so that case differences do not create distinct tokens
    word_list = [word.lower() for word in word_list]
return word_list
# Compute the term frequency of every word in the given list and return it as a dict
def tf(list):
tf_dict = {word : list.count(word) if word in list else 0 for word in word_zip}
return tf_dict
# Multiply each word's tf by its idf to get tf-idf, then return the pairs sorted alphabetically as a list of (word, tf-idf) tuples
def tf_idf(list):
tf_dict = tf(list)
tf_idf_dict = {word : tf_dict[word] * idf_dict[word] for word in tf_dict.keys()}
return sorted(tf_idf_dict.items())
# Compute the cosine similarity between the documents doc_1 and doc_2 and return it
def cos_similarity(doc_1_name, doc_2_name):
    # Compute the tf-idf values of doc_1 and doc_2
doc_1 = tf_idf(doc_tokenize(doc_1_name))
doc_2 = tf_idf(doc_tokenize(doc_2_name))
    # Assign the tf-idf values of doc_1's words to vector_1
vector_1 = [value[1] for value in doc_1]
    # Assign the tf-idf values of doc_2's words to vector_2
vector_2 = [value[1] for value in doc_2]
    # Take the cosine of the angle between vector_1 and vector_2, multiply by 100 to get a percentage, and round to 2 decimal places
return round((dot(vector_1, vector_2) / (norm(vector_1) * norm(vector_2)))*100, 2)
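# For reference: cosine similarity is dot(a, b) / (norm(a) * norm(b)), so vectors
# pointing in the same direction (e.g. [1, 2] and [2, 4]) score 1.0, i.e. 100%
# after the scaling above, while orthogonal vectors score 0.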
while True:
try:
        # Read the number of documents
        doc_count = float(input('Please enter the number of documents : '))
if doc_count % 1 != 0:
raise NotIntegerError()
doc_count = int(doc_count)
doc_name_list = []
i = 0
while i < doc_count:
            doc_name = input(f'Please enter the name of document [{i + 1}/{doc_count}] : ') + ".txt"
            # If no existing document has that name, prompt again; otherwise append it to doc_name_list
if os.path.isfile(doc_name):
doc_name_list.append(doc_name)
i += 1
else:
print('Please enter the name of an existing document.')
break
except ValueError:
        # Raised when the document count input is not a number
        print('Please enter a number.')
except NotIntegerError:
        # Raised when the document count input is not an integer
        print('Please enter an integer.')
stop_words = set(stopwords.words('english'))
# Tokenize every document into doc_zip for computing the idf values
doc_zip = [doc_tokenize(name) for name in doc_name_list]
# Collect the unique words of all documents into word_zip for computing the tf-idf values
word_zip = list(set([word for doc in doc_zip for word in doc]))
# Compute the inverse document frequency of each word and store it in a dict
idf_dict = {}
for word in word_zip:
word_count = 0
for doc in doc_zip:
if word in doc:
word_count += 1
idf_dict[word] = np.log((1 + doc_count) / (word_count))
# Compute the pairwise similarity between every pair of documents and store it in similarity_dict
similarity_dict = {(doc_name_list[i], doc_name_list[j]) : cos_similarity(doc_name_list[i], doc_name_list[j]) for i in range(len(doc_name_list)-1) for j in range(i+1, doc_count)}
# Find the two most similar documents and print them
key_max = max(similarity_dict.keys(), key = lambda x: similarity_dict[x])
value_max = similarity_dict[key_max]
print(f"The similarity between {key_max[0]} and {key_max[1]} is highest at {value_max}%") | 2.828125 | 3 |
cloudendure/cloudendure_api/models/cloud_endure_user.py | cloudreach/cloudendure-python | 11 | 12769619 | # coding: utf-8
"""
CloudEndure API documentation
© 2017 CloudEndure All rights reserved # General Request authentication in CloudEndure's API is done using session cookies. A session cookie is returned upon successful execution of the \"login\" method. This value must then be provided within the request headers of all subsequent API requests. ## Errors Some errors are not specifically written in every method since they may always return. Those are: 1) 401 (Unauthorized) - for unauthenticated requests. 2) 405 (Method Not Allowed) - for using a method that is not supported (POST instead of GET). 3) 403 (Forbidden) - request is authenticated, but the user is not allowed to access. 4) 422 (Unprocessable Entity) - for invalid input. ## Formats All strings with date-time format are according to RFC3339. All strings with \"duration\" format are according to ISO8601. For example, a full day duration can be specified with \"PNNNND\". # noqa: E501
OpenAPI spec version: 5
Contact: https://bit.ly/2T54hSc
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CloudEndureUser:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"username": "str",
"status": "str",
"account": "str",
"roles": "list[str]",
"settings": "object",
"api_token": "str",
"has_password": "bool",
"terms_accepted": "bool",
"id": "str",
"self_link": "str",
}
attribute_map = {
"username": "username",
"status": "status",
"account": "account",
"roles": "roles",
"settings": "settings",
"api_token": "apiToken",
"has_password": "<PASSWORD>",
"terms_accepted": "termsAccepted",
"id": "id",
"self_link": "selfLink",
}
def __init__(
self,
username=None,
status=None,
account=None,
roles=None,
settings=None,
api_token=None,
has_password=None,
terms_accepted=None,
id=None,
self_link=None,
): # noqa: E501
"""CloudEndureUser - a model defined in Swagger""" # noqa: E501
self._username = None
self._status = None
self._account = None
self._roles = None
self._settings = None
self._api_token = None
self._has_password = None
self._terms_accepted = None
self._id = None
self._self_link = None
self.discriminator = None
if username is not None:
self.username = username
if status is not None:
self.status = status
if account is not None:
self.account = account
if roles is not None:
self.roles = roles
if settings is not None:
self.settings = settings
if api_token is not None:
self.api_token = api_token
if has_password is not None:
self.has_password = has_password
if terms_accepted is not None:
self.terms_accepted = terms_accepted
if id is not None:
self.id = id
if self_link is not None:
self.self_link = self_link
@property
def username(self):
"""Gets the username of this CloudEndureUser. # noqa: E501
:return: The username of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CloudEndureUser.
:param username: The username of this CloudEndureUser. # noqa: E501
:type: str
"""
self._username = username
@property
def status(self):
"""Gets the status of this CloudEndureUser. # noqa: E501
:return: The status of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CloudEndureUser.
:param status: The status of this CloudEndureUser. # noqa: E501
:type: str
"""
allowed_values = ["PENDING", "CONFIRMED", "DELETED"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}".format( # noqa: E501
status, allowed_values
)
)
self._status = status
@property
def account(self):
"""Gets the account of this CloudEndureUser. # noqa: E501
:return: The account of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""Sets the account of this CloudEndureUser.
:param account: The account of this CloudEndureUser. # noqa: E501
:type: str
"""
self._account = account
@property
def roles(self):
"""Gets the roles of this CloudEndureUser. # noqa: E501
:return: The roles of this CloudEndureUser. # noqa: E501
:rtype: list[str]
"""
return self._roles
@roles.setter
def roles(self, roles):
"""Sets the roles of this CloudEndureUser.
:param roles: The roles of this CloudEndureUser. # noqa: E501
:type: list[str]
"""
allowed_values = [
"USER",
"ACCOUNT_ADMIN",
"ACCOUNT_OWNER",
"GLOBAL_READONLY",
] # noqa: E501
if not set(roles).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `roles` [{0}], must be a subset of [{1}]".format( # noqa: E501
", ".join(map(str, set(roles) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)),
)
)
self._roles = roles
@property
def settings(self):
"""Gets the settings of this CloudEndureUser. # noqa: E501
:return: The settings of this CloudEndureUser. # noqa: E501
:rtype: object
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this CloudEndureUser.
:param settings: The settings of this CloudEndureUser. # noqa: E501
:type: object
"""
self._settings = settings
@property
def api_token(self):
"""Gets the api_token of this CloudEndureUser. # noqa: E501
:return: The api_token of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._api_token
@api_token.setter
def api_token(self, api_token):
"""Sets the api_token of this CloudEndureUser.
:param api_token: The api_token of this CloudEndureUser. # noqa: E501
:type: str
"""
self._api_token = api_token
@property
def has_password(self):
"""Gets the has_password of this CloudEndureUser. # noqa: E501
:return: The has_password of this CloudEndureUser. # noqa: E501
:rtype: bool
"""
return self._has_password
@has_password.setter
def has_password(self, has_password):
"""Sets the has_password of this CloudEndureUser.
:param has_password: The has_password of this CloudEndureUser. # noqa: E501
:type: bool
"""
self._has_password = has_password
@property
def terms_accepted(self):
"""Gets the terms_accepted of this CloudEndureUser. # noqa: E501
todo one-way; cannot be set at time of POST # noqa: E501
:return: The terms_accepted of this CloudEndureUser. # noqa: E501
:rtype: bool
"""
return self._terms_accepted
@terms_accepted.setter
def terms_accepted(self, terms_accepted):
"""Sets the terms_accepted of this CloudEndureUser.
todo one-way; cannot be set at time of POST # noqa: E501
:param terms_accepted: The terms_accepted of this CloudEndureUser. # noqa: E501
:type: bool
"""
self._terms_accepted = terms_accepted
@property
def id(self):
"""Gets the id of this CloudEndureUser. # noqa: E501
:return: The id of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CloudEndureUser.
:param id: The id of this CloudEndureUser. # noqa: E501
:type: str
"""
self._id = id
@property
def self_link(self):
"""Gets the self_link of this CloudEndureUser. # noqa: E501
:return: The self_link of this CloudEndureUser. # noqa: E501
:rtype: str
"""
return self._self_link
@self_link.setter
def self_link(self, self_link):
"""Sets the self_link of this CloudEndureUser.
:param self_link: The self_link of this CloudEndureUser. # noqa: E501
:type: str
"""
self._self_link = self_link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(CloudEndureUser, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudEndureUser):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
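# Illustrative usage (a sketch, not part of the generated SDK; the field values
# are hypothetical):
#     user = CloudEndureUser(username='user@example.com', status='CONFIRMED', roles=['USER'])
#     print(user.to_dict())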
| 2.40625 | 2 |
alipay/aop/api/domain/QuotaGradientRule.py | antopen/alipay-sdk-python-all | 213 | 12769620 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class QuotaGradientRule(object):
def __init__(self):
self._score_400 = None
self._score_450 = None
self._score_500 = None
self._score_550 = None
self._score_600 = None
self._score_650 = None
self._score_700 = None
self._score_750 = None
self._score_800 = None
self._score_850 = None
self._score_900 = None
self._score_950 = None
@property
def score_400(self):
return self._score_400
@score_400.setter
def score_400(self, value):
self._score_400 = value
@property
def score_450(self):
return self._score_450
@score_450.setter
def score_450(self, value):
self._score_450 = value
@property
def score_500(self):
return self._score_500
@score_500.setter
def score_500(self, value):
self._score_500 = value
@property
def score_550(self):
return self._score_550
@score_550.setter
def score_550(self, value):
self._score_550 = value
@property
def score_600(self):
return self._score_600
@score_600.setter
def score_600(self, value):
self._score_600 = value
@property
def score_650(self):
return self._score_650
@score_650.setter
def score_650(self, value):
self._score_650 = value
@property
def score_700(self):
return self._score_700
@score_700.setter
def score_700(self, value):
self._score_700 = value
@property
def score_750(self):
return self._score_750
@score_750.setter
def score_750(self, value):
self._score_750 = value
@property
def score_800(self):
return self._score_800
@score_800.setter
def score_800(self, value):
self._score_800 = value
@property
def score_850(self):
return self._score_850
@score_850.setter
def score_850(self, value):
self._score_850 = value
@property
def score_900(self):
return self._score_900
@score_900.setter
def score_900(self, value):
self._score_900 = value
@property
def score_950(self):
return self._score_950
@score_950.setter
def score_950(self, value):
self._score_950 = value
def to_alipay_dict(self):
params = dict()
if self.score_400:
if hasattr(self.score_400, 'to_alipay_dict'):
params['score_400'] = self.score_400.to_alipay_dict()
else:
params['score_400'] = self.score_400
if self.score_450:
if hasattr(self.score_450, 'to_alipay_dict'):
params['score_450'] = self.score_450.to_alipay_dict()
else:
params['score_450'] = self.score_450
if self.score_500:
if hasattr(self.score_500, 'to_alipay_dict'):
params['score_500'] = self.score_500.to_alipay_dict()
else:
params['score_500'] = self.score_500
if self.score_550:
if hasattr(self.score_550, 'to_alipay_dict'):
params['score_550'] = self.score_550.to_alipay_dict()
else:
params['score_550'] = self.score_550
if self.score_600:
if hasattr(self.score_600, 'to_alipay_dict'):
params['score_600'] = self.score_600.to_alipay_dict()
else:
params['score_600'] = self.score_600
if self.score_650:
if hasattr(self.score_650, 'to_alipay_dict'):
params['score_650'] = self.score_650.to_alipay_dict()
else:
params['score_650'] = self.score_650
if self.score_700:
if hasattr(self.score_700, 'to_alipay_dict'):
params['score_700'] = self.score_700.to_alipay_dict()
else:
params['score_700'] = self.score_700
if self.score_750:
if hasattr(self.score_750, 'to_alipay_dict'):
params['score_750'] = self.score_750.to_alipay_dict()
else:
params['score_750'] = self.score_750
if self.score_800:
if hasattr(self.score_800, 'to_alipay_dict'):
params['score_800'] = self.score_800.to_alipay_dict()
else:
params['score_800'] = self.score_800
if self.score_850:
if hasattr(self.score_850, 'to_alipay_dict'):
params['score_850'] = self.score_850.to_alipay_dict()
else:
params['score_850'] = self.score_850
if self.score_900:
if hasattr(self.score_900, 'to_alipay_dict'):
params['score_900'] = self.score_900.to_alipay_dict()
else:
params['score_900'] = self.score_900
if self.score_950:
if hasattr(self.score_950, 'to_alipay_dict'):
params['score_950'] = self.score_950.to_alipay_dict()
else:
params['score_950'] = self.score_950
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = QuotaGradientRule()
if 'score_400' in d:
o.score_400 = d['score_400']
if 'score_450' in d:
o.score_450 = d['score_450']
if 'score_500' in d:
o.score_500 = d['score_500']
if 'score_550' in d:
o.score_550 = d['score_550']
if 'score_600' in d:
o.score_600 = d['score_600']
if 'score_650' in d:
o.score_650 = d['score_650']
if 'score_700' in d:
o.score_700 = d['score_700']
if 'score_750' in d:
o.score_750 = d['score_750']
if 'score_800' in d:
o.score_800 = d['score_800']
if 'score_850' in d:
o.score_850 = d['score_850']
if 'score_900' in d:
o.score_900 = d['score_900']
if 'score_950' in d:
o.score_950 = d['score_950']
return o
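# Illustrative usage (a sketch, not part of the generated SDK; the quota values
# are hypothetical):
#     rule = QuotaGradientRule.from_alipay_dict({'score_600': 1000, 'score_650': 2000})
#     params = rule.to_alipay_dict()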
| 2.0625 | 2 |
landmark_generate/generate.py | yourmean/Moving-Emoji-Generation_Sol1 | 4 | 12769621 | <reponame>yourmean/Moving-Emoji-Generation_Sol1<filename>landmark_generate/generate.py<gh_stars>1-10
import os
import torch
import matplotlib.pyplot as plt
import numpy as np
generator = torch.load('generator_100000.pytorch', map_location={'cuda:0':'cpu'})
generator.eval()
# v, _ = generator.sample_videos(1, 16)
# v.shape
# v = v.detach()
# video = [v[:,:,i,:,:] for i in range(16)]
# s = video[2]
# s = s[0]
# s *= 255
# s = s.type(torch.uint8)
# s.shape
# plt.imshow(s.permute(1,2,0))
def videos_to_numpy(tensor):
generated = tensor.data.cpu().numpy().transpose(0, 1, 2, 3, 4)
generated[generated < -1] = -1
generated[generated > 1] = 1
generated = (generated + 1) / 2 * 255
return generated.astype('uint8')
v, _ = generator.sample_videos(1,16)
video = videos_to_numpy(v).squeeze().transpose((1,2,3,0))
for i in range(16):
temp = video[i]
plt.imshow(temp)
plt.show()
| 2.5 | 2 |
lilu/context.py | xyla-io/lilu | 0 | 12769622 | from enum import Enum
from typing import Optional, List, Dict
class TimeGranularity(Enum):
hourly = 'hourly'
daily = 'daily'
@property
def api_value(self) -> str:
if self is TimeGranularity.hourly:
return 'STAT_TIME_GRANULARITY_HOURLY'
elif self is TimeGranularity.daily:
return 'STAT_TIME_GRANULARITY_DAILY'
@property
def api_column(self) -> str:
return 'stat_datetime'
class EntityGranularity(Enum):
campaign = 'campaign'
adgroup = 'adgroup'
ad = 'ad'
@property
def prefix(self) -> str:
return f'{self.value}_'
def performance_to_api_column(self, performance_column: str) -> Optional[str]:
api_column = performance_column.split(self.prefix, maxsplit=1)[1]
if api_column in [
'active_cost',
'active_rate',
]:
api_column = None
return api_column
def api_to_performance_column(self, api_column: str) -> Optional[str]:
for performance_column in self.performance_columns:
if self.performance_to_api_column(performance_column) == api_column:
return performance_column
return None
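    # Illustrative usage (a sketch):
    #     EntityGranularity.campaign.performance_to_api_column('campaign_stat_cost')  # -> 'stat_cost'
    #     TimeGranularity.daily.api_value  # -> 'STAT_TIME_GRANULARITY_DAILY'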
@property
def entity_columns(self) -> List[str]:
if self is EntityGranularity.campaign:
return [
'campaign_advertiser_id',
'campaign_campaign_id',
'campaign_campaign_name',
'campaign_budget_mode',
'campaign_budget',
'campaign_objective_type',
'campaign_create_time',
'campaign_modify_time',
'campaign_status',
'campaign_opt_status',
'campaign_objective',
]
elif self is EntityGranularity.adgroup:
return [
'adgroup_advertiser_id',
'adgroup_adgroup_id',
'adgroup_adgroup_name',
'adgroup_campaign_id',
'adgroup_campaign_name',
'adgroup_placement_type',
'adgroup_placement',
'adgroup_enable_inventory_filter',
'adgroup_landing_page_url',
'adgroup_display_name',
'adgroup_app_id',
'adgroup_app_download_url',
'adgroup_open_url',
'adgroup_app_name',
'adgroup_app_type',
'adgroup_package',
'adgroup_category',
'adgroup_keywords',
'adgroup_avatar_icon',
'adgroup_is_comment_disable',
'adgroup_android_osv',
'adgroup_ios_osv',
'adgroup_audience',
'adgroup_excluded_audience',
'adgroup_gender',
'adgroup_location',
'adgroup_age',
'adgroup_languages',
'adgroup_connection_type',
'adgroup_operation_system',
'adgroup_device_price',
'adgroup_interest_category',
'adgroup_budget',
'adgroup_budget_mode',
'adgroup_pacing',
'adgroup_frequency',
'adgroup_frequency_schedule',
'adgroup_schedule_type',
'adgroup_schedule_start_time',
'adgroup_schedule_end_time',
'adgroup_dayparting',
'adgroup_billing_event',
'adgroup_bid',
'adgroup_conversion_id',
'adgroup_skip_learning_phase',
'adgroup_conversion_bid',
'adgroup_impression_tracking_url',
'adgroup_click_tracking_url',
'adgroup_video_view_tracking_url',
'adgroup_create_time',
'adgroup_modify_time',
'adgroup_creative_material_mode',
'adgroup_optimize_goal',
'adgroup_external_action',
'adgroup_deep_external_action',
'adgroup_deep_bid_type',
'adgroup_status',
'adgroup_pixel_id',
'adgroup_profile_image',
'adgroup_deep_cpabid',
'adgroup_opt_status',
'adgroup_bid_type',
'adgroup_statistic_type',
]
elif self is EntityGranularity.ad:
return [
'ad_advertiser_id',
'ad_ad_id',
'ad_ad_name',
'ad_campaign_name',
'ad_adgroup_id',
'ad_adgroup_name',
'ad_campaign_id',
'ad_status',
'ad_opt_status',
'ad_call_to_action',
'ad_video_id',
'ad_image_ids',
'ad_create_time',
'ad_modify_time',
'ad_is_aco',
'ad_image_mode',
'ad_profile_image',
'ad_click_tracking_url',
'ad_display_name',
'ad_impression_tracking_url',
'ad_video_view_tracking_url',
'ad_landing_page_url',
'ad_open_url',
'ad_app_name',
]
@property
def performance_columns(self) -> List[str]:
if self is EntityGranularity.campaign:
return [
'campaign_campaign_id',
'campaign_campaign_name',
'campaign_active_register',
'campaign_skip',
'campaign_active_register_rate',
'campaign_active_rate',
'campaign_active_pay_amount',
'campaign_active_pay_avg_amount',
'campaign_dy_comment',
'campaign_active_pay_cost',
'campaign_conversion_rate',
'campaign_active_pay_show',
'campaign_ecpm',
'campaign_active_register_click_cost',
'campaign_active_register_show_cost',
'campaign_active_register_click',
'campaign_conversion_cost',
'campaign_active_click_cost',
'campaign_stat_cost',
'campaign_active_pay_click_cost',
'campaign_active_register_cost',
'campaign_active_pay_click',
'campaign_dy_like',
'campaign_active_pay_rate',
'campaign_click_cost',
'campaign_active_show',
'campaign_active_click',
'campaign_active',
'campaign_convert_cnt',
'campaign_show_cnt',
'campaign_dy_share',
'campaign_activate_cost',
'campaign_ctr',
'campaign_active_pay',
'campaign_active_cost',
'campaign_active_register_show',
'campaign_active_pay_show_cost',
'campaign_activate_rate',
'campaign_time_attr_convert_cnt',
'campaign_click_cnt',
'campaign_dy_home_visited',
'campaign_active_show_cost',
]
elif self is EntityGranularity.adgroup:
return [
# 'adgroup_campaign_id',
# 'adgroup_campaign_name',
'adgroup_adgroup_id',
'adgroup_adgroup_name',
'adgroup_active_register',
'adgroup_skip',
'adgroup_active_register_rate',
'adgroup_active_rate',
'adgroup_active_pay_amount',
'adgroup_active_pay_avg_amount',
'adgroup_dy_comment',
'adgroup_active_pay_cost',
'adgroup_conversion_rate',
'adgroup_active_pay_show',
'adgroup_ecpm',
'adgroup_active_register_click_cost',
'adgroup_active_register_show_cost',
'adgroup_dy_like',
'adgroup_conversion_cost',
'adgroup_active_click_cost',
'adgroup_stat_cost',
'adgroup_active_pay_click_cost',
'adgroup_active_register_cost',
'adgroup_active_pay_click',
'adgroup_active_register_click',
'adgroup_active_pay_rate',
'adgroup_click_cost',
'adgroup_active_show',
'adgroup_active_click',
'adgroup_active',
'adgroup_convert_cnt',
'adgroup_show_cnt',
'adgroup_dy_share',
'adgroup_activate_cost',
'adgroup_ctr',
'adgroup_active_pay',
'adgroup_active_cost',
'adgroup_active_register_show',
'adgroup_active_pay_show_cost',
'adgroup_activate_rate',
'adgroup_time_attr_convert_cnt',
'adgroup_click_cnt',
'adgroup_dy_home_visited',
'adgroup_active_show_cost',
]
elif self is EntityGranularity.ad:
return [
# 'ad_campaign_id',
# 'ad_campaign_name',
# 'ad_adgroup_id',
# 'ad_adgroup_name',
'ad_ad_id',
'ad_ad_name',
'ad_active_register',
'ad_skip',
'ad_active_register_rate',
'ad_active_rate',
'ad_active_pay_amount',
'ad_active_pay_avg_amount',
'ad_dy_comment',
'ad_active_pay_cost',
'ad_active',
'ad_conversion_rate',
'ad_active_pay_show',
'ad_ecpm',
'ad_active_register_click_cost',
'ad_active_register_show_cost',
'ad_dy_like',
'ad_conversion_cost',
'ad_active_click_cost',
'ad_stat_cost',
'ad_active_pay_click_cost',
'ad_active_register_cost',
'ad_active_pay_click',
'ad_active_register_click',
'ad_active_pay_rate',
'ad_ad_text',
'ad_click_cost',
'ad_active_show',
'ad_active_click',
'ad_convert_cnt',
'ad_show_cnt',
'ad_dy_share',
'ad_activate_cost',
'ad_ctr',
'ad_active_pay',
'ad_active_cost',
'ad_active_register_show',
'ad_active_pay_show_cost',
'ad_activate_rate',
'ad_time_attr_convert_cnt',
'ad_click_cnt',
'ad_dy_home_visited',
'ad_active_show_cost',
] | 2.765625 | 3 |
examples/raspicam.py | tanyafish/unicorn-hat-hd | 0 | 12769623 | <filename>examples/raspicam.py
#!/usr/bin/env python
# This is a modified version of <NAME>'s Astro Cam example,
# from: https://github.com/bennuttall/sense-hat-examples/blob/master/python/astro_cam.py
try:
from picamera import PiCamera
from picamera.array import PiRGBArray
except ImportError:
exit('This script requires the picamera module\nInstall with: sudo pip install picamera')
import unicornhathd
print("""Unicorn HAT HD: Raspberry Pi Camera Display
Show a 16x16 feed from your Raspberry Pi camera!
""")
# Open the camera once and stream frames continuously; re-opening the camera on
# every frame would add needless overhead.
with PiCamera() as camera:
    camera.resolution = (32, 32)
    while True:
        with PiRGBArray(camera, size=(16, 16)) as stream:
            camera.capture(stream, format='rgb', resize=(16, 16))
            image = stream.array
            # Copy the 16x16 RGB frame pixel-by-pixel onto the LED matrix
            for y, row in enumerate(image):
                for x, pixel in enumerate(row):
                    r, g, b = pixel
                    unicornhathd.set_pixel(x, y, r, g, b)
            unicornhathd.show()
| 2.921875 | 3 |
Plots/Contours/NCL_conwomap_2.py | learn2free/GeoCAT-examples | 1 | 12769624 | """
NCL_conwomap_2.py
=================
This script illustrates the following concepts:
- Drawing a simple filled contour plot
- Selecting a different color map
- Changing the size/shape of a contour plot
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/conwomap_2.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/conwomap_2_lg.png
"""
import cartopy.crs as ccrs
import geocat.datafiles as gdf
import matplotlib.pyplot as plt
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
from geocat.viz import cmaps as gvcmaps
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/cone.nc"))
u = ds.u.isel(time=4)
###############################################################################
# Plot:
# Generate figure (set its size (width, height) in inches)
plt.figure(figsize=(10, 6))
# Generate axes, using Cartopy
projection = ccrs.PlateCarree()
ax = plt.axes(projection=projection)
# Import an NCL colormap
newcmp = gvcmaps.gui_default
# Contourf-plot data (for filled contours)
p = u.plot.contourf(ax=ax,
vmin=-1,
vmax=10,
levels=12,
cmap=newcmp,
add_colorbar=False,
transform=projection,
extend='neither',
add_labels=False)
# Contour-plot data (for borderlines)
u.plot.contour(ax=ax,
vmin=-1,
vmax=10,
levels=12,
linewidths=0.5,
colors='black',
add_colorbar=False,
transform=projection,
extend='neither',
add_labels=False)
# Add horizontal colorbar
cbar = plt.colorbar(p, orientation='horizontal', shrink=0.5)
cbar.ax.tick_params(labelsize=16)
cbar.set_ticks(np.linspace(0, 9, 10))
# Use geocat.viz.util convenience function to set axes limits & tick values without calling several matplotlib functions
gvutil.set_axes_limits_and_ticks(ax,
xlim=(0, 49),
ylim=(0, 29),
xticks=np.linspace(0, 40, 5),
yticks=np.linspace(0, 25, 6))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax,
x_minor_per_major=5,
y_minor_per_major=5,
labelsize=16)
# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.
gvutil.set_titles_and_labels(ax,
lefttitle="Cone amplitude",
lefttitlefontsize=18,
righttitle="ndim",
righttitlefontsize=18,
xlabel="X",
ylabel="Y",
labelfontsize=18)
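# Alternatively, save the figure to disk instead of (or before) displaying it;
# the filename here is hypothetical:
# plt.savefig("conwomap_2.png", dpi=150, bbox_inches="tight")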
# Show the plot
plt.show()
| 3.28125 | 3 |
rename2.py | a67878813/script | 0 | 12769625 | # -*- coding: gbk -*-
path1 = u'K:\\选择删除\\' # path of the folder whose contents will be renamed
import os
import zhconv
for parent, dirnames, filenames in os.walk(path1):
for filename in filenames:
try:
os.rename(os.path.join(parent, filename), os.path.join(parent, zhconv.convert(filename, 'zh-cn')))
#print(zhconv.convert(filename, 'zh-cn'))
except:
print("文件重命名错误" + str(filename))
dirs = os.listdir(path1)
for dirname in dirs:
    try:
        os.rename(path1 + str(dirname), path1 + zhconv.convert(dirname, 'zh-cn'))
        #print(zhconv.convert(dirname, 'zh-cn'))
    except:
        print("Failed to rename directory: " + path1 + str(dirname))
| 3.3125 | 3 |
sampyl/samplers/base.py | madanh/sampyl | 0 | 12769626 | from itertools import count
import time
from ..core import np, auto_grad_logp, AUTOGRAD
from ..parallel import parallel
from ..progressbar import update_progress
from ..state import State, func_var_names
from ..model import init_model
class Sampler(object):
def __init__(self, logp, start,
grad_logp=None,
scale=None,
condition=None,
grad_logp_flag=True,
random_seed=None):
self.model = init_model(logp, grad_logp, grad_logp_flag)
self._logp_func = logp
self._grad_func = grad_logp
self.var_names = func_var_names(logp)
self.state = State.fromkeys(self.var_names)
self.state.update(start)
self.scale = default_scale(scale, self.state)
self.sampler = None
self._sampled = 0
self._accepted = 0
self.conditional = condition
self._grad_logp_flag = grad_logp_flag
self.seed = random_seed
if random_seed:
np.random.seed(random_seed)
if condition is not None:
self._joint_logp = self._logp_func
def _conditional_step(self):
""" Build a conditional logp and sample from it. """
if self.conditional is None:
return self.step()
frozen_vars = self.conditional
frozen_state = self.state
free_vars = [var for var in self.state if var not in frozen_vars]
def conditional_logp(*args):
conditional_state = State([each for each in zip(free_vars, args)])
# Insert conditional values here, then pass to full logp
for i in frozen_vars:
conditional_state.update({i: frozen_state[i]})
return self._joint_logp(**conditional_state)
self.state = State([(var, frozen_state[var]) for var in free_vars])
self._logp_func = conditional_logp
if self._grad_logp_flag and AUTOGRAD:
self.model.grad_func = auto_grad_logp(conditional_logp, names=self.state.keys())
self.model.logp_func = self._logp_func
state = self.step()
# Add the frozen variables back into the state
new_state = State([(name, None) for name in self.var_names])
for var in state:
new_state.update({var: state[var]})
for var in frozen_vars:
new_state.update({var: frozen_state[var]})
self.state = new_state
return self.state
def step(self):
""" This is what you define to create the sampler. Requires that a
:ref:`state <state>` object is returned."""
pass
def sample(self, num, burn=0, thin=1, n_chains=1, progress_bar=True):
"""
Sample from :math:`P(X)`
:param num: *int.* Number of samples to draw from :math:`P(X)`.
:param burn: (optional) *int.*
Number of samples to discard from the beginning of the chain.
        :param thin: (optional) *int.*
Thin the samples by this factor.
:param n_chains: (optional) *int.*
Number of chains to return. Each chain is given its own
process and the OS decides how to distribute the processes.
:param progress_bar: (optional) *boolean.*
Show the progress bar, default = True.
:return: Record array with fields taken from arguments of
logp function.
"""
if self.seed is not None:
np.random.seed(self.seed)
if AUTOGRAD and hasattr(self.model, 'grad_func') \
and self.model.grad_func is None:
self.model.grad_func = auto_grad_logp(self._logp_func)
# Constructing a recarray to store samples
dtypes = [(var, 'f8', np.shape(self.state[var])) for var in self.state]
samples = np.zeros(num, dtype=dtypes).view(np.recarray)
if n_chains != 1:
return parallel(self, n_chains, samples,
burn=burn, thin=thin,
progress_bar=progress_bar)
if self.sampler is None:
self.sampler = (self.step() for _ in count(start=0, step=1))
start_time = time.time() # For progress bar
for i in range(num):
samples[i] = tuple(next(self.sampler).values())
if progress_bar and time.time() - start_time > 1:
update_progress(i+1, num)
start_time = time.time()
if progress_bar:
update_progress(i+1, num, end=True)
# Clearing the cache after a run to save on memory.
self.model.clear_cache()
return samples[burn::thin]
def default_scale(scale, state):
""" If scale is None, return a State object with arrays of ones matching
the shape of values in state.
"""
if scale is None:
new_scale = State.fromkeys(state.keys())
for var in state:
new_scale.update({var: np.ones(np.shape(state[var]))})
return new_scale
else:
return scale
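# Illustrative usage (a sketch): a concrete sampler only needs to implement
# step() and return the updated State. The toy subclass below is a plain random
# walk with no accept/reject step, so it is for illustration only and is not a
# valid posterior sampler:
#     class RandomWalk(Sampler):
#         def step(self):
#             for var in self.state:
#                 noise = np.random.randn(*np.shape(self.state[var]))
#                 self.state[var] = self.state[var] + noise * self.scale[var]
#             return self.state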
| 2.015625 | 2 |
wrappers/samtools/stats/wrapper.py | delvinso/crg2 | 7 | 12769627 | <reponame>delvinso/crg2
"""Snakemake wrapper for trimming paired-end reads using cutadapt."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
from snakemake.shell import shell
extra = snakemake.params.get("extra", "")
region = snakemake.params.get("region", "")
log = snakemake.log_fmt_shell(stdout=False, stderr=True)
shell("samtools stats {extra} {snakemake.input} {region} > {snakemake.output} {log}")
| 1.828125 | 2 |
pyleaves/train/example_train.py | JacobARose/pyleaves | 3 | 12769628 | <filename>pyleaves/train/example_train.py<gh_stars>1-10
"""
Created on Mon Feb 10 03:23:32 2019
script: pyleaves/pyleaves/train/example_train.py
@author: JacobARose
"""
def main(experiment_config, experiment_dir):
############################################
#TODO: Moving towards defining most or all run parameters in separate config files
############################################
trainer = BaseTrainer(experiment_config=experiment_config)
# for subset, paths in trainer.tfrecord_files.items():
# if experiment_config.verbose: print(subset)
# for path in paths:
# if experiment_config.verbose: print('\t',path)
# mlflow.log_artifact(path,f'artifacts/{subset}')
train_data = trainer.get_data_loader(subset='train')
val_data = trainer.get_data_loader(subset= 'val')
test_data = trainer.get_data_loader(subset='test')
# debug=False
# if debug:
# if tf.executing_eagerly():
# batch_imgs, batch_labels = next(iter(val_data))
# else:
# validation_iterator = val_data.make_one_shot_iterator()
# val_data_next = validation_iterator.get_next()
# sess = tf.compat.v1.Session()
# batch_imgs, batch_labels = sess.run(val_data_next)
# from pyleaves.analysis.img_utils import plot_image_grid
# plot_image_grid(batch_imgs, [np.argmax(l) for l in batch_labels], 8, 8)
# for i in range(64):
# img = batch_imgs[i,...]
# print(i, f'min = {np.min(img):.2f}, max = {np.max(img):.2f}, mean = {np.mean(img):.2f}, std = {np.std(img):.2f}')
# #From [-1.0,1.0] to [0,255]
# uint_imgs = np.array(batch_imgs)
# uint_imgs += 1
# uint_imgs /= 2
# uint_imgs *= 255
# uint_imgs = uint_imgs.astype(np.uint8)
# print(f'min = {np.min(batch_imgs):.2f}, max = {np.max(batch_imgs):.2f}, mean = {np.mean(batch_imgs):.2f}, std = {np.std(batch_imgs):.2f}')
# print(f'min = {np.min(uint_imgs)}, max = {np.max(uint_imgs)}, mean = {np.mean(uint_imgs):.2f}, std = {np.std(uint_imgs):.2f}')
# plot_image_grid(uint_imgs, [np.argmax(l) for l in batch_labels], 8, 8)
trainer.init_model_builder()
# model_config = trainer.get_model_config('train')
fit_params = trainer.get_fit_params()
callbacks = get_callbacks(weights_best=os.path.join(experiment_dir,'weights_best.h5'),
logs_dir=os.path.join(experiment_dir,'tensorboard_logs'),
restore_best_weights=False,
val_data=None)
# model_name = model_config.model_name
# print('model_config:\n',json.dumps(model_config,indent=4))
# if model_name is 'vgg16':
# model_builder = VGG16GrayScale(model_config)
# model = model_builder.build_model()
# elif model_name.startswith('resnet'):
# model_builder = ResNet(model_config)
# model = model_builder.build_model()
# else:
# model = build_model(**model_config)
history = trainer.model.fit(train_data,
steps_per_epoch = fit_params['steps_per_epoch'],
epochs= fit_params['epochs'],
validation_data=val_data,
validation_steps=fit_params['validation_steps'],
callbacks=callbacks
)
# trainer.config['model_config'] = model_config
# trainer.config.train_config['fit_params'] = fit_params
trainer.history = history
return trainer
if __name__=='__main__':
'''
Example:
python /home/jacob/pyleaves/pyleaves/train/example_train.py -d all -m all -gpu 3 -bsz 64 -lr 1e-4 --color_type grayscale -thresh 20 -r l2 -r_p 0.001 --experiment BaselinesGrayScale --data_db_path /home/jacob/pyleaves/pyleaves/leavesdb/resources/leavesdb.db
python /home/jacob/pyleaves/pyleaves/train/example_train.py -d Leaves2020 -m resnet_50_v2 -gpu 3 -bsz 64 -lr 1e-4 --color_type grayscale -thresh 20 -r l2 -r_p 0.001 --experiment BaselinesGrayScale --data_db_path /home/jacob/pyleaves/pyleaves/leavesdb/resources/converted_updated_leavesdb.db
python example_train.py -d PNAS -m resnet_50_v2 -gpu 3 -bsz 64
Possible models:
[
'shallow',
'vgg16',
'xception',
'resnet_50_v2',
'resnet_101_v2'
]
'''
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset_name', default='PNAS', type=str, help='Name of dataset of images to use for creating TFRecords')
parser.add_argument('-m', '--model_name', default='vgg16', type=str, help='Name of model to train')
parser.add_argument('-gpu', '--gpu_id', default='0', type=str, help='integer number of gpu to train on')
# parser.add_argument('-ch', '--num_channels', default=3, type=int, help='Number of input channels, either 1 for grayscale, or 3 for rgb')
parser.add_argument('-c', '--color_type', default='grayscale', type=str, help='grayscale or rgb')
parser.add_argument('-bsz', '--batch_size', default='64', type=str, help="Batch size as an integer, or 'all' to sweep a hardcoded list of sizes")
parser.add_argument('-lr', '--base_learning_rate', default='1e-4', type=str, help="Starting learning rate, <float> for a single value or 'all' to loop through a hardcoded range of values")
parser.add_argument('-thresh', '--low_class_count_thresh', default=10, type=int) #3
parser.add_argument('-r', '--regularizations', default='l2', type=str, help='comma separated list of regularizers to search through. Enter combinations of l1 and l2, enter anything else for None.') #3
parser.add_argument('-r_p', '--r_params', default='0.001', type=str, help='comma separated list of regularizer strengths to search through. Enter combinations of floats.') #3
parser.add_argument('-epochs', '--num_epochs', default=200, type=int, help='Number of epochs')
parser.add_argument('-exp', '--experiment', default='Baselines', type=str, help=r"Name of new or existing MLFlow experiment to log results into. TODO: Add None option")
parser.add_argument('--data_db_path', default=r'/home/jacob/pyleaves/pyleaves/leavesdb/resources/leavesdb.db', type=str, help='Directory in which to save/load models and/or model weights')
parser.add_argument('--model_dir', default=r'/media/data_cifs/jacob/Fossil_Project/models', type=str, help='Directory in which to save/load models and/or model weights')
parser.add_argument('-tfrec', '--tfrecord_dir', default=r'/media/data/jacob/Fossil_Project/tfrecord_data', type=str, help=r"Parent dir above the location that's intended for saving the TFRecords for this dataset")
parser.add_argument('-f',default='')
args = parser.parse_args()
import datetime
import json
import numpy as np
import os
import tensorflow as tf
# args.base_learning_rate = 'all'
# config = tf.ConfigProto(device_count = {'GPU': args.gpu_id})
# args.gpu_id=2
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id) ####SHOULD THIS BE AN INT???
tf.compat.v1.enable_eager_execution()
from pyleaves.utils import ensure_dir_exists # set_visible_gpus,
# set_visible_gpus([args.gpu_id])
####
from pyleaves.leavesdb.tf_utils.tf_utils import reset_eager_session
from pyleaves.models.resnet import ResNet, ResNetGrayScale
from pyleaves.models.vgg16 import VGG16, VGG16GrayScale
from pyleaves.models.keras_models import build_model
from pyleaves.train.callbacks import get_callbacks
from pyleaves.config import DatasetConfig, TrainConfig, ExperimentConfig
from pyleaves.train.base_trainer import BaseTrainer, BaseTrainer_v1
from pyleaves.analysis.mlflow_utils import mlflow_log_history, mlflow_log_best_history
import mlflow
import mlflow.tensorflow
tracking_dir = r'/media/data/jacob/Fossil_Project/experiments/mlflow'
ensure_dir_exists(tracking_dir)
mlflow.set_tracking_uri(tracking_dir)
print(mlflow.tracking.get_tracking_uri())
mlflow.set_experiment(args.experiment)
# print(mlflow.get_artifact_uri())
# if args.num_channels==3:
# color_type = 'rgb'
# else:
# color_type = 'grayscale'
############################
# Spaghetti Code for Assembling Hyperparameter search records to iterate through
#########################################
#########################################
import itertools
import random
random.seed(6)
from collections import OrderedDict
if args.model_name == 'all':
model_names = ['resnet_50_v2','resnet_152_v2', 'vgg16', 'xception', 'shallow'][:3]
# model_names = ['vgg16', 'xception', 'resnet_50_v2','resnet_101_v2', 'shallow']
else:
model_names=[args.model_name]
#########################################
if args.dataset_name == 'all':
dataset_names = ['PNAS', 'Fossil', 'Leaves2020'] #'Leaves']
else:
dataset_names = [args.dataset_name]
#########################################
learnrates = args.base_learning_rate.split(',')
learning_rates = []
for lr in learnrates:
try:
learning_rates.append(float(lr))
except ValueError:
if args.base_learning_rate == 'all':
learning_rates = [1e-3, 1e-4,1e-5]
break
if len(learning_rates)==0:
learning_rates = [1e-4]
print(f'Undefined Learning Rate option provided. Continuing with default {learning_rates[0]}')
#########################################
if args.batch_size == 'all':
batch_sizes = [64, 128]
else:
batch_sizes = [int(args.batch_size)]
#########################################
reg_list = args.regularizations.split(',')
regularizer_types=[]
for r in reg_list:
if r in ['l1','l2']:
regularizer_types.append(r)
if len(regularizer_types)==0:
regularizer_types = [None]
r_params=[]
for r_param in args.r_params.split(','):
try:
r_params.append(float(r_param))
except ValueError:
if r_param=='all':
r_params = [0.001, 0.01]
break
if len(r_params)==0:
r_params = [0.001]
print(f'Undefined Regularization option provided. Continuing with default {r_params[0]}')
hparams = OrderedDict()
hparams['model_name_list'] = model_names #['resnet_50_v2','resnet_152_v2', 'vgg16', 'xception', 'shallow']
hparams['dataset_name_list'] = dataset_names #['PNAS', 'Fossil', 'Leaves']
hparams['learning_rate_list'] = learning_rates #[1e-3, 1e-4,1e-5]
hparams['batch_size_list'] = batch_sizes #[64, 128]
hparams['regularizer_type_list'] = regularizer_types #['l1','l2']
hparams['regularizer_param_list'] = r_params #[0.001, 0.01]
hparams_labeled = OrderedDict()
for k, v in hparams.items():
hparams_labeled[k] = list(itertools.product([k.replace('_list','s')],v))
hparam_sampler = list(itertools.product(*list(hparams_labeled.values())))
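# Each element of hparam_sampler is a tuple of (key, value) pairs, e.g.
# (('model_names', 'vgg16'), ('dataset_names', 'PNAS'), ...); the loop below
# converts each tuple back into a dict before unpacking it into args.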
print('BEGINNING HPARAM SEARCH THROUGH A TOTAL OF ',len(hparam_sampler),' INDIVIDUAL HPARAM PERMUTATIONS.')
print('#'*20,'\n','#'*20)
# random.shuffle(hparam_sampler)
#########################################
#########################################
for num_finished, hparam in enumerate(hparam_sampler):
hparam = {k:v for k,v in hparam}
# break
args.model_name = hparam['model_names']
args.dataset_name = hparam['dataset_names']
args.base_learning_rate = hparam['learning_rates']
args.batch_size = hparam['batch_sizes']
regularizer = {hparam['regularizer_types']:hparam['regularizer_params']}
run_name=f'{args.model_name}-{args.dataset_name}-{args.color_type}-lr_{args.base_learning_rate}-bsz_{args.batch_size}'
with mlflow.start_run(run_name=run_name, nested=True):
# num_channels=3
# if args.model_name=='vgg16':
# target_size=(224,224)
# if args.color_type=='grayscale':
# num_channels=1
# elif 'resnet' in args.model_name:
# target_size=(224,224)
# elif args.model_name=='xception':
# target_size=(299,299)
# else:
# target_size=(224,224)
histories = []
current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
experiment_dir = os.path.join(r'/media/data/jacob/Fossil_Project',
'experiments',
args.model_name,
args.dataset_name,
args.color_type,
f'lr-{args.base_learning_rate}-bsz_{args.batch_size}',
current_time)
reset_eager_session()
dataset_config = DatasetConfig(dataset_name=args.dataset_name,
label_col='family',
# target_size=target_size,
# num_channels=num_channels,
grayscale=(args.color_type=='grayscale'),
low_class_count_thresh=args.low_class_count_thresh,
data_splits={'val_size':0.2,'test_size':0.2},
tfrecord_root_dir=args.tfrecord_dir,
data_db_path=args.data_db_path,
num_shards=10)
train_config = TrainConfig(model_name=args.model_name,
model_dir=args.model_dir,
batch_size=args.batch_size,
frozen_layers=None, #(0,-4),
base_learning_rate=args.base_learning_rate,
buffer_size=500,
num_epochs=args.num_epochs,
preprocessing=True,
augment_images=True,
augmentations=['rotate','flip'],
regularization=regularizer,
seed=5,
verbose=True)
experiment_config = ExperimentConfig(dataset_config=dataset_config,
train_config=train_config)
mlflow.tensorflow.autolog()
# mlflow.log_params(experiment_config)
print(f'BEGINNING: DATASET:{args.dataset_name}|MODEL:{args.model_name}|bsz:{args.batch_size}|lr:{args.base_learning_rate}|Color_type={args.color_type}|regularizer={regularizer}')
print('-'*30)
trainer = main(experiment_config, experiment_dir)
history = trainer.history
histories.append((args.dataset_name, args.model_name, history))
mlflow.log_params(args.__dict__)
for k, v in trainer.configs.items():
mlflow.log_params(v)
print('logged ', k)
mlflow_log_history(history)
| 2.046875 | 2 |
boilerio/schedulerweb_zones.py | adpeace/boilerio | 4 | 12769629 | import datetime
from flask_restx import Namespace, Resource, fields, marshal
from flask import request
from . import model
from .schedulerweb_util import get_db
api = Namespace('Zones', title="Zone management")
a_zone = api.model('Zone', {
'zone_id': fields.Integer(description="ID of zone"),
'name': fields.String(),
'boiler_relay': fields.String(
description="Identifier of boiler relay for this zone."),
'sensor_id': fields.Integer(
description="Identifier of sensor for this zone.."),
})
@api.route("/")
class ListZones(Resource):
@api.marshal_list_with(a_zone)
def get(self):
db = get_db()
zones = model.Zone.all_from_db(db)
return zones
an_override = api.model("Temperature target override", {
'zone': fields.Integer(description="Which zone it applies to"),
'end': fields.DateTime(description="Date/time the override ends"),
'temp': fields.Float(description="Target temperature during override"),
})
@api.route("/<int:zone_id>/override")
class Override(Resource):
"""Temperature override for a zone."""
@api.response(code=200, model=an_override, description="OK")
@api.response(code=204, description="No overrides active")
def get(self, zone_id):
"""Get temperature override for zone.
Returns no override if an override was in place but has expired."""
# XXX we shouldn't be deciding on the server whether an override is
# active since it doesn't tell us whether the device is implementing it
# or not. This should move to target/reported state model.
now = datetime.datetime.now()
db = get_db()
overrides = model.TargetOverride.from_db(db, [zone_id])
if not overrides:
return None, 204
assert len(overrides) == 1, "Only support one override per zone"
override = overrides[0]
if override.end > now:
return marshal(override, an_override), 200
return None, 204
@api.doc(params={
"temp": {'description': "The override temperature to set.",
'type': float, 'required': True, 'in': 'formData'},
"days": {"type": int, "in": "formData"},
"hours": {"type": int, "in": "formData"},
"mins": {"type": int, "in": "formData"},
})
def post(self, zone_id):
"""Configure a temperature override.
Specify at least one of days, hours, or mins for the duration."""
try:
secs = 0
if 'days' in request.form:
secs += int(request.form['days']) * 60 * 60 * 24
if 'hours' in request.form:
secs += int(request.form['hours']) * 60 * 60
if 'mins' in request.form:
secs += int(request.form['mins']) * 60
if not secs:
return 'Must specify days, hours, or mins', 400
duration = datetime.timedelta(0, secs)
temp = float(request.form['temp'])
except ValueError:
return '', 400
now = datetime.datetime.now()
end = now + duration
db = get_db()
override = model.TargetOverride(end, temp, zone_id)
override.save(db)
db.commit()
return ('', 200)
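# Example request (hypothetical host; the URL prefix depends on where this
# namespace is mounted): override zone 1 to 21.5 C for two hours:
#   curl -X POST -d 'temp=21.5' -d 'hours=2' http://localhost:5000/zones/1/override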
def delete(self, zone_id):
"""Clear temperature override."""
db = get_db()
model.TargetOverride.clear_from_db(db, zone_id)
db.commit()
return '', 200
a_gradient_measurement = api.model('Temperature gradient', {
'when': fields.DateTime(
description="Date/time the measurement was taken."),
'delta': fields.Float(
description="Difference between inside and "
"outside temperature at the start of the measurement."),
'gradient': fields.Float(
description="The temperature gradient in degrees C per "
"hour."),
})
a_gradient_average = api.model('Temperature gradient average', {
'delta': fields.Float(
description="Difference between inside and outside temperature"),
'gradient': fields.Float(
description="Average temperature gradient with heating on at "
"temperature difference of delta."),
'npoints': fields.Integer(
description="Number of data points contributing to the average "
"value given."),
})
@api.route('/<int:zone_id>/gradient_measurements')
class Gradient(Resource):
@api.expect(a_gradient_measurement)
def post(self, zone_id):
tgm = model.TemperatureGradientMeasurement(
zone_id, api.payload['when'], api.payload['delta'],
api.payload['gradient'])
db = get_db()
tgm.save(db)
db.commit()
@api.route('/<int:zone_id>/gradients')
class GradientTable(Resource):
@api.marshal_list_with(a_gradient_average)
def get(self, zone_id):
db = get_db()
r = model.TemperatureGradientMeasurement.get_gradient_table(
db, zone_id)
return r
a_device_state = api.model('Device reported state', {
'time_to_target': fields.Integer(
description="Seconds until target reached."),
'state': fields.String(description="State of device."),
'target': fields.Float(
description="Target the device is working towards."),
'current_temp': fields.Float(
description="Current temperature reported by the device."),
'target_overridden': fields.Boolean(
descripton="Whether the target temperature has been overridden"),
"current_outside_temp": fields.Float(
description="Current outside temperature reported by the device."),
"dutycycle": fields.Float(description="Dutycycle for boiler"),
})
@api.route('/<int:zone_id>/reported_state')
@api.param('zone_id', 'Zone ID for the time to target.')
class ReportedState(Resource):
@api.expect(a_device_state)
def post(self, zone_id):
db = get_db()
device_state = model.DeviceState(
datetime.datetime.now(),
zone_id, api.payload['state'], api.payload['target'],
api.payload['current_temp'], api.payload['time_to_target'],
api.payload['current_outside_temp'], api.payload['dutycycle'])
device_state.save(db)
db.commit()
@api.marshal_with(a_device_state)
def get(self, zone_id):
db = get_db()
device_state = model.DeviceState.last_from_db(db, zone_id)
db.commit()
return device_state
@api.route('/<int:zone_id>/schedule')
@api.param('zone_id', 'Zone ID for the schedule.')
class ZoneSchedule(Resource):
def get(self, zone_id):
db = get_db()
schedule = model.FullSchedule.from_db(db, zone_id)
db.commit()
entries = []
for (dow, time, _, temp) in schedule:
entries.append({
'day': dow,
'time': time.strftime('%H:%M'),
'temp': temp,
})
return entries
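# Example response body from ZoneSchedule.get (hypothetical entries):
#   [{"day": 0, "time": "06:30", "temp": 20.5},
#    {"day": 0, "time": "22:00", "temp": 16.0}]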
| 2.5625 | 3 |
src/WinePrediction.py | Raghuvp01/winePrediction | 0 | 12769630 | <filename>src/WinePrediction.py<gh_stars>0
import sys
import pyspark
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import IndexToString, StringIndexer, VectorIndexer
from pyspark.ml.linalg import Vectors # the DataFrame-based ML API expects ml (not mllib) vectors; the old mllib import shadowed this one
from pyspark.sql.functions import col
from pyspark.sql.functions import udf
from pyspark.sql.session import SparkSession
from pyspark.sql.types import StringType, DoubleType
if __name__ == "__main__":
# Starting the spark session
conf = pyspark.SparkConf().setAppName('winequality')
sc = pyspark.SparkContext.getOrCreate()
spark = SparkSession(sc)
# Loading the dataset
df = spark.read.format("csv").load(path , inferSchema='true',header = True ,sep =";")
df.printSchema()
# changing the 'quality' column name to 'label'
for col_name in df.columns[1:-1] + ['quality']:
df = df.withColumn(col_name, col(col_name).cast('float'))
df = df.withColumnRenamed('quality', "label")
# Convert to float format
def string_to_float(x):
return float(x)
# Categorize the quality score into a binary good/bad label
def categorize(r):
if 0 <= r <= 6.5:
label = "bad"
elif 6.5 < r <= 10:
label = "good"
else:
label = "n/a"
return label
string_to_float_udf = udf(string_to_float, DoubleType())
quality_udf = udf(lambda x: categorize(x), StringType())
df = df.withColumn("label", quality_udf("label"))
def transData(data):
return data.rdd.map(lambda r: [Vectors.dense(r[:-1]), r[-1]]).toDF(['features', 'label'])
transformed = transData(df)
labelIndexer = StringIndexer(inputCol='label', outputCol='indexedLabel').fit(transformed)
featureIndexer = VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(transformed)
# Exploratory PCA on the feature vectors; the result is printed but not used by the pipeline below
from pyspark.ml.feature import PCA
data = transformed
pca = PCA(k=6, inputCol="features", outputCol="pcaFeatures")
model = pca.fit(data)
result = model.transform(data).select("pcaFeatures")
result.show(truncate=False)
(trainingData, testData) = transformed.randomSplit([0.8, 0.2])
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
# Train a RandomForest model.
rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures", numTrees=8, maxDepth=20, seed=42)
# Convert indexed labels back to original labels.
labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=labelIndexer.labels)
# Chain indexers and tree in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, rf, labelConverter])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(labelCol="indexedLabel", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("accuracy = %g" % accuracy)
model.save(sys.argv[2])
print(sys.argv[2])
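# Example invocation (hypothetical paths for the input CSV and model output):
#   spark-submit WinePrediction.py /data/winequality-red.csv /models/wine_rf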
| 2.921875 | 3 |
cli/filter.py | sanaakhelloqi/pcapstats | 0 | 12769631 | <reponame>sanaakhelloqi/pcapstats
import src.utils as utils
from src.Sieve import Sieve
from pathlib import Path
import click
import json
from concurrent.futures import ProcessPoolExecutor
import tqdm
def filter_target_wrapper(data):
return filter_target(*data)
def filter_target(target, original_pcap, json_file):
target_pcap = utils.read_file(target)
if not target_pcap:
click.echo("Can't read file. Skipping…")
raise Exception
similarity_filter = Sieve(original_pcap, target_pcap, json_file)
return target, similarity_filter.sieve()
@click.command("filter", help="Filter pcap files based on similarity metrics.")
@click.argument("original", nargs=1)
@click.argument("targets", nargs=-1)
@click.option("-o", "--output", default="filter.json", help="File where filter output gets saved.")
@click.option("-j", "--json-file", help="JSON containing custom thresholds for the available metrics.")
@click.option("-p", "--processes", type=int, default=4, help="Maximum amount of concurrent processes.")
def cli_filter(original, targets, output, json_file, processes):
similar = []
dissimilar = []
click.echo("Comparing pcap files...")
original_pcap = utils.read_file(original)
if not original_pcap:
click.echo("No valid file type supplied. Aborting…")
return
zipped_targets = [[target, original_pcap, json_file] for target in targets]
with ProcessPoolExecutor(max_workers=processes) as executor:
for _target, _filter_result in list(tqdm.tqdm(executor.map(filter_target_wrapper, zipped_targets),
total=len(zipped_targets))):
if _filter_result:
similar.append(_target)
else:
dissimilar.append(_target)
with Path(output).open("w") as outfile:
out_dict = {"original": original, "similar": similar, "dissimilar": dissimilar}
json.dump(out_dict, outfile)
if __name__ == "__main__":
cli_filter()
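# Example invocation (hypothetical file names):
#   python cli/filter.py original.pcap target1.pcap target2.pcap -o filter.json -p 4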
| 2.5 | 2 |
paragraph_batch_utils.py | pavelmk/dynamic_rnn | 0 | 12769632 | <filename>paragraph_batch_utils.py<gh_stars>0
import numpy as np
def batch_data_generator(data, batch_size, n_epochs=-1):
""" Takes data, then yields it in batches for n_epochs epochs. Wraps around the end
of the dataset to repeat until the specified number of epochs is reached. We make
the choice to make all batches equally long (equal to batch_size), rather than terminating
precisely at the end of the epoch exhaustion.
inputs:
data: any-shaped numpy.array. batching is along the 0th dimension.
batch_size: int >= 1, <= data.shape[0]
n_epochs: if -1, repeats forever. else, terminates after data is
cycled through this many times.
returns:
a generator that will repeat the data in the specified batch size.
"""
if batch_size < 1:
raise ValueError("Cannot have batch size of < 1: {}".format(batch_size))
if batch_size > data.shape[0]:
raise ValueError("Batch size cannot be larger than data! {} > {}".format(batch_size,
data.shape[0]))
epoch_size = data.shape[0]
epoch_counter = 0
idx = 0
while (epoch_counter < n_epochs) or (n_epochs == -1):
idx_end = idx + batch_size
if idx_end < epoch_size:
yield data[idx:idx_end]
idx = idx_end
else:
# yield through the end then wrap around and grab part of the beginning, too.
remainder = epoch_size - idx
yield np.vstack([data[idx:epoch_size], data[0:batch_size-remainder]])
idx = batch_size-remainder
epoch_counter += 1
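# A quick sketch of the wrap-around behavior (hypothetical shapes):
#   data = np.arange(10).reshape(5, 2)
#   gen = batch_data_generator(data, batch_size=3, n_epochs=2)
#   next(gen)  # rows 0..2
#   next(gen)  # rows 3..4 plus row 0, wrapped from the second pass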
def pad_paragraphs_to_seq_length(paragraph_data, max_seq_length=15000):
""" Takes the list of paragraphs, pads or truncates them to max_seq_length, and returns an array of padded paragraphs,
as well as an array of the sequence lengths of those paragraphs.
inputs:
paragraph_data: a list of np.arrays of shape [None, 102]. Each list item corresponds to
one paragraph. The `None` dimension will be as long as the number of words
in a paragraph, and at each word position, the vector is the 102-dim embedding.
max_seq_length: the length beyond which to truncate, and prior to which to zero-pad.
returns:
a tuple of np.arrays of shape [num_paragraphs, max_seq_length, 102] (paragraph data),
and [num_paragraphs] (seq lengths)
"""
padded_paragraph_data = []
sequence_lengths = np.minimum([paragraph.shape[0] for paragraph in paragraph_data], max_seq_length)
for paragraph in paragraph_data:
paragraph_length = paragraph.shape[0]
if paragraph_length < max_seq_length:
# pad to max seq length
padded_paragraph = np.pad(paragraph,
[[0, max_seq_length - paragraph.shape[0]], [0,0]],
mode='constant',
constant_values=0)
elif paragraph_length > max_seq_length:
# just truncate to max seq length
padded_paragraph = paragraph[:max_seq_length]
elif paragraph_length == max_seq_length:
# perfect length already
padded_paragraph = paragraph
padded_paragraph_data.append(padded_paragraph)
return (np.array(padded_paragraph_data), np.array(sequence_lengths))
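# Example (hypothetical paragraph lengths):
#   p1, p2 = np.zeros((3, 102)), np.zeros((7, 102))
#   padded, lengths = pad_paragraphs_to_seq_length([p1, p2], max_seq_length=5)
#   padded.shape == (2, 5, 102); list(lengths) == [3, 5]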
| 3.109375 | 3 |
medium/python/c0104_216_combination-sum-iii/00_leetcode_0104.py | drunkwater/leetcode | 0 | 12769633 | <gh_stars>0
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#216. Combination Sum III
#Find all possible combinations of k numbers that add up to a number n, given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.
#Example 1:
#Input: k = 3, n = 7
#Output:
#[[1,2,4]]
#Example 2:
#Input: k = 3, n = 9
#Output:
#[[1,2,6], [1,3,5], [2,3,4]]
#Credits:
#Special thanks to @mithmatt for adding this problem and creating all test cases.
#class Solution(object):
# def combinationSum3(self, k, n):
# """
# :type k: int
# :type n: int
# :rtype: List[List[int]]
# """
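# A possible backtracking solution (one of several valid approaches; the
# original template leaves the method body empty):
class Solution(object):
    def combinationSum3(self, k, n):
        """
        :type k: int
        :type n: int
        :rtype: List[List[int]]
        """
        results = []

        def backtrack(start, remaining, combo):
            # A valid combination has exactly k numbers summing to n.
            if len(combo) == k:
                if remaining == 0:
                    results.append(list(combo))
                return
            for num in range(start, 10):
                if num > remaining:
                    break  # candidates only grow, so nothing further can fit
                combo.append(num)
                backtrack(num + 1, remaining - num, combo)
                combo.pop()

        backtrack(1, n, [])
        return results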
# Time Is Money | 3.5 | 4 |
downsample_images.py | fireofearth/giraffe | 0 | 12769634 | import os
import logging
import glob
import pathlib
import argparse
import multiprocessing as mp
import cv2
#import matplotlib.pyplot as plt
logging.basicConfig(
format="%(asctime)s: %(levelname)s: %(message)s", level=logging.INFO
)
def parse_arguments():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
"--image-directory",
default="data/carlacarsv2/images/256",
type=str,
help="Directory containing images to downsample.",
)
argparser.add_argument(
"--out-directory",
default="data/carlacarsv2/images/64",
type=str,
help="Directory to downsample images.",
)
argparser.add_argument(
"--size",
default=64,
type=int,
help="Target size of image to downsample.",
)
return argparser.parse_args()
def divide(n, iterable):
"""Divide the elements from *iterable* into *n* parts as lists, maintaining order.
Taken from more-itertools with minor modification."""
if n < 1:
raise ValueError('n must be at least 1')
try:
iterable[:0]
except TypeError:
seq = tuple(iterable)
else:
seq = iterable
q, r = divmod(len(seq), n)
ret = []
stop = 0
for i in range(1, n + 1):
start = stop
stop += q + 1 if i <= r else q
ret.append(list(seq[start:stop]))
return ret
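# For example, order is maintained and earlier parts get the larger share:
#   divide(3, [1, 2, 3, 4, 5]) -> [[1, 2], [3, 4], [5]]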
def path_to_filename(path, with_suffix=True):
"""Get filename from path.
Parameters
==========
path : str
Path to retrieve file name from e.g. '/path/to/image.png'.
with_suffix : bool
Whether to include the suffix of file path in file name.
Returns
=======
str
The file name of the path e.g. 'image.png'
or 'image' if `with_suffix` is false.
"""
p = pathlib.Path(path)
if with_suffix:
return str(p.name)
else:
return str(p.with_suffix("").name)
def downsample_image(infile_path, outfile_path, downsample_size):
image = cv2.imread(infile_path, flags=cv2.IMREAD_COLOR)
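# cv2.INTER_AREA below is generally the recommended interpolation for shrinking
# images in OpenCV, since it averages source pixels and reduces aliasing.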
resized_image = cv2.resize(
image,
(downsample_size, downsample_size),
interpolation=cv2.INTER_AREA
)
cv2.imwrite(outfile_path, resized_image)
#def show_image(infile_path):
# image = cv2.imread(infile_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plt.imshow(image)
# plt.show()
"""
cv2 reads BGR as its default colour order for images, matplotlib uses RGB.
Switching the ordering of the color channels doesn't cost any compute
as the underlying pixel values don't change. Make sure to use `cv2.cvtColor()` to
change the color ordering passing image to some other library API.
https://stackoverflow.com/questions/50963283/python-opencv-imshow-doesnt-need-convert-from-bgr-to-rgb
https://stackoverflow.com/questions/39316447/opencv-giving-wrong-color-to-colored-images-on-loading
"""
def worker_task(infile_paths, out_directory, downsample_size):
for infile_path in infile_paths:
fn = path_to_filename(infile_path, with_suffix=True)
outfile_path = os.path.join(out_directory, fn)
downsample_image(infile_path, outfile_path, downsample_size)
def main():
config = parse_arguments()
os.makedirs(config.out_directory, exist_ok=True)
paths = glob.glob(os.path.join(config.image_directory, "*.png"))
cpu_count = mp.cpu_count()
n_processes = max(1, min(cpu_count, len(paths) // 5)) # guard against zero workers when there are fewer than five images
logging.info(f"Found {len(paths)} images in image directory {config.image_directory}")
logging.info(f"There are {cpu_count} CPUs, using {n_processes} of them.")
processes = []
for infile_paths in divide(n_processes, paths):
p = mp.Process(
target=worker_task,
args=(infile_paths, config.out_directory, config.size)
)
p.start()
processes.append(p)
for p in processes:
p.join()
if __name__ == "__main__":
main()
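# Example usage (paths match the argparse defaults above):
#   python downsample_images.py --image-directory data/carlacarsv2/images/256 \
#       --out-directory data/carlacarsv2/images/64 --size 64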
| 2.703125 | 3 |
migrations/versions/f4a2a808cf0a_.py | ingalls/ml-enabler | 39 | 12769635 | <reponame>ingalls/ml-enabler<gh_stars>10-100
"""empty message
Revision ID: f4a2a808cf0a
Revises: c2<PASSWORD>
Create Date: 2019-05-20 17:46:02.329310
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f4a2a808cf0a'
down_revision = 'c<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('predictions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created', sa.DateTime(), nullable=False),
sa.Column('model_id', sa.BigInteger(), nullable=False),
sa.Column('predictions', sa.JSON(), nullable=False),
sa.ForeignKeyConstraint(['model_id'], ['ml_models.id'], name='fk_models'),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('predictions')
# ### end Alembic commands ###
| 1.351563 | 1 |
shuffler/lib/subcommands/dbEvaluate.py | pscedu/mlstamps_oltr | 0 | 12769636 | import os, os.path as op
import logging
import numpy as np
import cv2
import progressbar
import ast
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pprint
import PIL.Image # importing the submodule explicitly; "import PIL" alone does not expose PIL.Image
from lib.backend import backendDb
from lib.backend import backendMedia
from lib.utils import util
def add_parsers(subparsers):
evaluateDetectionParser(subparsers)
evaluateSegmentationIoUParser(subparsers)
evaluateBinarySegmentationParser(subparsers)
def _evaluateDetectionForClassPascal(c, c_gt, name, args):
def _voc_ap(rec, prec):
""" Compute VOC AP given precision and recall. """
# First append sentinel values at the end.
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# Compute the precision envelope.
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# To calculate area under PR curve, look for points
# where X axis (recall) changes value.
i = np.where(mrec[1:] != mrec[:-1])[0]
# Sum (\Delta recall) * prec.
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
c.execute('SELECT * FROM objects WHERE name=? ORDER BY score DESC',
(name, ))
entries_det = c.fetchall()
logging.info('Total %d detected objects for class "%s"', len(entries_det),
name)
# Go down dets and mark TPs and FPs.
tp = np.zeros(len(entries_det), dtype=float)
fp = np.zeros(len(entries_det), dtype=float)
# Detected of no interest.
ignored = np.zeros(len(entries_det), dtype=bool)
# 'already_detected' used to penalize multiple detections of same GT box.
already_detected = set()
# Go through each detection.
for idet, entry_det in enumerate(entries_det):
bbox_det = np.array(backendDb.objectField(entry_det, 'bbox'),
dtype=float)
imagefile = backendDb.objectField(entry_det, 'imagefile')
name = backendDb.objectField(entry_det, 'name')
# Get all GT boxes from the same imagefile [of the same class].
c_gt.execute('SELECT * FROM objects WHERE imagefile=? AND name=?',
(imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
bboxes_gt = np.array(
[backendDb.objectField(entry, 'bbox') for entry in entries_gt],
dtype=float)
# Separately manage no GT boxes.
if bboxes_gt.size == 0:
fp[idet] = 1.
continue
# Intersection between bbox_det and all bboxes_gt.
ixmin = np.maximum(bboxes_gt[:, 0], bbox_det[0])
iymin = np.maximum(bboxes_gt[:, 1], bbox_det[1])
ixmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2],
bbox_det[0] + bbox_det[2])
iymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3],
bbox_det[1] + bbox_det[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
intersection = iw * ih
# Union between bbox_det and all bboxes_gt.
union = (bbox_det[2] * bbox_det[3] +
bboxes_gt[:, 2] * bboxes_gt[:, 3] - intersection)
# IoU and get the best IoU.
IoUs = intersection / union
max_IoU = np.max(IoUs)
objectid_gt = objectids_gt[np.argmax(IoUs)]
logging.debug('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,
idet, objectid_gt)
# Find which objects count towards TP and FN (should be detected).
c_gt.execute(
'SELECT * FROM objects WHERE imagefile=? AND name=? AND %s' %
args.where_object_gt, (imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt_of_interest = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
# If 1) large enough IoU and
# 2) this GT box was not detected before.
if max_IoU > args.IoU_thresh and not objectid_gt in already_detected:
if objectid_gt in objectids_gt_of_interest:
tp[idet] = 1.
else:
ignored[idet] = True
already_detected.add(objectid_gt)
else:
fp[idet] = 1.
# Find the number of GT of interest.
c_gt.execute(
'SELECT COUNT(1) FROM objects WHERE %s AND name=?' %
args.where_object_gt, (name, ))
n_gt = c_gt.fetchone()[0]
logging.info('Total objects of interest: %d', n_gt)
# Remove dets, neither TP or FP.
tp = tp[np.bitwise_not(ignored)]
fp = fp[np.bitwise_not(ignored)]
logging.info('ignored: %d, tp: %d, fp: %d, gt: %d',
np.count_nonzero(ignored), np.count_nonzero(tp),
np.count_nonzero(fp), n_gt)
assert np.count_nonzero(tp) + np.count_nonzero(fp) + np.count_nonzero(
ignored) == len(entries_det)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(n_gt)
# Avoid divide by zero in case the first detection matches a difficult
# ground truth.
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
aps = _voc_ap(rec, prec)
print('Average precision for class "%s": %.4f' % (name, aps))
return aps
def _writeCurveValues(out_dir, X, Y, metrics_name, name, header):
if name is not None:
name = util.validateFileName(name)
stem = '%s-%s' % (metrics_name, name)
else:
stem = metrics_name
plt.savefig(op.join(out_dir, '%s.png' % stem))
plt.savefig(op.join(out_dir, '%s.eps' % stem))
with open(op.join(out_dir, '%s.txt' % stem), 'w') as f:
f.write('%s\n' % header)
for x, y in zip(X, Y):
f.write('%f %f\n' % (x, y))
def _beautifyPlot(ax):
ax.grid(which='major', linewidth='0.5')
ax.grid(which='minor', linewidth='0.2')
loc = ticker.MultipleLocator(0.2)
ax.xaxis.set_major_locator(loc)
ax.yaxis.set_major_locator(loc)
loc = ticker.MultipleLocator(0.1)
ax.xaxis.set_minor_locator(loc)
ax.yaxis.set_minor_locator(loc)
ax.set_aspect('equal', adjustable='box')
def _evaluateDetectionForClassSklearn(c, c_gt, class_name, args, sklearn):
''' Helper function for evaluateDetection. '''
# Detected objects sorted by descending score (confidence).
if class_name is None:
c.execute('SELECT * FROM objects ORDER BY score DESC')
else:
c.execute('SELECT * FROM objects WHERE name=? ORDER BY score DESC',
(class_name, ))
entries_det = c.fetchall()
logging.info('Num of positive "%s": %d', class_name, len(entries_det))
# Create arrays 'y_score' with predicted scores, binary 'y_true' for GT,
# and a binary 'y_ignored' for detected objects that are neither TP nor FP.
y_score = np.zeros(len(entries_det), dtype=float)
y_true = np.zeros(len(entries_det), dtype=bool)
y_ignored = np.zeros(len(entries_det), dtype=bool)
# 'already_detected' used to penalize multiple detections of same GT box
already_detected = set()
# Go through each detection.
for idet, entry_det in enumerate(entries_det):
bbox_det = np.array(backendDb.objectField(entry_det, 'bbox'),
dtype=float)
imagefile = backendDb.objectField(entry_det, 'imagefile')
name = backendDb.objectField(entry_det, 'name')
score = backendDb.objectField(entry_det, 'score')
y_score[idet] = score
# Get all GT boxes from the same imagefile and of the same class.
c_gt.execute('SELECT * FROM objects WHERE imagefile=? AND name=?',
(imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
bboxes_gt = np.array(
[backendDb.objectField(entry, 'bbox') for entry in entries_gt],
dtype=float)
# Separately manage the case of no GT boxes in this image.
if bboxes_gt.size == 0:
y_true[idet] = False  # no ground truth in this image, so the detection is a false positive
continue
# Intersection between bbox_det and all bboxes_gt.
ixmin = np.maximum(bboxes_gt[:, 0], bbox_det[0])
iymin = np.maximum(bboxes_gt[:, 1], bbox_det[1])
ixmax = np.minimum(bboxes_gt[:, 0] + bboxes_gt[:, 2],
bbox_det[0] + bbox_det[2])
iymax = np.minimum(bboxes_gt[:, 1] + bboxes_gt[:, 3],
bbox_det[1] + bbox_det[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
intersection = iw * ih
# Union between bbox_det and all bboxes_gt.
union = (bbox_det[2] * bbox_det[3] +
bboxes_gt[:, 2] * bboxes_gt[:, 3] - intersection)
# Compute the best IoU between the bbox_det and all bboxes_gt.
IoUs = intersection / union
max_IoU = np.max(IoUs)
objectid_gt = objectids_gt[np.argmax(IoUs)]
logging.debug('max_IoU=%.3f for idet %d with objectid_gt %d.', max_IoU,
idet, objectid_gt)
# Get all GT objects that are of interest.
c_gt.execute(
'SELECT * FROM objects WHERE imagefile=? AND name=? AND %s' %
args.where_object_gt, (imagefile, name))
entries_gt = c_gt.fetchall()
objectids_gt_of_interest = [
backendDb.objectField(entry, 'objectid') for entry in entries_gt
]
# Compute TP and FP. An object is a TP if:
# 1) it has a large enough IoU with a GT object and
# 2) this GT object was not detected before.
if max_IoU > args.IoU_thresh and not objectid_gt in already_detected:
if objectid_gt not in objectids_gt_of_interest:
y_ignored[idet] = True
already_detected.add(objectid_gt)
y_true[idet] = True
else:
y_true[idet] = False
# It doesn't matter if y_ignore'd GT fall into TP or FP. Kick them out.
y_score = y_score[np.bitwise_not(y_ignored)]
y_true = y_true[np.bitwise_not(y_ignored)]
# Find the number of GT of interest.
if class_name is None:
c_gt.execute('SELECT COUNT(1) FROM objects WHERE %s' %
args.where_object_gt)
else:
c_gt.execute(
'SELECT COUNT(1) FROM objects WHERE %s AND name=?' %
args.where_object_gt, (class_name, ))
num_gt = c_gt.fetchone()[0]
logging.info('Number of ground truth "%s": %d', class_name, num_gt)
# Add FN to y_score and y_true.
num_fn = num_gt - np.count_nonzero(y_true)
logging.info('Number of false negative "%s": %d', class_name, num_fn)
y_score = np.pad(y_score, [0, num_fn], constant_values=0.)
y_true = np.pad(y_true, [0, num_fn], constant_values=True)
# We need the point for threshold=0 to have y=0. Not sure why it's not yet.
# TODO: figure out how to do it properly.
y_score = np.pad(y_score, [0, 1000000], constant_values=0.0001)
y_true = np.pad(y_true, [0, 1000000], constant_values=False)
if 'precision_recall_curve' in args.extra_metrics:
precision, recall, _ = sklearn.metrics.precision_recall_curve(
y_true=y_true, probas_pred=y_score)
if args.out_dir:
plt.clf()
plt.plot(recall, precision)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Recall')
plt.ylabel('Precision')
_beautifyPlot(plt.gca())
_writeCurveValues(args.out_dir, recall, precision,
'precision-recall', class_name,
'recall precision')
if 'roc_curve' in args.extra_metrics:
fpr, tpr, _ = sklearn.metrics.roc_curve(y_true=y_true, y_score=y_score)
sklearn.metrics.auc(x=fpr, y=tpr)
if args.out_dir:
plt.clf()
plt.plot(fpr, tpr)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('FPR')
plt.ylabel('TPR')
_beautifyPlot(plt.gca())
_writeCurveValues(args.out_dir, fpr, tpr, 'roc', class_name,
'fpr tpr')
# Compute all metrics for this class.
aps = sklearn.metrics.average_precision_score(y_true=y_true,
y_score=y_score)
if class_name is None:
print('Average precision: %.4f' % aps)
else:
print('Average precision for class "%s": %.4f' % (class_name, aps))
return aps
def evaluateDetectionParser(subparsers):
parser = subparsers.add_parser(
'evaluateDetection',
description='Evaluate detections given a ground truth database.')
parser.set_defaults(func=evaluateDetection)
parser.add_argument('--gt_db_file', required=True)
parser.add_argument('--IoU_thresh', type=float, default=0.5)
parser.add_argument('--where_object_gt', default='TRUE')
parser.add_argument(
'--out_dir',
help='If specified, plots and text files are written here.')
parser.add_argument(
'--extra_metrics',
nargs='+',
default=[],
choices=[
'precision_recall_curve',
'roc_curve',
],
help='Select metrics to be computed in addition to average precision. '
'This is implemented only for evaluation_backend="sklearn". '
'They are computed for every class. The names match those at '
'https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics'
)
parser.add_argument(
'--evaluation_backend',
choices=['sklearn', 'pascal-voc', 'sklearn-all-classes'],
default='sklearn',
help='Detection evaluation is different across papers and methods. '
'PASCAL VOC produces average-precision score a bit different '
'than the sklearn package. A good overview on metrics: '
'https://github.com/rafaelpadilla/Object-Detection-Metrics. '
'"sklearn-all-classes" reports only one accuracy.')
def evaluateDetection(c, args):
if 'sklearn' in args.evaluation_backend:
import sklearn.metrics
# Load the ground truth database.
if not op.exists(args.gt_db_file):
raise FileNotFoundError('File does not exist: %s' % args.gt_db_file)
conn_gt = backendDb.connect(args.gt_db_file, 'load_to_memory')
c_gt = conn_gt.cursor()
# Some info for logging.
c.execute('SELECT COUNT(1) FROM objects')
logging.info('The evaluated database has %d objects.', c.fetchone()[0])
c_gt.execute('SELECT COUNT(1) FROM objects WHERE %s' %
args.where_object_gt)
logging.info('The ground truth database has %d objects of interest.',
c_gt.fetchone()[0])
c_gt.execute('SELECT DISTINCT(name) FROM objects')
names = c_gt.fetchall()
if args.evaluation_backend == 'sklearn':
for name, in names:
_evaluateDetectionForClassSklearn(c, c_gt, name, args, sklearn)
elif args.evaluation_backend == 'pascal-voc':
for name, in names:
if args.extra_metrics:
logging.warning('extra_metrics not supported for pascal-voc.')
_evaluateDetectionForClassPascal(c, c_gt, name, args)
elif args.evaluation_backend == 'sklearn-all-classes':
# This method does not separate results by classes.
_evaluateDetectionForClassSklearn(c, c_gt, None, args, sklearn)
else:
assert False
conn_gt.close()
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k],
minlength=n**2).reshape(n, n)
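# For example, with n=2 classes (rows index ground truth, columns predictions):
#   fast_hist(np.array([0, 1, 1]), np.array([0, 1, 0]), 2) -> [[1, 0],
#                                                              [1, 1]]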
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def calc_fw_iu(hist):
pred_per_class = hist.sum(0)
gt_per_class = hist.sum(1)
return np.nansum(
(gt_per_class * np.diag(hist)) /
(pred_per_class + gt_per_class - np.diag(hist))) / gt_per_class.sum()
def calc_pixel_accuracy(hist):
gt_per_class = hist.sum(1)
return np.diag(hist).sum() / gt_per_class.sum()
def calc_mean_accuracy(hist):
gt_per_class = hist.sum(1)
acc_per_class = np.diag(hist) / gt_per_class
return np.nanmean(acc_per_class)
def save_colorful_images(prediction, filename, palette, postfix='_color.png'):
im = PIL.Image.fromarray(palette[prediction.squeeze()])
im.save(filename[:-4] + postfix)
def label_mapping(input_, mapping):
output = np.copy(input_)
for ind in range(len(mapping)):
output[input_ == mapping[ind][0]] = mapping[ind][1]
return np.array(output, dtype=np.int64)
def plot_confusion_matrix(cm, classes, normalize=False, cmap=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if cmap is None:
cmap = plt.cm.Blues
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
logging.info("Normalized confusion matrix.")
else:
logging.info(
'Confusion matrix will be computed without normalization.')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
plt.tight_layout()
plt.ylabel('Ground truth')
plt.xlabel('Predicted label')
def _label2classMapping(gt_mapping_dict, pred_mapping_dict):
''' Parse user-defined label mapping dictionaries. '''
# "gt_mapping_dict" maps mask pixel-values to classes.
labelmap_gt = ast.literal_eval(gt_mapping_dict)
labelmap_pr = ast.literal_eval(
pred_mapping_dict) if pred_mapping_dict else labelmap_gt
# Create a list of classes.
class_names = list(labelmap_gt.values())
labelmap_gt_new = {}
# Here, we remap pixel-values to indices of class_names.
for key in labelmap_gt:
labelmap_gt_new[key] = class_names.index(labelmap_gt[key])
labelmap_gt = labelmap_gt_new
labelmap_pr_new = {}
for key in labelmap_pr:
if not labelmap_pr[key] in class_names:
raise ValueError(
'Class %s is in "pred_mapping_dict" but not in "gt_mapping_dict"'
)
labelmap_pr_new[key] = class_names.index(labelmap_pr[key])
labelmap_pr = labelmap_pr_new
return labelmap_gt, labelmap_pr, class_names
def evaluateSegmentationIoUParser(subparsers):
parser = subparsers.add_parser(
'evaluateSegmentationIoU',
description='Evaluate mask segmentation w.r.t. a ground truth db.')
parser.set_defaults(func=evaluateSegmentationIoU)
parser.add_argument('--gt_db_file', required=True)
parser.add_argument('--where_image', default='TRUE')
parser.add_argument(
'--out_dir',
help='If specified, output files with be written to "out_dir".')
parser.add_argument(
'--out_prefix',
default='',
help='A prefix to add to output filenames. '
'Use it to keep predictions from different epochs in one dir.')
parser.add_argument(
'--gt_mapping_dict',
required=True,
help=
'A map from ground truth maskfile to classes written as a json string. '
'E.g. "{0: \'background\', 255: \'car\'}"')
parser.add_argument(
'--pred_mapping_dict',
help='A map from predicted masks to classes written as a json string, '
'if different from "gt_mapping_dict"')
parser.add_argument(
'--class_to_record_iou',
help='If specified, IoU for a class is recorded into the "score" '
'field of the "images" table. '
'If not specified, mean IoU is recorded. '
'Should correspond to values of "gt_mapping_dict". E.g. "background".')
parser.add_argument(
'--out_summary_file',
help='Text file, where the summary is going to be appended as just one '
'line of format: out_prefix \\t IoU_class1 \\t IoU_class2 \\t etc.')
def evaluateSegmentationIoU(c, args):
import pandas as pd
import matplotlib.pyplot as plt
# Get corresponding maskfiles from predictions and ground truth.
logging.info('Opening ground truth dataset: %s', args.gt_db_file)
c.execute('ATTACH ? AS "attached"', (args.gt_db_file, ))
c.execute('SELECT pr.imagefile,pr.maskfile,gt.maskfile '
'FROM images pr INNER JOIN attached.images gt '
'WHERE pr.imagefile=gt.imagefile AND pr.maskfile IS NOT NULL '
'AND gt.maskfile IS NOT NULL '
'AND %s '
'ORDER BY pr.imagefile ASC' % args.where_image)
entries = c.fetchall()
logging.info(
'Total %d images in both the open and the ground truth databases.',
len(entries))
logging.debug(pprint.pformat(entries))
imreader = backendMedia.MediaReader(rootdir=args.rootdir)
labelmap_gt, labelmap_pr, class_names = _label2classMapping(
args.gt_mapping_dict, args.pred_mapping_dict)
if args.class_to_record_iou is not None and not args.class_to_record_iou in class_names:
raise ValueError(
'class_to_record_iou=%s is not among values of gt_mapping_dict=%s'
% (args.class_to_record_iou, args.gt_mapping_dict))
hist_all = np.zeros((len(class_names), len(class_names)))
for imagefile, maskfile_pr, maskfile_gt in progressbar.progressbar(
entries):
# Load masks and bring them to comparable form.
mask_gt = util.applyLabelMappingToMask(imreader.maskread(maskfile_gt),
labelmap_gt)
mask_pr = util.applyLabelMappingToMask(imreader.maskread(maskfile_pr),
labelmap_pr)
mask_pr = cv2.resize(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]),
interpolation=cv2.INTER_NEAREST)
# Evaluate one image pair.
careabout = ~np.isnan(mask_gt)
mask_gt = mask_gt[careabout][:].astype(int)
mask_pr = mask_pr[careabout][:].astype(int)
hist = fast_hist(mask_gt, mask_pr, len(class_names))
hist_all += hist
# Compute and record results by image.
iou_list = per_class_iu(hist)
if args.class_to_record_iou is None:
iou = iou_list.mean()
else:
iou = iou_list[class_names.index(args.class_to_record_iou)]
c.execute('UPDATE images SET score=? WHERE imagefile=?',
(iou, imagefile))
# Get label distribution.
pr_per_class = hist_all.sum(0)
gt_per_class = hist_all.sum(1)
iou_list = per_class_iu(hist_all)
fwIoU = calc_fw_iu(hist_all)
pixAcc = calc_pixel_accuracy(hist_all)
mAcc = calc_mean_accuracy(hist_all)
result_df = pd.DataFrame({
'class': class_names,
'IoU': iou_list,
"pr_distribution": pr_per_class,
"gt_distribution": gt_per_class,
})
result_df["IoU"] *= 100 # Changing to percent ratio.
result_df.set_index("class", inplace=True)
print("---- info per class -----")
print(result_df)
result_ser = pd.Series({
"pixAcc": pixAcc,
"mAcc": mAcc,
"fwIoU": fwIoU,
"mIoU": iou_list.mean()
})
result_ser = result_ser[["pixAcc", "mAcc", "fwIoU", "mIoU"]]
result_ser *= 100 # change to percent ratio
if args.out_dir is not None:
if not op.exists(args.out_dir):
os.makedirs(args.out_dir)
out_summary_path = op.join(args.out_dir, args.out_summary_file)
logging.info('Will add summary to: %s', out_summary_path)
with open(out_summary_path, 'a') as f:
f.write(args.out_prefix + '\t' +
'\t'.join(['%.2f' % x for x in result_df['IoU']]) + '\n')
# Save confusion matrix
fig = plt.figure()
normalized_hist = (hist.astype("float") /
hist.sum(axis=1)[:, np.newaxis])
plot_confusion_matrix(normalized_hist, classes=class_names)
outfigfn = op.join(args.out_dir, "%sconf_mat.pdf" % args.out_prefix)
fig.savefig(outfigfn,
transparent=True,
bbox_inches='tight',
pad_inches=0,
dpi=300)
print("Confusion matrix was saved to %s" % outfigfn)
outdffn = op.join(args.out_dir,
"%seval_result_df.csv" % args.out_prefix)
result_df.to_csv(outdffn)
print('Info per class was saved at %s !' % outdffn)
outserfn = op.join(args.out_dir,
"%seval_result_ser.csv" % args.out_prefix)
result_ser.to_csv(outserfn)
print('Total result is saved at %s !' % outserfn)
def getPrecRecall(tp, fp, fn):
''' Accumulate into Precision-Recall curve. '''
ROC = np.zeros((256, 2), dtype=float)
for val in range(256):
if tp[val] == 0 and fp[val] == 0:
precision = -1.
else:
precision = tp[val] / float(tp[val] + fp[val])
if tp[val] == 0 and fn[val] == 0:
recall = -1.
else:
recall = tp[val] / float(tp[val] + fn[val])
ROC[val, 0] = recall
ROC[val, 1] = precision
ROC = ROC[np.bitwise_and(ROC[:, 0] != -1, ROC[:, 1] != -1), :]
ROC = np.vstack((ROC, np.array([0, ROC[-1, 1]])))
area = -np.trapz(x=ROC[:, 0], y=ROC[:, 1])
return ROC, area
def evaluateBinarySegmentationParser(subparsers):
parser = subparsers.add_parser(
'evaluateBinarySegmentation',
description=
'Evaluate mask segmentation ROC curve w.r.t. a ground truth db. '
'Ground truth values must be 0 for background, 255 for foreground, '
'and the rest for "dontcare".'
'Predicted mask must be grayscale in [0,255], '
'with brightness meaning probability of foreground.')
parser.set_defaults(func=evaluateBinarySegmentation)
parser.add_argument('--gt_db_file', required=True)
parser.add_argument('--where_image', default='TRUE')
parser.add_argument(
'--out_dir',
help='If specified, result files with be written to "out_dir".')
parser.add_argument(
'--out_prefix',
default='',
help='A prefix to add to output filenames. '
'Use it to keep predictions from different epochs in one dir.')
parser.add_argument('--display_images_roc',
action='store_true',
help='Specify to display on screen')
def evaluateBinarySegmentation(c, args):
import pandas as pd
# Get corresponding maskfiles from predictions and ground truth.
c.execute('ATTACH ? AS "attached"', (args.gt_db_file, ))
c.execute('SELECT pr.imagefile,pr.maskfile,gt.maskfile '
'FROM images pr INNER JOIN attached.images gt '
'WHERE pr.imagefile=gt.imagefile '
'AND pr.maskfile IS NOT NULL '
'AND gt.maskfile IS NOT NULL '
'AND %s '
'ORDER BY pr.imagefile ASC' % args.where_image)
entries = c.fetchall()
logging.info(
'Total %d images in both the open and the ground truth databases.' %
len(entries))
logging.debug(pprint.pformat(entries))
imreader = backendMedia.MediaReader(rootdir=args.rootdir)
TPs = np.zeros((256, ), dtype=int)
FPs = np.zeros((256, ), dtype=int)
FNs = np.zeros((256, ), dtype=int)
if args.display_images_roc:
fig = plt.figure()
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
for imagefile, maskfile_pr, maskfile_gt in progressbar.progressbar(
entries):
# Load masks and bring them to comparable form.
mask_gt = imreader.maskread(maskfile_gt)
mask_pr = imreader.maskread(maskfile_pr)
mask_pr = cv2.resize(mask_pr, (mask_gt.shape[1], mask_gt.shape[0]), interpolation=cv2.INTER_NEAREST)
# Some printouts.
gt_pos = np.count_nonzero(mask_gt == 255)
gt_neg = np.count_nonzero(mask_gt == 0)
gt_other = mask_gt.size - gt_pos - gt_neg
logging.debug('GT: positive: %d, negative: %d, others: %d.', gt_pos,
gt_neg, gt_other)
# If there is torch.
try:
import torch
# Use only relevant pixels (not the 'dontcare' class.)
relevant = np.bitwise_or(mask_gt == 0, mask_gt == 255)
mask_gt = mask_gt[relevant].flatten()
mask_pr = mask_pr[relevant].flatten()
mask_gt = torch.Tensor(mask_gt)
mask_pr = torch.Tensor(mask_pr)
try:
mask_gt = mask_gt.cuda()
mask_pr = mask_pr.cuda()
except RuntimeError:
pass
TP = np.zeros((256, ), dtype=int)
FP = np.zeros((256, ), dtype=int)
FN = np.zeros((256, ), dtype=int)
for val in range(256):
tp = torch.nonzero(torch.mul(mask_pr > val,
mask_gt == 255)).size()[0]
fp = torch.nonzero(torch.mul(mask_pr > val,
mask_gt != 255)).size()[0]
fn = torch.nonzero(torch.mul(mask_pr <= val,
mask_gt == 255)).size()[0]
tn = torch.nonzero(torch.mul(mask_pr <= val,
mask_gt != 255)).size()[0]
TP[val] = tp
FP[val] = fp
FN[val] = fn
TPs[val] += tp
FPs[val] += fp
FNs[val] += fn
ROC, area = getPrecRecall(TP, FP, FN)
logging.info('%s\t%.2f' % (op.basename(imagefile), area * 100.))
except ImportError:
# TODO: write the same without torch, on CPU
raise NotImplementedError(
'Non-torch implementation is still to be implemented.')
if args.display_images_roc:
plt.plot(ROC[:, 0], ROC[:, 1], 'go-', linewidth=2, markersize=4)
plt.pause(0.05)
fig.show()
# Accumulate into Precision-Recall curve.
ROC, area = getPrecRecall(TPs, FPs, FNs)
print(
"Average across image area under the Precision-Recall curve, perc: %.2f"
% (area * 100.))
if args.out_dir is not None:
if not op.exists(args.out_dir):
os.makedirs(args.out_dir)
fig = plt.figure()
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.plot(ROC[:, 0], ROC[:, 1], 'bo-', linewidth=2, markersize=6)
out_plot_path = op.join(args.out_dir,
'%srecall-prec.png' % args.out_prefix)
fig.savefig(out_plot_path,
transparent=True,
bbox_inches='tight',
pad_inches=0,
dpi=300)
| 2.25 | 2 |
scripts/merra-concat-dailyrad.py | jenfly/atmos-read | 3 | 12769637 | <gh_stars>1-10
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import xarray as xray
import numpy as np
import collections
import pandas as pd
import scipy
import atmos as atm
years = range(1980, 2016)
datadir = atm.homedir() + 'datastore/merra2/dailyrad/'
savefiles = {yr : datadir + 'merra2_RAD_%d.nc4' % yr for yr in years}
# ----------------------------------------------------------------------
def get_filenames(datadir, year):
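    # MERRA-2 file names embed the production "stream" number, which changes
    # with reprocessing era: 100 for 1980-1991, 200 for 1992-2000, 300 for
    # 2001-2010 and 400 from 2011 onward (as encoded below).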
prod_dict = {yr : '100' for yr in range(1980, 1992)}
for yr in range(1992, 2001):
prod_dict[yr] = '200'
for yr in range(2001, 2011):
prod_dict[yr] = '300'
for yr in range(2011, 2016):
prod_dict[yr] = '400'
prod = prod_dict[year]
filestr = (datadir + '%d/' % year + 'MERRA2_' + prod
+ '.tavg1_2d_rad_Nx.%d%02d%02d.SUB.nc4')
files = []
for mon in range(1, 13):
days = range(1, atm.days_this_month(year, mon) + 1)
for day in days:
files.append(filestr % (year, mon, day))
return files
for year in years:
files = get_filenames(datadir, year)
savefile = savefiles[year]
ds = atm.load_concat(files, concat_dim='time')
ds = ds.rename({'time' : 'day'})
ds = ds.drop(['time_bnds', 'bnds'])
ds['day'] = range(1, len(ds['day']) + 1)
print('Saving to ' + savefile)
ds.to_netcdf(savefile)
| 2.203125 | 2 |
SerieAByJBot.py | leeprinxin/TaiwanWebCrawler | 0 | 12769638 | <gh_stars>0
import requests, bs4
import json
from datetime import datetime
from datetime import datetime, timezone, timedelta
import traceback, pymssql
import time
from selenium import webdriver
import selenium
from selenium.webdriver.chrome.options import Options
from time import sleep
class SerieAWebCrawler(object):
def __init__(self):
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
        self.SourceCode = 'Taiwan'  # Taiwan Sports Lottery source code
        self.SportCode = 1  # Serie A sport code: 1, from the SportCode table
        self.SportTournamentCode = '10041315'  # carried over from the previous crawler
self.EventType = 0
self.CollectClient = 'Soccer'
self.server = "guess365.database.windows.net"
self.database = 'Guess365'
self.user = 'crawl'
self.password = '<PASSWORD>'
        self.Games = '3'  # sports league id
        self.urlsport = 'FTB'
        self.game_date = ''  # set this to a date within the past month
self.access_token = 'FREE_TEST_KEY_FOR_20_TIMES_PER_DAY'
self.Basketball_UpcomingEvents_URL = 'https://api.sportsbot.tech/odds_movements'
        self.alliance = '義大利甲級聯賽 '  # 'Italian Serie A' -- API parameter value, must stay in Chinese
self.SportText = 'Soccer'
self.TournamentText = 'Serie A'
def start(self, MatchResultsSearchDays=1):
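        """
        Run one full crawl: fetch upcoming matches and their odds, write them
        to the database, then collect match results for the past
        MatchResultsSearchDays days.
        """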
self.MatchEntry = []
self.Odds = {}
self.get_MatchEntry()
self.get_Odds()
self.add_MatchEntry_or_Odds()
self.MatchResults = []
self.get_MatchResults(days=MatchResultsSearchDays)
self.add_MatchResults()
def get_ConnectionFromDB(self):
db = pymssql.connect(self.server, self.user, self.password, self.database)
cursor = db.cursor()
return db, cursor
def add_MatchResults(self):
print('*' * 20, 'add_MatchResults', '*' * 20)
if self.MatchResults == []:
            print('MatchResults has not been crawled yet or contains no data')
return
db, cursor = self.get_ConnectionFromDB()
for MatchResult in self.MatchResults:
try:
insert_sql = f'''INSERT INTO [dbo].[MatchResults] ([EventCode],[TournamentText],[MatchTime],[HomeTeam]
,[AwayTeam],[HomeScore],[AwayScore],[EndTime],[time_status],[error_log]) VALUES
('{MatchResult['EventCode']}','{MatchResult['TournamentText']}','{MatchResult['MatchTime']}','{MatchResult['HomeTeam']}',
'{MatchResult['AwayTeam']}','{MatchResult['HomeScore']}','{MatchResult['AwayScore']}','{MatchResult['EndTime']}',
'{MatchResult['time_status']}','{MatchResult['error_log']}')'''
                print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
except:
                print('No matching event exists in MatchEntry yet, or the event is a duplicate')
def get_MatchResults(self, days=1):
search_days = []
for day in range(1,days+1):
search_days.append((datetime.now()-timedelta(days=day)).strftime("%Y-%m-%d"))
self.CollectedTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S.000")
db, cursor = self.get_ConnectionFromDB()
print('*' * 20, 'get_MatchResults', '*' * 20)
for search_day in search_days:
url = self.Basketball_UpcomingEvents_URL + '/' + self.urlsport + '/' + search_day
data = {'access_token': self.access_token, 'alliance': self.alliance}
response = requests.get(url, data=data)
response_json = json.loads(response.text)
'''with open('JBotResponseEPL.txt','r', encoding='utf8') as f:
response_json = json.load(f)'''
if response_json["header"]["status"] == "WARNING_01":
                print('No matches that day, or betting has not opened yet')
            elif response_json["header"]["status"] == "ERROR_04":
                print('Daily request quota exceeded')
            elif response_json["header"]["status"] == "ERROR_05":
                print('Subscription has expired')
            elif response_json["header"]["status"] == "ERROR_07":
                print('API calls spaced less than 5 seconds apart')
            elif response_json["header"]["status"] == "ERROR_06":
                print('Queried date out of range: free keys cover the past 30 days through tomorrow, subscription keys the past 180 days through tomorrow')
elif response_json["header"]["status"] == "SECCESS":
self.games = response_json['data'].items()
for key, game in self.games:
try:
HomeTeam = self.TeamNameCorrection(game['info']['home'],cursor,1)
AwayTeam = self.TeamNameCorrection(game['info']['away'],cursor,1)
MatchTime = self.MatchTimeCorrection(datetime.strptime(game['info']['time'], "%Y-%m-%dT%H:%M"),30, HomeTeam, AwayTeam, cursor)
                        EventCode_Prefix = self.Games + '_' + MatchTime.strftime("%Y%m%d") + '_'  # league id _ match date, e.g. 2_20211213_
EventCode = EventCode_Prefix + str(game['info']['lottery_id'])
HomeScore = game['score']['home']
AwayScore = game['score']['away']
EndTime = self.CollectedTime
MatchResult = dict(EventCode=EventCode,TournamentText=self.TournamentText,MatchTime=MatchTime
,HomeTeam=HomeTeam,AwayTeam=AwayTeam,HomeScore=HomeScore,AwayScore = AwayScore
,EndTime=EndTime,time_status='Ended',error_log='None')
self.MatchResults.append(MatchResult)
                        print('Crawled record:', MatchResult)
                    except:
                        print(f"Event {EventCode} has no result yet")
#traceback.print_exc()
pass
time.sleep(6)
def MatchTimeCorrection(self, mydatatime, offset_minute, HomeTeam, AwayTeam, cursor):
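        """
        Align the Taiwan-lottery kickoff time with Bet365 when possible: look
        for a Bet365 MatchEntry for the same teams within +/- offset_minute of
        mydatatime and return its MatchTime if found; otherwise keep the
        original time.
        """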
offset_sec = offset_minute * 60
timestamp = time.mktime(mydatatime.timetuple())
top = datetime.fromtimestamp(timestamp + offset_sec).strftime("%Y-%m-%d %H:%M:%S")
bottom = datetime.fromtimestamp(timestamp - offset_sec).strftime("%Y-%m-%d %H:%M:%S")
sql = f"SELECT MatchTime FROM MatchEntry where SourceCode = 'Bet365' AND MatchTime > '{bottom}' AND MatchTime < '{top}' AND HomeTeam = '{HomeTeam}' AND AwayTeam = '{AwayTeam}' "
        print('Executing:', sql)
cursor.execute(sql)
result = cursor.fetchone()
        if result:
            print("Using the Bet365 match time")
            return result[0]
        else:
            print("Using the Taiwan lottery match time")
            return mydatatime
def TeamNameCorrection(self,Taiwan_TeamName, cursor, byChinese=0):
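        """
        Map a team name from the Taiwan source onto the canonical name in the
        teams table (matching by Chinese name when byChinese is set); fall back
        to the original name when no mapping exists.
        """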
if byChinese:
sql = f"SELECT team FROM teams where name = N'{Taiwan_TeamName}' "
else:
sql = f"SELECT teams.team FROM teamText join teams on teamText.team_id = teams.id where Text = '{Taiwan_TeamName}' ;"
cursor.execute(sql)
result = cursor.fetchone()
if result:
            print(f'{Taiwan_TeamName} renamed to {result[0]}')
return result[0]
else:
return Taiwan_TeamName
def is_MatchEntry_existed(self, cursor, EventCode):
'''
        Check by EventCode whether a match already exists.
        :param cursor: SQL cursor object, EventCode: match identifier
        :return True: exists, False: does not exist
'''
sql = f'''SELECT * FROM [dbo].[MatchEntry] where EventCode = '{EventCode}' '''
        print('Executing:', sql)
cursor.execute(sql)
results = cursor.fetchall()
if len(results)>0:
print(f"編號{EventCode}已經存在")
return True
else:
print(f"編號{EventCode}不存在")
return False
def is_Odds_existed(self,cursor,play):
sql = f"SELECT id FROM [dbo].[Odds] where GroupOptionCode = {play['GroupOptionCode']} AND EventCode = '{play['EventCode']}' AND OptionCode='{play['OptionCode']}' "
        print('Executing:', sql)
        cursor.execute(sql)
        results = cursor.fetchall()
        if len(results)>0:
            print(f"Play {play['GroupOptionCode']} for event {play['EventCode']} already exists")
            return True
        else:
            print(f"Play {play['GroupOptionCode']} for event {play['EventCode']} does not exist")
            return False
def add_MatchEntry_or_Odds(self):
print('*' * 20, 'add_MatchEntry_or_Odds', '*' * 20)
if self.Odds == {} or self.MatchEntry == []:
            print('Odds or MatchEntry has not been crawled yet or contains no data')
return
db, cursor = self.get_ConnectionFromDB()
for game_dict in self.MatchEntry:
            if not self.is_MatchEntry_existed(cursor, game_dict['EventCode']):  # if the match does not exist, insert MatchEntry and Odds into the database
insert_sql = f'''INSERT INTO [dbo].[MatchEntry]([SportText],[TournamentText],[HomeTeam],[AwayTeam],[Score],[MatchTime],[HomePitcher],[AwayPitcher],[SourceCode],[SportCode],[SportTournamentCode],[EventCode],[EventType],[CollectClient],[CollectedTime],[CreatedTime])VALUES
('{game_dict['SportText']}','{game_dict['TournamentText']}','{game_dict['HomeTeam']}','{game_dict['AwayTeam']}','{game_dict['Score']}','{game_dict['MatchTime']}','{game_dict['HomePitcher']}','{game_dict['AwayPitcher']}','{game_dict['SourceCode']}','{game_dict['SportCode']}','{game_dict['SportTournamentCode']}','{game_dict['EventCode']}','{game_dict['EventType']}','{game_dict['CollectClient']}','{game_dict['CollectedTime']}','{game_dict['CreatedTime']}')'''
                print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
plays = self.Odds[game_dict['EventCode']]
if len(plays) < 1:
print(f"編號:{game_dict['EventCode']}尚無玩法")
for play in plays:
insert_sql = f'''INSERT INTO [dbo].[Odds]([GroupOptionCode],[OptionCode],[SpecialBetValue],[OptionRate],[SourceCode],[SportCode],[SportTournamentCode],[EventCode],[EventType],[CollectClient],[CollectedTime],[CreatedTime])VALUES
('{play['GroupOptionCode']}','{play['OptionCode']}','{play['SpecialBetValue']}','{play['OptionRate']}',
'{play['SourceCode']}','{play['SportCode']}','{play['SportTournamentCode']}','{play['EventCode']}','{play['EventType']}','{game_dict['CollectClient']}','{game_dict['CollectedTime']}','{game_dict['CreatedTime']}')'''
                    print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
insert_sql = f'''INSERT INTO [dbo].[OddsEntry]([GroupOptionCode],[OptionCode],[SpecialBetValue],[OptionRate],[SourceCode],[SportCode],[SportTournamentCode],[EventCode],[EventType],[CollectClient],[CollectedTime],[CreatedTime])VALUES
('{play['GroupOptionCode']}','{play['OptionCode']}','{play['SpecialBetValue']}','{play['OptionRate']}',
'{play['SourceCode']}','{play['SportCode']}','{play['SportTournamentCode']}','{play['EventCode']}','{play['EventType']}','{game_dict['CollectClient']}','{game_dict['CollectedTime']}','{game_dict['CreatedTime']}')'''
                    print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
            else:  # if the match exists, update MatchEntry
update_sql = f'''UPDATE [dbo].[MatchEntry] SET [MatchTime]='{game_dict['MatchTime']}' WHERE [EventCode] = '{game_dict['EventCode']}' '''
                print('Executing:', update_sql)
cursor.execute(update_sql)
                plays = self.Odds[game_dict['EventCode']]  # pull the plays for this match
if len(plays) < 1:
print(f"編號:{game_dict['EventCode']}尚無玩法")
for play in plays:
if not self.is_Odds_existed(cursor, play):
'''
                        Insert into Odds and OddsEntry
'''
insert_sql = f'''INSERT INTO [dbo].[Odds]([GroupOptionCode],[OptionCode],[SpecialBetValue],[OptionRate],[SourceCode],[SportCode],[SportTournamentCode],[EventCode],[EventType],[CollectClient],[CollectedTime],[CreatedTime])VALUES
('{play['GroupOptionCode']}','{play['OptionCode']}','{play['SpecialBetValue']}','{play['OptionRate']}',
'{play['SourceCode']}','{play['SportCode']}','{play['SportTournamentCode']}','{play['EventCode']}','{play['EventType']}','{game_dict['CollectClient']}','{game_dict['CollectedTime']}','{game_dict['CreatedTime']}')'''
                        print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
insert_sql = f'''INSERT INTO [dbo].[OddsEntry]([GroupOptionCode],[OptionCode],[SpecialBetValue],[OptionRate],[SourceCode],[SportCode],[SportTournamentCode],[EventCode],[EventType],[CollectClient],[CollectedTime],[CreatedTime])VALUES
('{play['GroupOptionCode']}','{play['OptionCode']}','{play['SpecialBetValue']}','{play['OptionRate']}',
'{play['SourceCode']}','{play['SportCode']}','{play['SportTournamentCode']}','{play['EventCode']}','{play['EventType']}','{game_dict['CollectClient']}','{game_dict['CollectedTime']}','{game_dict['CreatedTime']}')'''
                        print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
else:
'''
                        Update Odds, insert into OddsEntry
'''
update_sql = f'''UPDATE [dbo].[Odds] SET [GroupOptionCode]='{play['GroupOptionCode']}',[OptionCode]='{play['OptionCode']}',[SpecialBetValue]='{play['SpecialBetValue']}',[OptionRate]='{play['OptionRate']}',
[SourceCode]='{play['SourceCode']}',[SportCode]='{play['SportCode']}',[SportTournamentCode]='{play['SportTournamentCode']}',[EventCode]='{play['EventCode']}',[EventType]='{play['EventType']}'
,[CollectClient]='{game_dict['CollectClient']}',[CollectedTime]='{game_dict['CollectedTime']}',[CreatedTime]='{game_dict['CreatedTime']}'
WHERE GroupOptionCode='{play['GroupOptionCode']}' AND EventCode='{play['EventCode']}' AND OptionCode='{play['OptionCode']}' '''
                        print('Executing:', update_sql)
cursor.execute(update_sql)
db.commit()
insert_sql = f'''INSERT INTO [dbo].[OddsEntry]([GroupOptionCode],[OptionCode],[SpecialBetValue],[OptionRate],[SourceCode],[SportCode],[SportTournamentCode],[EventCode],[EventType],[CollectClient],[CollectedTime],[CreatedTime])VALUES
('{play['GroupOptionCode']}','{play['OptionCode']}','{play['SpecialBetValue']}','{play['OptionRate']}',
'{play['SourceCode']}','{play['SportCode']}','{play['SportTournamentCode']}','{play['EventCode']}','{play['EventType']}','{game_dict['CollectClient']}','{game_dict['CollectedTime']}','{game_dict['CreatedTime']}')'''
                        print('Executing:', insert_sql)
cursor.execute(insert_sql)
db.commit()
cursor.close()
db.close()
def get_Odds(self):
'''
        Fetch the plays (betting markets) offered for each match.
        QueryGroupOptionCode -- play code (following the website's parameters):
        id  no  Taiwan Sports Lottery play name   QueryGroupOptionCode
        1   1   moneyline (no handicap)           10
        2   1   over/under [total points]         52
        3   1   double chance                     55
        QueryGroupOptionCode    type    QueryOptionCode
        20    home     1
        20    away     2
        20    Draw     X
        52    Over     Over
        52    Under    Under
        55    HomeTeamName / Draw
        55    Draw / AwayTeamName
        55    HomeTeamName / AwayTeamName
:param
:return:
'''
print('*'*20,'get_Odds','*'*20)
if self.MatchEntry == []:
            print('MatchEntry has not been crawled yet or contains no data')
return
        factor = 0  # odds factor (currently unused)
for game_dict in self.MatchEntry:
odds = game_dict['odds']
EventCode = game_dict['EventCode']
            # skip events already present in Odds (JBot returns duplicate events)
            if EventCode in list(self.Odds.keys()):
                continue
QueryOptionCode = {'10,home': 1, '10,away': 2, '10,Draw': 'X', '52,Over': 'Over', '52,Under': 'Under',
'51,home': 1, '51,Draw': 'X', '51,away': 2}
for GroupOptionCode in ['10','51','52']:
                try:  # still observing how `odds` looks when a market is open but one play type is missing
                    print(f"Event {EventCode}, crawling play:", GroupOptionCode)
if GroupOptionCode == '10' and odds['home_normal'] > 0 and odds['away_normal'] > 0 and odds['tie_normal'] > 0:
OptionCode_H = QueryOptionCode[f"{GroupOptionCode},home"]
OptionCode_A = QueryOptionCode[f"{GroupOptionCode},away"]
OptionCode_T = QueryOptionCode[f"{GroupOptionCode},Draw"]
SpecialBetValue_H = ''
SpecialBetValue_A = ''
SpecialBetValue_T = ''
OptionRate_H = odds['home_normal']
OptionRate_A = odds['away_normal']
OptionRate_T = odds['tie_normal']
elif GroupOptionCode == '51' and odds['home_handi'] > 0 and odds['away_handi'] > 0 and odds['tie_handi'] > 0:
OptionCode_H = QueryOptionCode[f"{GroupOptionCode},home"]
OptionCode_A = QueryOptionCode[f"{GroupOptionCode},away"]
OptionCode_T = QueryOptionCode[f"{GroupOptionCode},Draw"]
                        SpecialBetValue_H = -odds['handi'] if odds['away_is_plus'] == 1 else odds['handi']
                        SpecialBetValue_A = -odds['handi'] if odds['away_is_plus'] == 1 else odds['handi']  # keyed to the home side's handicap value
                        SpecialBetValue_T = ''
OptionRate_H = odds['home_handi']
                        OptionRate_A = odds['home_handi']  # use the home-side odds
OptionRate_T = odds['tie_handi']
elif GroupOptionCode == '52' and odds['big_25'] > 0 and odds['small_25'] > 0:
OptionCode_H = QueryOptionCode[f"{GroupOptionCode},Over"]
OptionCode_A = QueryOptionCode[f"{GroupOptionCode},Under"]
SpecialBetValue_H = 2.5
SpecialBetValue_A = 2.5
OptionRate_H = odds['big_25']
OptionRate_A = odds['small_25']
else:
                        raise Exception(f'Play {GroupOptionCode} has not opened yet')
SourceCode = self.SourceCode
SportCode = self.SportCode
SportTournamentCode = self.SportTournamentCode
EventType = self.EventType
CollectClient = self.CollectClient
CollectedTime = self.CollectedTime
CreatedTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S.000")
play_dict_H = dict(GroupOptionCode=GroupOptionCode, OptionCode=OptionCode_H,
SpecialBetValue=SpecialBetValue_H, OptionRate=OptionRate_H, SourceCode=SourceCode,
SportCode=SportCode, SportTournamentCode=SportTournamentCode, EventCode=EventCode,
EventType=EventType, CollectClient=CollectClient, CollectedTime=CollectedTime,
CreatedTime=CreatedTime)
play_dict_A = dict(GroupOptionCode=GroupOptionCode, OptionCode=OptionCode_A,
SpecialBetValue=SpecialBetValue_A, OptionRate=OptionRate_A, SourceCode=SourceCode,
SportCode=SportCode, SportTournamentCode=SportTournamentCode, EventCode=EventCode,
EventType=EventType, CollectClient=CollectClient, CollectedTime=CollectedTime,
CreatedTime=CreatedTime)
if not GroupOptionCode == '52':
play_dict_T = dict(GroupOptionCode=GroupOptionCode, OptionCode=OptionCode_T,
SpecialBetValue=SpecialBetValue_T, OptionRate=OptionRate_T, SourceCode=SourceCode,
SportCode=SportCode, SportTournamentCode=SportTournamentCode, EventCode=EventCode,
EventType=EventType, CollectClient=CollectClient, CollectedTime=CollectedTime,
CreatedTime=CreatedTime)
try:
if not GroupOptionCode == '52':
self.Odds[EventCode] = self.Odds[EventCode] + [play_dict_H, play_dict_A, play_dict_T]
else:
self.Odds[EventCode] = self.Odds[EventCode] + [play_dict_H, play_dict_A]
except:
if not GroupOptionCode == '52':
self.Odds[EventCode] = [play_dict_H, play_dict_A, play_dict_T]
else:
self.Odds[EventCode] = [play_dict_H, play_dict_A]
except:
print(traceback.format_exc())
pass
            try:
                plays = {2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 8: 3}  # odds-entry count -> number of play types
                print(f"Event {EventCode} has {plays[len(self.Odds[EventCode])]} play type(s)")
            except:
                print(f"Event {EventCode} has 0 play types")
                self.Odds[EventCode] = []
print('*'*20)
def get_MatchEntry(self):
"""
        Fetch all of the league's upcoming matches via the JBot API
:param
:return:
"""
res = requests.get(self.Basketball_UpcomingEvents_URL, headers=self.headers)
today = datetime.now().strftime("%Y-%m-%d")
tomorrow = (datetime.now()+timedelta(days=1)).strftime("%Y-%m-%d")
#for day in [today]:
for day in [today, tomorrow]:
url = self.Basketball_UpcomingEvents_URL + '/' + self.urlsport + '/' + day
data = {'access_token': self.access_token, 'alliance': self.alliance}
response = requests.get(url, data=data)
response_json = json.loads(response.text)
'''with open('JBotResponseEPL.txt','r', encoding='utf8') as f:
response_json = json.load(f)'''
self.CollectedTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S.000")
db, cursor = self.get_ConnectionFromDB()
print('*'*20,'get_MatchEntry',day,'*'*20)
if response_json["header"]["status"] == "WARNING_01":
                print('No matches that day, or betting has not opened yet')
            elif response_json["header"]["status"] == "ERROR_04":
                print('Daily request quota exceeded')
            elif response_json["header"]["status"] == "ERROR_05":
                print('Subscription has expired')
            elif response_json["header"]["status"] == "ERROR_07":
                print('API calls spaced less than 5 seconds apart')
            elif response_json["header"]["status"] == "ERROR_06":
                print('Queried date out of range: free keys cover the past 30 days through tomorrow, subscription keys the past 180 days through tomorrow')
elif response_json["header"]["status"] == "SECCESS":
self.games = response_json['data'].items()
for key, game in self.games:
HomeTeam = self.TeamNameCorrection(game['info']['home'], cursor, byChinese=1)
AwayTeam = self.TeamNameCorrection(game['info']['away'], cursor, byChinese=1)
MatchTime = self.MatchTimeCorrection(datetime.strptime(game['info']['time'],"%Y-%m-%dT%H:%M"), 30, HomeTeam, AwayTeam, cursor)
                    EventCode_Prefix = self.Games + '_' + MatchTime.strftime("%Y%m%d") + '_'  # league id _ match date, e.g. 2_20211213_
EventCode = EventCode_Prefix + str(game['info']['lottery_id'])
game_dict = dict(
SportText=self.SportText,
TournamentText=self.TournamentText,
HomeTeam=HomeTeam,
AwayTeam=AwayTeam,
Score='',
MatchTime=MatchTime,
HomePitcher='',
AwayPitcher='',
SourceCode=self.SourceCode,
SportCode=self.SportCode,
SportTournamentCode=self.SportTournamentCode,
EventCode=EventCode,
EventType=self.EventType,
CollectClient=self.CollectClient,
CollectedTime=self.CollectedTime,
CreatedTime=datetime.now().strftime("%Y-%m-%d %H:%M:%S.000"),
odds = game['odds'][-1]
)
self.MatchEntry.append(game_dict)
time.sleep(6)
cursor.close()
db.close()
if __name__ == '__main__':
SerieAWebCrawler = SerieAWebCrawler()
    print('MatchResultsSearchDays = [1 day, 3 days, 1 week, 3 weeks, 1 month, 2 months, 3 months]')
SerieAWebCrawler.start(MatchResultsSearchDays=1)
| 2.6875 | 3 |
misc/deep_learning_notes/Ch3 Advanced Tensorflow/GPU and device management tests/2_single_device_indepth.py | tmjnow/MoocX | 7 | 12769639 | import tensorflow as tf
"""
Instruction to the code there can be found at:
https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html
"""
# Creates a graph.
with tf.device('/cpu:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
print(sess.run(c))
"""
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GRID K520, pci bus id: 0000:00:03.0
/job:localhost/replica:0/task:0/gpu:1 -> device: 1, name: GRID K520, pci bus id: 0000:00:04.0
/job:localhost/replica:0/task:0/gpu:2 -> device: 2, name: GRID K520, pci bus id: 0000:00:05.0
/job:localhost/replica:0/task:0/gpu:3 -> device: 3, name: GRID K520, pci bus id: 0000:00:06.0
I tensorflow/core/common_runtime/direct_session.cc:175] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GRID K520, pci bus id: 0000:00:03.0
/job:localhost/replica:0/task:0/gpu:1 -> device: 1, name: GRID K520, pci bus id: 0000:00:04.0
/job:localhost/replica:0/task:0/gpu:2 -> device: 2, name: GRID K520, pci bus id: 0000:00:05.0
/job:localhost/replica:0/task:0/gpu:3 -> device: 3, name: GRID K520, pci bus id: 0000:00:06.0
MatMul: /job:localhost/replica:0/task:0/cpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] MatMul: /job:localhost/replica:0/task:0/cpu:0
b: /job:localhost/replica:0/task:0/cpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] b: /job:localhost/replica:0/task:0/cpu:0
a: /job:localhost/replica:0/task:0/cpu:0
I tensorflow/core/common_runtime/simple_placer.cc:818] a: /job:localhost/replica:0/task:0/cpu:0
[[ 22. 28.]
[ 49. 64.]]
"""
""" allow memory growth:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config, ...)
"""
""" Partial memory allocation
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
session = tf.Session(config=config, ...)
"""
### Manual device selection: allow_soft_placement lets TF fall back to an available device when the requested one does not exist.
# Creates a graph.
with tf.device('/gpu:5'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with allow_soft_placement and log_device_placement set
# to True.
# Ends up using /gpu:0 because it is available.
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True)) as sess:
print(sess.run(c))
| 3.4375 | 3 |
InvenTree/InvenTree/test_api.py | onurtatli/InvenTree | 0 | 12769640 | """ Low level tests for the InvenTree API """
from rest_framework import status
from django.urls import reverse
from InvenTree.api_tester import InvenTreeAPITestCase
from users.models import RuleSet
from base64 import b64encode
class APITests(InvenTreeAPITestCase):
""" Tests for the InvenTree API """
fixtures = [
'location',
'stock',
'part',
'category',
]
token = None
auto_login = False
def setUp(self):
super().setUp()
def basicAuth(self):
# Use basic authentication
authstring = bytes("{u}:{p}".format(u=self.username, p=self.password), "ascii")
# Use "basic" auth by default
auth = b64encode(authstring).decode("ascii")
self.client.credentials(HTTP_AUTHORIZATION="Basic {auth}".format(auth=auth))
def tokenAuth(self):
self.basicAuth()
token_url = reverse('api-token')
response = self.client.get(token_url, format='json', data={})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('token', response.data)
token = response.data['token']
self.token = token
def token_failure(self):
# Test token endpoint without basic auth
url = reverse('api-token')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertIsNone(self.token)
def token_success(self):
self.tokenAuth()
self.assertIsNotNone(self.token)
def test_info_view(self):
"""
Test that we can read the 'info-view' endpoint.
"""
url = reverse('api-inventree-info')
response = self.client.get(url, format='json')
data = response.json()
self.assertIn('server', data)
self.assertIn('version', data)
self.assertIn('instance', data)
        self.assertEqual('InvenTree', data['server'])
def test_role_view(self):
"""
Test that we can access the 'roles' view for the logged in user.
Also tests that it is *not* accessible if the client is not logged in.
"""
url = reverse('api-user-roles')
response = self.client.get(url, format='json')
# Not logged in, so cannot access user role data
self.assertTrue(response.status_code in [401, 403])
# Now log in!
self.basicAuth()
response = self.get(url)
data = response.data
self.assertIn('user', data)
self.assertIn('username', data)
self.assertIn('is_staff', data)
self.assertIn('is_superuser', data)
self.assertIn('roles', data)
roles = data['roles']
role_names = roles.keys()
# By default, 'view' permissions are provided
for rule in RuleSet.RULESET_NAMES:
self.assertIn(rule, role_names)
self.assertIn('view', roles[rule])
self.assertNotIn('add', roles[rule])
self.assertNotIn('change', roles[rule])
self.assertNotIn('delete', roles[rule])
def test_with_superuser(self):
"""
Superuser should have *all* roles assigned
"""
self.user.is_superuser = True
self.user.save()
self.basicAuth()
response = self.get(reverse('api-user-roles'))
roles = response.data['roles']
for rule in RuleSet.RULESET_NAMES:
self.assertIn(rule, roles.keys())
for perm in ['view', 'add', 'change', 'delete']:
self.assertIn(perm, roles[rule])
def test_with_roles(self):
"""
Assign some roles to the user
"""
self.basicAuth()
response = self.get(reverse('api-user-roles'))
self.assignRole('part.delete')
self.assignRole('build.change')
response = self.get(reverse('api-user-roles'))
roles = response.data['roles']
# New role permissions should have been added now
self.assertIn('delete', roles['part'])
self.assertIn('change', roles['build'])
| 2.734375 | 3 |
mom/__init__.py | ands904/ands904-tinypyclone | 16 | 12769641 | <reponame>ands904/ands904-tinypyclone<filename>mom/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 <NAME> <<EMAIL>>
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":synopsis: Mother of all our Python projects.
:module: mom
How many times have you noticed a ``utils`` subpackage or module?
-----------------------------------------------------------------
Yeah. There is a lot of code duplication that occurs throughout
our Python-based projects and results in code that is harder
to maintain in the long term. Not to mention all the duplicate
test code and more maintenance.
Therefore, we have decided to move all those ``util`` modules and
subpackages to a central library, which we use throughout our projects.
If you have a ``utils`` module, chances are you're duplicating
and wasting effort, whereas you could instead use the tested code
provided by this library. If there's something not included in
this library that you think should be, speak up.
.. automodule:: mom.builtins
.. automodule:: mom.collections
.. automodule:: mom.decorators
.. automodule:: mom.functional
.. automodule:: mom.itertools
.. automodule:: mom.math
.. automodule:: mom.mimeparse
.. automodule:: mom.string
"""
__author__ = "<EMAIL> (<NAME>)"
| 1.8125 | 2 |
picasso/inpainters/interfaces.py | giuspugl/picasso | 2 | 12769642 | #
#
#
#
# date: 2019-08-20
# author: <NAME>
# python3.6
# Copyright (C) 2019 <NAME> <EMAIL>
#
#import .deep_prior_inpainter as dp
#import .contextual_attention_gan as ca
#import .nearest_neighbours_inpainter as nn
from inpainters import (
deep_prior_inpainter as dp ,
contextual_attention_gan as ca,
nearest_neighbours_inpainter as nn
)
class HoleInpainter(object) :
"""
This class provides an interface to the 3 inpainting techniques.
    One of the key parameters is ``args``, which carries the arguments the user passed to the inpainting scripts.
"""
def __init__ (self, args , Npix = 128, meshgrid=True ) :
"""
Initialize inpainter with the method given in ``args.method``.
        So far the Deep-Prior and GAN architectures only support ``128x128`` images.
"""
if args.method =='Deep-Prior':
self.Inpainter = dp.DeepPrior ( (Npix, Npix, 4),
verbose = args.debug, meshgrid=meshgrid )
self.epochs =args.dp_epochs
self.optimizer="Adam"
self.Inpainter.compile(optimizer=self.optimizer )
elif args.method=='Contextual-Attention' :
self.Inpainter = ca.ContextualAttention( modeldir =args.checkpoint_dir
, verbose = args.debug )
elif args.method=='Nearest-Neighbours' :
self.Inpainter = nn.NearestNeighbours(verbose = args.debug, Npix=Npix, tol =args.nn_tol )
self.method = args.method
pass
def __call__(self, reuse ) :
"""
Run inpainting,
**Parameters**
- `reuse`:{bool}
whether to recompile or not the Deep-Prior and GAN neural network.
"""
if self.method== 'Deep-Prior':
return self.DPinpaint(reuse=reuse )
elif self.method== 'Contextual-Attention':
return self.GANinpaint(reuse=reuse )
elif self.method== 'Nearest-Neighbours':
return self.NNinpaint()
def setup_input(self , fname, rdseed=None ) :
"""
Pre-process the flat map by renormalizing and reshaping it
        as required by the inpainting method
"""
self.Inpainter.rdseed = rdseed
return self.Inpainter.setup_input( fname )
def DPinpaint(self,reuse ) :
"""
Set of instructions to inpaint with :class:`DeepPrior`
"""
if reuse :
self.Inpainter.compile (optimizer=self.optimizer)
self.Inpainter.train(self.Inpainter.Z , self.Inpainter.X , epochs=self.epochs )
self.Inpainter.evaluate(self.Inpainter.Z,self.Inpainter.X)
p = self.Inpainter.predict()[0,:,:,0]
p = self.Inpainter.rescale_back(p )
return p
def GANinpaint (self , reuse ) :
"""
Set of instructions to inpaint with
:class:`ContextualAttention`
"""
p = self.Inpainter.predict( reuse )
p = self.Inpainter.rescale_back(p )
return p
def NNinpaint (self ) :
"""
Set of instructions to inpaint with :class:`NearestNeighbours`
"""
return self.Inpainter.predict ( )
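

# Example usage (a minimal sketch, assuming an ``args`` namespace produced by
# the inpainting scripts with fields ``method``, ``debug``, ``dp_epochs``,
# ``checkpoint_dir`` and ``nn_tol``, plus an input map file ``fname``):
#
#   inpainter = HoleInpainter(args)
#   inpainter.setup_input(fname)
#   inpainted = inpainter(reuse=False)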
| 2.359375 | 2 |
examples/hica/injectors/env_passthrough.py | connectthefuture/docker-hacks | 0 | 12769643 | # vim: set fileencoding=utf-8
# <NAME> <<EMAIL>>
#
# HICA - Host integrated container applications
#
# MIT License (C) 2015
import os
from base.hica_base import *
class EnvPassthroughInjector(HicaInjector):
def get_description(self):
return "Bind mounts current environment into the container"
def get_config_key(self):
return "io.hica.env_passthrough"
def get_injected_args(self):
return (("--env", HicaValueType.FULLENV, os.environ),)
def register(context):
obj = EnvPassthroughInjector()
context[obj.get_config_key()] = obj
| 1.867188 | 2 |
lib/python3.8/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py | cjsteel/python3-venv-ansible-2.10.5 | 0 | 12769644 | <reponame>cjsteel/python3-venv-ansible-2.10.5<filename>lib/python3.8/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_quota_policy.py
#!/usr/bin/python
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_quota_policy
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_quota_policy
short_description: NetApp Ontap create, assign, rename or delete quota policy
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '19.11.0'
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
description:
- Create, assign, rename or delete the quota policy
options:
state:
description:
- Whether the specified quota policy should exist or not.
choices: ['present', 'absent']
default: present
type: str
vserver:
description:
- Specifies the vserver for the quota policy.
required: true
type: str
name:
description:
- Specifies the quota policy name to create or rename to.
required: true
type: str
from_name:
description:
- Name of the existing quota policy to be renamed to name.
type: str
auto_assign:
description:
- when true, assign the policy to the vserver, whether it is newly created, renamed, or already exists.
- when true, the policy identified by name replaces the already assigned policy.
- when false, the policy is created if it does not already exist but is not assigned.
type: bool
default: true
version_added: 20.12.0
"""
EXAMPLES = """
- name: Create quota policy
na_ontap_quota_policy:
state: present
vserver: SVM1
name: ansible_policy
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Rename quota policy
na_ontap_quota_policy:
state: present
vserver: SVM1
name: new_ansible
from_name: ansible
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Delete quota policy
na_ontap_quota_policy:
state: absent
vserver: SVM1
name: ansible_policy
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
import ansible_collections.netapp.ontap.plugins.module_utils.zapis_svm as zapis_svm
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapQuotaPolicy(object):
"""
Create, assign, rename or delete a quota policy
"""
def __init__(self):
"""
Initialize the ONTAP quota policy class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
auto_assign=dict(required=False, type='bool', default=True),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['name', 'vserver'])
],
supports_check_mode=True
)
# set up variables
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg='The python NetApp-Lib module is required')
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
def get_quota_policy(self, policy_name=None):
if policy_name is None:
policy_name = self.parameters['name']
return_value = None
quota_policy_get_iter = netapp_utils.zapi.NaElement('quota-policy-get-iter')
quota_policy_info = netapp_utils.zapi.NaElement('quota-policy-info')
quota_policy_info.add_new_child('policy-name', policy_name)
quota_policy_info.add_new_child('vserver', self.parameters['vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(quota_policy_info)
quota_policy_get_iter.add_child_elem(query)
try:
result = self.server.invoke_successfully(quota_policy_get_iter, True)
if result.get_child_by_name('attributes-list'):
quota_policy_attributes = result['attributes-list']['quota-policy-info']
return_value = {
'name': quota_policy_attributes['policy-name']
}
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error fetching quota policy %s: %s' % (policy_name, to_native(error)),
exception=traceback.format_exc())
return return_value
def create_quota_policy(self):
"""
Creates a new quota policy
"""
quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-create")
quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
try:
self.server.invoke_successfully(quota_policy_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating quota policy %s: %s' % (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def delete_quota_policy(self):
"""
Deletes a quota policy
"""
quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-delete")
quota_policy_obj.add_new_child("policy-name", self.parameters['name'])
try:
self.server.invoke_successfully(quota_policy_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting quota policy %s: %s' % (self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def rename_quota_policy(self):
"""
Rename a quota policy
"""
quota_policy_obj = netapp_utils.zapi.NaElement("quota-policy-rename")
quota_policy_obj.add_new_child("policy-name", self.parameters['from_name'])
quota_policy_obj.add_new_child("vserver", self.parameters['vserver'])
quota_policy_obj.add_new_child("new-policy-name", self.parameters['name'])
try:
self.server.invoke_successfully(quota_policy_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error renaming quota policy %s: %s' % (self.parameters['from_name'], to_native(error)),
exception=traceback.format_exc())
def apply(self):
netapp_utils.ems_log_event("na_ontap_quota_policy", self.server)
current = self.get_quota_policy()
        # rename and create are mutually exclusive
        rename = None
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
if cd_action == 'create' and self.parameters.get('from_name'):
# create policy by renaming it
rename = self.na_helper.is_rename_action(self.get_quota_policy(self.parameters['from_name']), current)
if rename is None:
self.module.fail_json(msg='Error renaming quota policy: %s does not exist.' % self.parameters['from_name'])
# check if policy should be assigned
assign_policy = cd_action == 'create' and self.parameters['auto_assign']
if cd_action is None and current and self.parameters['auto_assign']:
# find out if the existing policy needs to be changed
svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
if svm.get('quota_policy') != self.parameters['name']:
assign_policy = True
self.na_helper.changed = True
if cd_action == 'delete':
# can't delete if already assigned
svm = zapis_svm.get_vserver(self.server, self.parameters['vserver'])
if svm.get('quota_policy') == self.parameters['name']:
self.module.fail_json(msg='Error policy %s cannot be deleted as it is assigned to the vserver %s' %
(self.parameters['name'], self.parameters['vserver']))
if self.na_helper.changed and not self.module.check_mode:
if rename:
self.rename_quota_policy()
elif cd_action == 'create':
self.create_quota_policy()
elif cd_action == 'delete':
self.delete_quota_policy()
if assign_policy:
zapis_svm.modify_vserver(self.server, self.module, self.parameters['vserver'], modify=dict(quota_policy=self.parameters['name']))
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""
Creates the NetApp Ontap quota policy object and runs the correct play task
"""
obj = NetAppOntapQuotaPolicy()
obj.apply()
if __name__ == '__main__':
main()
| 1.6875 | 2 |
snacks/tests.py | Samerodeh/DjangoX- | 0 | 12769645 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Snack
# Create your tests here.
class SnacksTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username = 'samer', email = '<EMAIL>', password = '<PASSWORD>'
)
self.snack = Snack.objects.create(
title = 'Chips', description = 'It tastes so delicious', purchaser = self.user
)
def test_StringRepresentation(self):
self.assertEqual(str(self.snack), "Chips")
def test_SnackContent(self):
self.assertEqual(f"{self.snack.title}", 'Chips')
self.assertEqual(f"{self.snack.description}", 'It tastes so delicious')
self.assertEqual(self.snack.purchaser, self.user)
def test_SnackListView(self):
url = reverse('snack_list')
actual = self.client.get(url).status_code
self.assertEqual(actual, 200)
def test_SnackDetailsView(self):
response = self.client.get(reverse('snack_details', args='1'))
self.assertEqual(response.status_code, 200)
def test_SnackCreateView(self):
response = self.client.post(reverse("snack_create"),{"title": "Laiz", "description": "Laiz is delicious", "purchaser": self.user})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Laiz')
self.assertContains(response, 'Laiz is delicious')
self.assertContains(response, 'samer')
def test_SnackUpdateView(self):
response = self.client.post(reverse('snack_update', args='1'), {'title':'Chocolate'})
self.assertContains(response, 'Chocolate')
def test_SnackDeleteView(self):
response = self.client.get(reverse("snack_delete", args="1"))
self.assertEqual(response.status_code, 200) | 2.515625 | 3 |
tests/factories.py | sarafanio/sarafan | 2 | 12769646 | import factory
from sarafan.events import Publication
from .utils import generate_rnd_hash, generate_rnd_address
class PublicationFactory(factory.Factory):
class Meta:
model = Publication
reply_to = '0x'
magnet = factory.LazyFunction(lambda: generate_rnd_hash()[2:])
source = factory.LazyFunction(generate_rnd_address)
size = 1
retention = 1
| 1.96875 | 2 |
src/cdc/songs_cdc.py | tomfran/lastfm-project | 1 | 12769647 | <reponame>tomfran/lastfm-project
from .abstract_classes import AbstractRegistryCDC
from datetime import datetime
import json
class SongsCDC(AbstractRegistryCDC):
"""
Songs cdc implementation
It implements a registry cdc logic, as songs can change over
time, and no chrono attribute can be used.
"""
def __init__(self, source, destination, syncFile, songs_to_request_dir, key_attr):
"""
Constructor
Args:
source (AbstractSource): source class to read data
destination (AbstractDestination): class that implements write logic
syncFile (str): path to the sync file to update
songs_to_request_dir (str): path to the directory containing
songs to request file
key_attr (str): attribute to be used as a key in the registry
cdc logic
"""
super().__init__(source, destination, syncFile, key_attr)
# update the songs source param list, to get the songs
# according to the sync file
self.songs_to_request_file = f"{songs_to_request_dir}/{datetime.today().strftime('%Y%m%d')}.json"
self.update_source()
def update_source(self):
"""
Get songs to request from the file in the songs to request path,
and update source parameters
"""
# read from songs to request file, that is, the songs listened by users today
with open(self.songs_to_request_file, 'r') as f:
data = json.load(f)
# source is of type 'SongsBatchSource'
self.source.update_songs_to_request(data)
def access_fields(self, table):
"""
Access the needed fields from the source data
Args:
            table (list): list of raw rows coming from the source
Returns:
list: list of properly trimmed data
"""
def process_track(tr):
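            # Each source row wraps the track payload under 'track'; the album
            # and top-tag blocks are optional, so they are guarded before use.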
tr = tr['track']
artist = tr.get('artist')
            # TODO check if every album has an image, a title and an artist
ret = {
'artist_name' : artist['name'],
'title' : tr['name'],
'duration' : tr['duration'],
'url' : tr['url'],
'song_id' : hash(tr['url'].lower())
}
album = tr.get('album')
if album:
ret.update({
'album_artist' : album['artist'],
'album_title' : album['title'],
'album_image' : album['image'][3]['#text']
})
toptags = tr.get('toptags')
if toptags:
if toptags.get('tag'):
ret['top_tags'] = tuple([d['name'] for d in toptags['tag']])
return ret
return [process_track(row) for row in table]
| 2.96875 | 3 |
xclim/testing/tests/test_cli.py | Ouranosinc/dcvar | 1 | 12769648 | #!/usr/bin/env python
# Tests for `xclim` package, command line interface
from __future__ import annotations
import numpy as np
import pytest
import xarray as xr
from click.testing import CliRunner
import xclim
from xclim.cli import cli
from xclim.testing import open_dataset
try:
from dask.distributed import Client
except ImportError:
Client = None
K2C = 273.15
@pytest.mark.parametrize(
"indicators,indnames",
[
([xclim.atmos.tg_mean], ["tg_mean"]),
(
# Note: This test is dependent on indicator name length and terminal dimensions.
[xclim.atmos.tn_mean, xclim.atmos.ice_days],
["tn_mean", "ice_days"],
),
],
)
def test_info(indicators, indnames):
runner = CliRunner()
results = runner.invoke(cli, ["info"] + indnames)
for ind in indicators:
assert ind.title in results.output
assert ind.identifier in results.output
def test_indices():
runner = CliRunner()
results = runner.invoke(cli, ["indices"])
for name, ind in xclim.core.indicator.registry.items():
assert name.lower() in results.output
@pytest.mark.parametrize(
"indicator,indname",
[
(xclim.atmos.heating_degree_days, "heating_degree_days"),
(xclim.land.base_flow_index, "base_flow_index"),
],
)
def test_indicator_help(indicator, indname):
runner = CliRunner()
results = runner.invoke(cli, [indname, "--help"])
for name in indicator.parameters.keys():
if name not in ["ds", "indexer"]:
assert name in results.output
@pytest.mark.parametrize(
"indicator,expected,varnames",
[
("tg_mean", 272.15, ["tas"]),
("dtrvar", 0.0, ["tasmin", "tasmax"]),
("heating_degree_days", 6588.0, ["tas"]),
("solidprcptot", 31622400.0, ["tas", "pr"]),
],
)
def test_normal_computation(
tasmin_series, tasmax_series, pr_series, tmp_path, indicator, expected, varnames
):
tasmin = tasmin_series(np.ones(366) + 270.15, start="1/1/2000")
tasmax = tasmax_series(np.ones(366) + 272.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
ds = xr.Dataset(
data_vars={
"tasmin": tasmin,
"tasmax": tasmax,
"tas": xclim.atmos.tg(tasmin, tasmax),
"pr": pr,
}
)
input_file = tmp_path / "in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
args = ["-i", str(input_file), "-o", str(output_file), "-v", indicator]
runner = CliRunner()
results = runner.invoke(cli, args)
for varname in varnames:
assert f"Parsed {varname} = {varname}" in results.output
assert "Processing :" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
outvar = list(out.data_vars.values())[0]
np.testing.assert_allclose(outvar[0], expected)
def test_multi_input(tas_series, pr_series, tmp_path):
tas = tas_series(np.ones(366) + 273.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
tas_file = tmp_path / "multi_tas_in.nc"
pr_file = tmp_path / "multi_pr_in.nc"
output_file = tmp_path / "out.nc"
tas.to_dataset().to_netcdf(tas_file)
pr.to_dataset().to_netcdf(pr_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(tmp_path / "multi_*_in.nc"),
"-o",
str(output_file),
"-v",
"solidprcptot",
],
)
assert "Processing : solidprcptot" in results.output
out = xr.open_dataset(output_file)
assert out.solidprcptot.sum() == 0
def test_multi_output(tmp_path):
ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc")
input_file = tmp_path / "ws_in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"wind_speed_from_vector",
],
)
assert "Processing : wind_speed_from_vector" in results.output
def test_renaming_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.name = "tas"
tas.to_netcdf(input_file)
with xclim.set_options(cf_compliance="warn"):
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tn_mean",
"--tasmin",
"tas",
],
)
assert "Processing : tn_mean" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tn_mean[0] == 1.0
def test_indicator_chain(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tg_mean",
"growing_degree_days",
],
)
assert "Processing : tg_mean" in results.output
assert "Processing : growing_degree_days" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tg_mean[0] == 1.0
assert out.growing_degree_days[0] == 0
def test_missing_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "tn_mean"]
)
assert results.exit_code == 2
assert "'tasmin' was not found in the input dataset." in results.output
@pytest.mark.parametrize(
"options,output",
[
(["--dask-nthreads", "2"], "Error: '--dask-maxmem' must be given"),
(["--chunks", "time:90"], "100% Complete"),
(["--chunks", "time:90,lat:5"], "100% Completed"),
(["--version"], xclim.__version__),
],
)
def test_global_options(tas_series, tmp_path, options, output):
if "dask" in options[0]:
pytest.importorskip("dask.distributed")
tas = tas_series(np.ones(366), start="1/1/2000")
tas = xr.concat([tas] * 10, dim="lat")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
["-i", str(input_file), "-o", str(output_file)] + options + ["tg_mean"],
)
assert output in results.output
def test_suspicious_precipitation_flags(pr_series, tmp_path):
bad_pr = pr_series(np.zeros(365), start="1971-01-01")
# Add some strangeness
bad_pr[8] = -1e-6 # negative values
bad_pr[120] = 301 / 3600 / 24 # 301mm/day
bad_pr[121:141] = 1.1574074074074072e-05 # 1mm/day
bad_pr[200:300] = 5.787037037037036e-05 # 5mm/day
input_file = tmp_path / "bad_pr.nc"
output_file = tmp_path / "out.nc"
bad_pr.to_netcdf(input_file)
runner = CliRunner()
runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "dataflags", "pr"]
)
with xr.open_dataset(output_file) as ds:
for var in ds.data_vars:
assert var
@pytest.mark.slow
def test_dataflags_output(tmp_path, tas_series, tasmax_series, tasmin_series):
ds = xr.Dataset()
for series, val in zip([tas_series, tasmax_series, tasmin_series], [0, 10, -10]):
vals = val + K2C + np.sin(np.pi * np.arange(366 * 3) / 366)
arr = series(vals, start="1971-01-01")
ds = xr.merge([ds, arr])
input_file = tmp_path / "ws_in.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"dataflags",
"-r",
],
)
assert "Dataset passes quality control checks!" in results.output
def test_bad_usage(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
# No command
results = runner.invoke(cli, ["-i", str(input_file)])
assert "Missing command" in results.output
# Indicator not found:
results = runner.invoke(cli, ["info", "mean_ether_velocity"])
assert "Indicator 'mean_ether_velocity' not found in xclim" in results.output
# No input file given
results = runner.invoke(cli, ["-o", str(output_file), "base_flow_index"])
assert "No input file name given" in results.output
# No output file given
results = runner.invoke(cli, ["-i", str(input_file), "tg_mean"])
assert "No output file name given" in results.output
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"--dask-nthreads",
"2",
"tg_mean",
],
)
if Client is None: # dask.distributed not installed
assert "distributed scheduler is not installed" in results.output
else:
assert "'--dask-maxmem' must be given" in results.output
@pytest.mark.requires_docs
@pytest.mark.parametrize("method, pattern", [("-r", "`GH/"), ("-m", "[GH/")])
def test_release_notes(method, pattern):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", method],
)
assert ":pull:`" not in results.output
assert ":issue:`" not in results.output
assert ":user:`" not in results.output
assert pattern in results.output
@pytest.mark.parametrize(
"method, error",
[
(
["-m", "-r"],
"Cannot return both Markdown and ReStructuredText in same release_notes call.",
),
(list(), "Must specify Markdown (-m) or ReStructuredText (-r)."),
],
)
def test_release_notes_failure(method, error):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", *method],
)
assert error in results.output
def test_show_version_info(capsys):
runner = CliRunner()
results = runner.invoke(cli, ["show_version_info"])
assert "INSTALLED VERSIONS" in results.output
assert "python" in results.output
assert "boltons: installed" in results.output
| 2.09375 | 2 |
frangiclave/csjson.py | frangiclave/frangiclave-compendium | 16 | 12769649 | from typing import Any, List, Dict, Union, Tuple, TextIO
# Left here for now; should I delete it?
def load(fh: TextIO) -> Union[Dict[str, Any], List[Any]]:
return loads(fh.read())
def loads(json: str) -> Union[Dict[str, Any], List[Any]]:
length = len(json)
num = 0
num = _move_to_next_node(json, num, length)
if num < length:
end = _find_matching_end(json, num, length)
if json[num] == '{':
return _read_dictionary(json, num, end)[0]
elif json[num] == '[':
return _read_list(json, num, end)[0]
def _move_to_next_node(json: str, begin: int, end: int) -> int:
flag = False
for i in range(begin, end):
c = json[i]
if c == '"' and i > 0 and json[i - 1] != '"':
flag = not flag
if not flag:
if c == '{' or c == '[':
return i
return end
def _find_matching_end(json: str, begin: int, end: int) -> int:
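    # Scan forward from `begin`, tracking brace/bracket depth while skipping
    # over (unescaped) string literals; return the index where the structure
    # opened at `begin` closes.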
num_braces = 0
num_brackets = 0
flag = False
for i in range(begin, end):
c = json[i]
if i == 0 or json[i - 1] != '\\':
if c == '"':
flag = not flag
elif not flag:
if c == '{':
num_braces += 1
elif c == '[':
num_brackets += 1
elif c == '}':
num_braces -= 1
elif c == ']':
num_brackets -= 1
if num_braces == 0 and num_brackets == 0:
return i
return end
def _read_dictionary(
json: str,
begin: int,
end: int
) -> Tuple[Dict[str, Any], int]:
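    # Hand-rolled parser state: `num` is 1 while reading a key, 2 while
    # reading a value, and 0 once a nested value has been consumed; `flag` is
    # True inside a quoted string; `text` holds the characters excluded from
    # keys; `text2`/`text3` accumulate the current key and raw value.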
dictionary = {}
num = 1
flag = False
text = '\r\n\t ?"\'\\,:{}[]'
text2 = r''
text3 = r''
i = begin + 1
while i < end:
flag2 = False
c = json[i]
if i == 0 or json[i - 1] != '\\':
if c == '"':
flag = not flag
if not flag:
if num != 1 and c == ',':
text3 = _trim_property_value(text3)
if len(text2) > 0 and text2 not in dictionary \
and len(text3) > 0:
dictionary[text2] = _json_decode(text3)
num = 1
text2 = ''
text3 = ''
flag2 = True
if num == 1 and c == ':':
num = 2
text3 = ''
flag2 = True
if num == 2 and c == '{':
end2 = _find_matching_end(json, i, end)
dictionary[text2], i = _read_dictionary(json, i, end2)
text3 = ''
num = 0
flag2 = True
if num == 2 and c == '[':
end3 = _find_matching_end(json, i, end)
dictionary[text2], i = _read_list(json, i, end3)
text3 = ''
num = 0
flag2 = True
if not flag2:
if num == 1 and c not in text:
text2 += c
if num == 2:
text3 += c
i += 1
if len(text2) > 0 and text2 not in dictionary:
text3 = _trim_property_value(text3)
if len(text3) > 0:
dictionary[text2] = _json_decode(text3)
return dictionary, i
def _read_list(
json: str,
begin: int,
end: int
) -> Tuple[List[Any], int]:
_list = []
flag = False
text = ""
i = begin + 1
while i < end:
flag2 = False
c = json[i]
if i == 0 or json[i - 1] != '\\':
if c == '"':
flag = not flag
if not flag:
if c == '{':
end2 = _find_matching_end(json, i, end)
dictionary, i = _read_dictionary(json, i, end2)
_list.append(dictionary)
text = ''
flag2 = True
elif c == '[':
end3 = _find_matching_end(json, i, end)
dictionary, i = _read_list(json, i, end3)
_list.append(dictionary)
text = ''
flag2 = True
elif c == ',':
text = _trim_property_value(text)
if len(text):
_list.append(_json_decode(text))
text = ''
flag2 = True
if not flag2:
text += c
i += 1
text = _trim_property_value(text)
if len(text) > 0:
_list.append(_json_decode(text))
return _list, i
def _trim_property_value(value: str) -> str:
value = value.strip()
if not value:
result = ''
else:
        while len(value) > 1 and (value[0] == '\r' or value[0] == '\n'
                                  or value[0] == '\t' or value[0] == ' '):
            value = value[1:]
        while len(value) > 0 and (value[-1] == '\r' or value[-1] == '\n'
                                  or value[-1] == '\t' or value[-1] == ' '):
            value = value[:-1]
if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
result = value[1:-1]
else:
result = value
return result
def _json_decode(json_string: str) -> str:
json_string = json_string.replace('\\/', '/')
json_string = json_string.replace('\\n', '\n')
json_string = json_string.replace('\\r', '\r')
json_string = json_string.replace('\\t', '\t')
json_string = json_string.replace('\\"', '"')
json_string = json_string.replace('\\\\', '\\')
return json_string
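

# A small sanity check (a sketch added for illustration, not part of the
# original module; note that scalar values come back as strings because the
# parser performs no type coercion):
#
#   >>> loads('{"id": "lantern", "aspects": [{"lantern": 2}]}')
#   {'id': 'lantern', 'aspects': [{'lantern': '2'}]}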
| 2.90625 | 3 |
cascad/agents/aritifcial_system/chain.py | Will-Holden/cascadv2 | 0 | 12769650 | <filename>cascad/agents/aritifcial_system/chain.py
"""Simulate the chain where all smartcontract stored
"""
import uuid
# from utils.myfuncs import Singleton
from cascad.utils.myfuncs import Singleton
from cascad.models.kb import Entity, Property
from cascad.utils.constant import *
from cascad.utils import get_id
has_contract = Property("has_contract", "has_contract")
has_address = Property("has_address", "has_address")
class ChainBase(metaclass=Singleton):
_name = "ChainBase"
def __init__(self):
self.store = {}
self.entity = Entity(get_id(), self._name)
self.entity['name'] = self._name
def gen_address(self):
return str(uuid.uuid4())
def add_contract(self, contract):
address = self.gen_address()
self.store[address] = contract
self.entity[has_contract] = contract.entity
contract.entity[has_address] = address
return address
def get_contract(self, address):
return self.store[address]
def __getitem__(self, address):
return self.get_contract(address)
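

# Example usage (a minimal sketch; ChainBase uses the Singleton metaclass, so
# repeated construction returns the same instance, and `contract` is assumed
# to be any object exposing an `entity` attribute):
#
#   chain = ChainBase()
#   address = chain.add_contract(contract)
#   assert chain[address] is contract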
| 2.375 | 2 |