content stringlengths 5 1.05M |
|---|
from pathlib import Path
import unittest
from pxr import Sdf,Usd, UsdShade
from vox2usd import Vox2UsdConverter
DATA_PATH = Path(__file__).parent.joinpath("data")
class TestUsdPrim(unittest.TestCase):
    """Integration tests for the USD stages produced by Vox2UsdConverter.

    Each test converts a small .vox fixture from the data directory and
    inspects the resulting USD stage.
    """

    def _convert(self, filename):
        """Convert DATA_PATH/<filename>.vox and return the opened USD stage."""
        vox2usd = Vox2UsdConverter(DATA_PATH / "{}.vox".format(filename))
        vox2usd.convert()
        stage_file = DATA_PATH / "{}.usd".format(filename)
        self.assertTrue(stage_file.exists())
        return Usd.Stage.Open(str(stage_file))

    def test_default_prim(self):
        """The default prim is the asset root with USD model metadata set."""
        filename = "test_geomsubsets"
        stage = self._convert(filename)
        default_prim = stage.GetDefaultPrim()
        self.assertTrue(default_prim)
        self.assertEqual(default_prim.GetPath(), Sdf.Path("/test_geomsubsets"))
        model_api = Usd.ModelAPI(default_prim)
        self.assertEqual(model_api.GetAssetName(), filename)
        self.assertEqual(model_api.GetAssetIdentifier().path, "{}.usd".format(filename))
        self.assertEqual(model_api.GetKind(), "component")

    def test_variant_sets(self):
        """Geometry/Shader variant sets exist with the expected defaults."""
        filename = "test_geomsubsets"
        stage = self._convert(filename)
        asset_prim = stage.GetDefaultPrim()
        vsets = asset_prim.GetVariantSets()
        self.assertTrue(vsets.HasVariantSet("Geometry"))
        geom_vset = vsets.GetVariantSet("Geometry")
        self.assertTrue(geom_vset.HasAuthoredVariant("MergedMeshes"))
        self.assertTrue(geom_vset.HasAuthoredVariant("PointInstances"))
        self.assertEqual(geom_vset.GetVariantSelection(), "MergedMeshes")
        self.assertTrue(vsets.HasVariantSet("Shader"))
        shader_vset = vsets.GetVariantSet("Shader")
        self.assertTrue(shader_vset.HasAuthoredVariant("Preview"))
        self.assertTrue(shader_vset.HasAuthoredVariant("Omniverse"))
        self.assertEqual(shader_vset.GetVariantSelection(), "Omniverse")
        # The PointShape variant set only becomes visible once the
        # PointInstances geometry variant is selected.
        self.assertFalse(vsets.HasVariantSet("PointShape"))
        geom_vset.SetVariantSelection("PointInstances")
        self.assertTrue(vsets.HasVariantSet("PointShape"))
        pt_shape_vset = vsets.GetVariantSet("PointShape")
        self.assertTrue(pt_shape_vset.HasAuthoredVariant("Cubes"))
        self.assertTrue(pt_shape_vset.HasAuthoredVariant("Spheres"))
        self.assertTrue(pt_shape_vset.HasAuthoredVariant("Studs"))
        self.assertEqual(pt_shape_vset.GetVariantSelection(), "Cubes")

    def test_multilayer_model(self):
        """Conversion emits mesh/points payload layers; variants swap prim type."""
        filename = "test_geomsubsets"
        stage = self._convert(filename)
        self.assertTrue((DATA_PATH / "{}.mesh.usdc".format(filename)).exists())
        self.assertTrue((DATA_PATH / "{}.points.usdc".format(filename)).exists())
        mesh_prim = stage.GetPrimAtPath("/test_geomsubsets/Geometry/VoxelRoot/VoxelModel_3")
        self.assertTrue(mesh_prim)
        self.assertEqual(mesh_prim.GetTypeName(), "Mesh")
        asset_prim = stage.GetDefaultPrim()
        vsets = asset_prim.GetVariantSets()
        geom_vset = vsets.GetVariantSet("Geometry")
        geom_vset.SetVariantSelection("PointInstances")
        # The same prim path resolves to a PointInstancer under the new variant.
        pt_instancer_prim = stage.GetPrimAtPath("/test_geomsubsets/Geometry/VoxelRoot/VoxelModel_3")
        self.assertTrue(pt_instancer_prim)
        self.assertEqual(pt_instancer_prim.GetTypeName(), "PointInstancer")

    def test_geomsubsets(self):
        """Each mesh has one GeomSubset per material, each with a bound material."""
        filename = "test_geomsubsets"
        stage = self._convert(filename)
        self.assertTrue(stage)
        self.assertTrue((DATA_PATH / "{}.mesh.usdc".format(filename)).exists())
        self.assertTrue((DATA_PATH / "{}.points.usdc".format(filename)).exists())
        mesh_prim = stage.GetPrimAtPath("/test_geomsubsets/Geometry/VoxelRoot/VoxelModel_3")
        geomsubsets = mesh_prim.GetChildren()
        self.assertEqual(len(geomsubsets), 2)
        for subset in geomsubsets:
            face_indices = subset.GetAttribute("indices").Get()
            # BUG FIX: was assertTrue(len(face_indices), 5), which always
            # passes because 5 is interpreted as the msg argument.
            self.assertEqual(len(face_indices), 5)
            binding_api = UsdShade.MaterialBindingAPI.Get(stage, subset.GetPath())
            mtl_path = binding_api.GetDirectBindingRel().GetTargets()[0]
            self.assertTrue(mtl_path)
|
# hgweb/wsgicgi.py - CGI->WSGI translator
#
# Copyright 2006 Eric Hopper <hopper@omnifarious.org>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# This was originally copied from the public domain code at
# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
from __future__ import absolute_import
import os
from .. import (
pycompat,
)
from ..utils import (
procutil,
)
from . import (
common,
)
def launch(application):
    """Run a WSGI *application* once as a CGI script.

    Translates the CGI environment, stdin and stdout of the current process
    into a WSGI ``environ`` + ``start_response`` pair, invokes *application*,
    and streams the response body to stdout prefixed with a CGI ``Status:``
    header line.
    """
    # CGI bodies are raw bytes; disable newline translation on the std pipes.
    procutil.setbinary(procutil.stdin)
    procutil.setbinary(procutil.stdout)

    environ = dict(os.environ.iteritems())  # re-exports
    environ.setdefault(r'PATH_INFO', '')
    if environ.get(r'SERVER_SOFTWARE', r'').startswith(r'Microsoft-IIS'):
        # IIS includes script_name in PATH_INFO
        scriptname = environ[r'SCRIPT_NAME']
        if environ[r'PATH_INFO'].startswith(scriptname):
            environ[r'PATH_INFO'] = environ[r'PATH_INFO'][len(scriptname):]

    stdin = procutil.stdin
    if environ.get(r'HTTP_EXPECT', r'').lower() == r'100-continue':
        # Defer reading the request body until the application asks for it,
        # emitting "100 Continue" to the client first.
        stdin = common.continuereader(stdin, procutil.stdout.write)

    environ[r'wsgi.input'] = stdin
    environ[r'wsgi.errors'] = procutil.stderr
    environ[r'wsgi.version'] = (1, 0)
    environ[r'wsgi.multithread'] = False
    environ[r'wsgi.multiprocess'] = True
    environ[r'wsgi.run_once'] = True

    if environ.get(r'HTTPS', r'off').lower() in (r'on', r'1', r'yes'):
        environ[r'wsgi.url_scheme'] = r'https'
    else:
        environ[r'wsgi.url_scheme'] = r'http'

    # Single-element lists so the nested closures below can mutate the
    # header state (pre-`nonlocal` idiom, per PEP 333's reference server).
    headers_set = []
    headers_sent = []
    out = procutil.stdout

    def write(data):
        # WSGI write callable: emits the stored headers before the first
        # body chunk, then writes and flushes each chunk.
        if not headers_set:
            raise AssertionError("write() before start_response()")
        elif not headers_sent:
            # Before the first output, send the stored headers
            status, response_headers = headers_sent[:] = headers_set
            out.write('Status: %s\r\n' % pycompat.bytesurl(status))
            for hk, hv in response_headers:
                out.write('%s: %s\r\n' % (pycompat.bytesurl(hk),
                                          pycompat.bytesurl(hv)))
            out.write('\r\n')
        out.write(data)
        out.flush()

    def start_response(status, response_headers, exc_info=None):
        # WSGI start_response callable; exc_info handling follows PEP 333.
        if exc_info:
            try:
                if headers_sent:
                    # Re-raise original exception if headers sent
                    raise exc_info[0](exc_info[1], exc_info[2])
            finally:
                exc_info = None  # avoid dangling circular ref
        elif headers_set:
            raise AssertionError("Headers already set!")
        headers_set[:] = [status, response_headers]
        return write

    content = application(environ, start_response)
    try:
        for chunk in content:
            write(chunk)
        if not headers_sent:
            write('')  # send headers now if body was empty
    finally:
        # Always give the application a chance to clean up (PEP 333 close()).
        getattr(content, 'close', lambda: None)()
|
import glob

# Collect every docked pose file and emit a merge.sh script that feeds
# them all to Schrodinger's glide_ensemble_merge utility in one command.
pose_files = glob.glob('./all_poses/*.maegz')
print(pose_files)

with open('merge.sh', 'w') as script:
    script.write('module load schrodinger\n')
    script.write('$SCHRODINGER/utilities/glide_ensemble_merge -JOBID ')
    # Each path is followed by a single space, matching the original output.
    script.write(''.join(path + ' ' for path in pose_files))
|
from django.utils.translation import gettext as _
ERROR_UNRECOGNIZED = 10
ERROR_INCOMPLETE_DATA = 11
ERROR_INVALID_HANDLER = 12
ERROR_INVALID_DATA_TYPE = 13
WS_CONNECTED = 20
WS_DISCONNECTED = 21
WS_DENIED = 22
LEVEL_TYPE_NEUTRAL = 30
LEVEL_TYPE_SUCCESS = 31
LEVEL_TYPE_WARNING = 32
LEVEL_TYPE_ERROR = 33
FRAME_NO_ACTION_TAKEN = 40
FRAME_ACTION_STARTED = 41
FRAME_ACTION_FINISHED = 42
FRAME_ACTION_FORCE_TERMINATED = 43
FRAME_ACTION_ABORTED = 44
ERROR_MESSAGES = {
ERROR_UNRECOGNIZED: _('Unknown error occurred.'),
ERROR_INCOMPLETE_DATA: _('Incomplete data received.'),
ERROR_INVALID_HANDLER: _('The received handler is invalid.'),
ERROR_INVALID_DATA_TYPE: _('Invalid data type.')
}
WS_CONNECTION_STATUS = {
WS_CONNECTED: _('CONNECTED'),
WS_DISCONNECTED: _('DISCONNECTED'),
WS_DENIED: _('DENIED')
}
MESSAGE_LEVEL_CHOICES = {
LEVEL_TYPE_SUCCESS: _('SUCCESS'),
LEVEL_TYPE_WARNING: _('WARNING'),
LEVEL_TYPE_ERROR: _('ERROR'),
LEVEL_TYPE_NEUTRAL: _('NEUTRAL'),
}
MESSAGE_FRAME_ACTION_CHOICES = {
FRAME_NO_ACTION_TAKEN: _('NO_ACTION_TAKEN'),
FRAME_ACTION_STARTED: _('STARTED'),
FRAME_ACTION_FINISHED: _('COMPLETED'),
FRAME_ACTION_FORCE_TERMINATED: _('FORCE_TERMINATED'),
FRAME_ACTION_ABORTED: _('FRAME_ACTION_ABORTED')
}
|
# Solution: two-pointers
class Solution:
    def threeSumClosest(self, nums: list[int], target: int) -> int:
        """Return the sum of three elements of ``nums`` closest to ``target``.

        Sorts ``nums`` in place, anchors each index ``i`` and scans the
        remaining suffix with two converging pointers. O(n^2) time, O(1)
        extra space. ``nums`` must contain at least three integers.
        """
        # BUG FIX: the annotations previously used typing.List, which was
        # never imported and raised NameError at class-definition time;
        # builtin generics (PEP 585) need no import.
        nums.sort()
        # Seed with the first valid triple so the comparison below is
        # always well-defined.
        result = nums[0] + nums[1] + nums[2]
        for i in range(len(nums) - 2):
            # Duplicate anchors cannot produce a new best sum; skip them.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            left, right = i + 1, len(nums) - 1
            while left < right:
                cur = nums[i] + nums[left] + nums[right]
                if abs(cur - target) < abs(result - target):
                    result = cur
                if cur < target:
                    left += 1
                elif cur > target:
                    right -= 1
                else:
                    # Exact hit: nothing can be closer than target itself.
                    return result
        return result
"""
Copyright 2018-2020 Jakub Kuczys (https://github.com/jack1142)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import itertools
import re
from typing import TYPE_CHECKING, Iterator, Optional, Union
import discord
from redbot.core import commands
from redbot.core.commands import GuildContext
"""
Converters below are originally from permissions core cog:
https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/permissions/converters.py
"""
MENTION_RE = re.compile(r"^<?(?:(?:@[!&]?)?|#)(\d{15,21})>?$")
def _match_id(arg: str) -> Optional[int]:
m = MENTION_RE.match(arg)
if m:
return int(m.group(1))
return None
if TYPE_CHECKING:
    # For type checkers this is just a union; at runtime the class below
    # implements discord.py's converter protocol via convert().
    MemberOrRole = Union[discord.Member, discord.Role]
else:

    class MemberOrRole:
        """Command-argument converter resolving to a member or a non-default role.

        Resolution order: ID/mention lookup first (member before role),
        then a name scan over all members and non-@everyone roles.
        """

        @classmethod
        async def convert(
            cls, ctx: GuildContext, argument: str
        ) -> Union[discord.Member, discord.Role]:
            guild: discord.Guild = ctx.guild
            # Fast path: argument is a raw ID or a mention.
            _id = _match_id(argument)
            if _id is not None:
                member: Optional[discord.Member] = guild.get_member(_id)
                if member is not None:
                    return member
                role: Optional[discord.Role] = guild.get_role(_id)
                if role is not None and not role.is_default():
                    return role
            # Slow path: collect every candidate whose name, str() form
            # (presumably name#discriminator for members — see discord.py
            # docs) or nick equals the argument.
            f = filter(lambda r: not r.is_default(), guild.roles)
            # wrong inferred type: https://github.com/python/mypy/issues/8226
            objects: Iterator[Union[discord.Member, discord.Role]] = itertools.chain(
                guild.members, f
            )
            maybe_matches = []
            for obj in objects:
                if obj.name == argument or str(obj) == argument:
                    maybe_matches.append(obj)
                # Only members have a nick; roles fall through via getattr.
                maybe_nick = getattr(obj, "nick", None)
                if maybe_nick is not None and maybe_nick == argument:
                    maybe_matches.append(obj)
            if not maybe_matches:
                raise commands.BadArgument(
                    f"'{argument}' was not found. It must be the ID, mention,"
                    " or name of a channel, user or role in this server."
                )
            if len(maybe_matches) == 1:
                return maybe_matches[0]
            # Ambiguous name: force the caller to disambiguate by ID/mention.
            raise commands.BadArgument(
                f"'{argument}' does not refer to a unique channel, user or role."
                " Please use the ID for whatever/whoever"
                " you're trying to specify, or mention it/them."
            )
if TYPE_CHECKING:
    # Type-checker view: a plain union. Runtime view: converter class below.
    MemberOrRoleOrVoiceChannel = Union[
        discord.VoiceChannel, discord.Member, discord.Role
    ]
else:

    class MemberOrRoleOrVoiceChannel:
        """Converter resolving to a voice channel, member, or non-default role.

        Same strategy as MemberOrRole, but checks voice channels first on
        both the ID and the name path.
        """

        @classmethod
        async def convert(
            cls, ctx: GuildContext, argument: str
        ) -> Union[discord.VoiceChannel, discord.Member, discord.Role]:
            guild: discord.Guild = ctx.guild
            # Fast path: argument is a raw ID or a mention.
            _id = _match_id(argument)
            if _id is not None:
                # Channel lookup returns any guild channel; only accept voice.
                channel: Optional[discord.abc.GuildChannel] = guild.get_channel(_id)
                if isinstance(channel, discord.VoiceChannel):
                    return channel
                member: Optional[discord.Member] = guild.get_member(_id)
                if member is not None:
                    return member
                role: Optional[discord.Role] = guild.get_role(_id)
                if role is not None and not role.is_default():
                    return role
            # Slow path: name scan over voice channels, members and
            # non-@everyone roles.
            f = filter(lambda r: not r.is_default(), guild.roles)
            # wrong inferred type: https://github.com/python/mypy/issues/8226
            objects: Iterator[
                Union[discord.VoiceChannel, discord.Member, discord.Role]
            ] = itertools.chain(guild.voice_channels, guild.members, f)
            maybe_matches = []
            for obj in objects:
                if obj.name == argument or str(obj) == argument:
                    maybe_matches.append(obj)
                # Only members have a nick; other types fall through.
                maybe_nick = getattr(obj, "nick", None)
                if maybe_nick is not None and maybe_nick == argument:
                    maybe_matches.append(obj)
            if not maybe_matches:
                raise commands.BadArgument(
                    f"'{argument}' was not found. It must be the ID, mention,"
                    " or name of a channel, user or role in this server."
                )
            if len(maybe_matches) == 1:
                return maybe_matches[0]
            # Ambiguous name: force disambiguation by ID/mention.
            raise commands.BadArgument(
                f"'{argument}' does not refer to a unique channel, user or role."
                " Please use the ID for whatever/whoever you're trying to specify,"
                " or mention it/them."
            )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# stemdl_classification.py
# Stemdl Classification benchmark
# SciML-Bench
# Copyright © 2022 Scientific Machine Learning Research Group
# Scientific Computing Department, Rutherford Appleton Laboratory
# Science and Technology Facilities Council, UK.
# All rights reserved.
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.plugins import DDPPlugin
# imports from stemdl
import time,sys, os, math, glob,argparse, yaml, decimal
import torch.backends.cudnn as cudnn
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms, models
from tqdm import tqdm
from sklearn.metrics import f1_score
import torch.nn as nn
import numpy as np
from pathlib import Path
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
# MLCommons logging
from mlperf_logging import mllog
import logging
# Custom dataset class
class NPZDataset(Dataset):
    """Serves (tensor, label) samples from .npz files in a directory.

    Each .npz archive holds one sample: a "data" array (converted to a
    torch tensor) and a "label" array whose first element is the target.
    """

    def __init__(self, npz_root):
        # One sample per .npz file found directly under npz_root.
        self.files = glob.glob(npz_root + "/*.npz")

    def __getitem__(self, index):
        archive = np.load(self.files[index])
        features = torch.from_numpy(archive["data"])
        label = archive["label"][0]
        return (features, label)

    def __len__(self):
        return len(self.files)
# StemdlModel
class StemdlModel(pl.LightningModule):
    """ResNet-50 classifier for the STEMDL benchmark (231 classes).

    Targets are one-hot encoded and the network is optimized with MSE
    loss against the raw outputs, matching the original benchmark setup.
    """

    def __init__(self):
        super().__init__()
        self.input_size = 128  # expected input resolution
        self.num_classes = 231
        self.model_name = "resnet50"
        # Train from scratch (no ImageNet weights); replace the final FC
        # layer so it emits one output per STEMDL class.
        self.model = models.resnet50(pretrained=False)
        self.num_ftrs = self.model.fc.in_features
        self.model.fc = nn.Linear(self.num_ftrs, self.num_classes)
        self.params_to_update = self.model.parameters()
        self.feature_extract = False

    # forward step
    def forward(self, x):
        """Return raw class scores for a batch of images."""
        embedding = self.model(x)
        return embedding

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        return optimizer

    def _shared_step(self, batch):
        """Compute MSE loss between predictions and one-hot targets."""
        x, y = batch
        x_hat = self.model(x)
        # NOTE(review): MSE against one-hot targets mirrors the original
        # benchmark; cross-entropy would be the conventional choice.
        y = F.one_hot(y, num_classes=self.num_classes).float()
        return F.mse_loss(x_hat, y)

    def training_step(self, train_batch, batch_idx):
        loss = self._shared_step(train_batch)
        self.log('train_loss', loss)
        return loss

    def validation_step(self, val_batch, batch_idx):
        loss = self._shared_step(val_batch)
        # BUG FIX: validation loss was logged under 'train_loss',
        # clobbering the training metric; log it as 'val_loss'.
        self.log('val_loss', loss)
        return loss

    def test_step(self, test_batch, batch_idx):
        loss = self._shared_step(test_batch)
        self.log('test_loss', loss)
        return loss

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        x, y = batch
        return self.model(x)
#
# Running the code:
# python stemdl_classification.py --config stemdlConfig.yaml
#
def main():
    """Run the STEMDL benchmark end to end: train, test and predict.

    Reads a YAML config, builds NPZ datasets and loaders, trains a
    StemdlModel with PyTorch Lightning (DDP, fp16), then runs testing and
    inference, recording timings and MLCommons (mllog) events. Rank-0
    guards keep event/metric logging single-writer under DDP.
    """
    # Read command line arguments
    parser = argparse.ArgumentParser(description='Stemdl command line arguments',\
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', default=os.path.expanduser('./stemdlConfig.yaml'), help='path to config file')
    args = parser.parse_args()
    configFile = os.path.expanduser(args.config)
    # Read YAML file
    with open(configFile, 'r') as stream:
        config = yaml.safe_load(stream)
    # MLCommons logging
    mlperf_logfile = os.path.expanduser(config['mlperf_logfile'])
    mllog.config(filename=mlperf_logfile)
    mllogger = mllog.get_mllogger()
    logger = logging.getLogger(__name__)
    # Initiase trainer object
    trainer = pl.Trainer(gpus=int(config['gpu']), num_nodes=int(config['nodes']), precision=16, strategy="ddp", max_epochs=int(config['epochs']))
    # Submission metadata is emitted once, from the rank-0 process only.
    if (trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.SUBMISSION_BENCHMARK, value=config['benchmark'])
        mllogger.event(key=mllog.constants.SUBMISSION_ORG, value=config['organisation'])
        mllogger.event(key=mllog.constants.SUBMISSION_DIVISION, value=config['division'])
        mllogger.event(key=mllog.constants.SUBMISSION_STATUS, value=config['status'])
        mllogger.event(key=mllog.constants.SUBMISSION_PLATFORM, value=config['platform'])
        mllogger.start(key=mllog.constants.INIT_START)
        # Values extracted from stemdlConfig.yaml
        mllogger.event(key='number_of_ranks', value=config['gpu'])
        mllogger.event(key='number_of_nodes', value=config['nodes'])
        mllogger.event(key='accelerators_per_node', value=config['accelerators_per_node'])
        mllogger.end(key=mllog.constants.INIT_STOP)
        mllogger.event(key=mllog.constants.EVAL_START, value="Start:Loading datasets")
    # Datasets
    train_dataset = NPZDataset(os.path.expanduser(config['train_dir']))
    val_dataset = NPZDataset(os.path.expanduser(config['val_dir']))
    test_dataset = NPZDataset(os.path.expanduser(config['test_dir']))
    predict_dataset = NPZDataset(os.path.expanduser(config['inference_dir']))
    # Data loaders
    train_loader = DataLoader(dataset=train_dataset,batch_size=int(config['batchsize']),num_workers=4)
    val_loader = DataLoader(dataset=val_dataset,batch_size=int(config['batchsize']),num_workers=4)
    test_loader = DataLoader(dataset=test_dataset,batch_size=int(config['batchsize']),num_workers=4)
    predict_loader = DataLoader(dataset=predict_dataset,batch_size=int(config['batchsize']),num_workers=4)
    if (trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_STOP, value="Stop: Loading datasets")
        mllogger.event(key=mllog.constants.EVAL_START, value="Start: Loading model")
    # Model
    model = StemdlModel()
    if (trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_STOP, value="Stop: Loading model")
    # Training
    samples = train_dataset.__len__()
    # NOTE(review): this divides by GPUs only, not GPUs*nodes as the
    # inference figure below does — confirm which is intended.
    samples_per_gpu = int(samples)/int(config['gpu'])
    start = time.time()
    if (trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_START, value="Start: Training")
    trainer.fit(model, train_loader, val_loader)
    if (trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_STOP, value="Stop: Training")
    # Wall-clock training time, averaged per epoch for the benchmark log.
    diff = time.time() - start
    elapsedTime = decimal.Decimal(diff)
    training_per_epoch = elapsedTime/int(config['epochs'])
    training_per_epoch_str = f"{training_per_epoch:.2f}"
    log_file = os.path.expanduser(config['log_file'])
    if (trainer.global_rank == 0):
        with open(log_file, "a") as logfile:
            logfile.write(f"Stemdl training, samples_per_gpu={samples_per_gpu}, resnet={config['resnet']}, epochs={config['epochs']}, bs={config['batchsize']}, nodes={config['nodes']}, gpu={config['gpu']}, training_per_epoch={training_per_epoch_str}\n")
    # Testing
    if(trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_START, value="Start: Testing")
    trainer.test(model, test_loader)
    if(trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_STOP, value="Stop: Testing")
    # Inference
    number_inferences = predict_dataset.__len__()
    number_inferences_per_gpu = int(number_inferences)/(int(config['gpu'])*int(config['nodes']))
    if(trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_START, value="Start: Inferences")
    start = time.time()
    predictions = trainer.predict(model, dataloaders=predict_loader)
    diff = time.time() - start
    if(trainer.global_rank == 0):
        mllogger.event(key=mllog.constants.EVAL_STOP, value="Stop: Inferences")
        elapsedTime = decimal.Decimal(diff)
        time_per_inference = elapsedTime/number_inferences
        time_per_inference_str = f"{time_per_inference:.6f}"
    if(trainer.global_rank == 0):
        with open(log_file, "a") as logfile:
            logfile.write(f"Stemdl inference, inferences_per_gpu={number_inferences_per_gpu}, bs={config['batchsize']}, nodes={config['nodes']}, gpu={config['gpu']}, time_per_inference={time_per_inference_str}\n")
        mllogger.end(key=mllog.constants.RUN_STOP, value="STEMLD benchmark run finished", metadata={'status': 'success'})


if __name__ == "__main__":
    main()
|
dado = input('Digite algo: ')
tipo = type(dado)
print('vc digitou um dado tipo: \033[35m{}'.format(tipo))

# Table of (predicate, label) pairs replaces the chain of independent ifs;
# every matching classification is printed, exactly as before.
verificacoes = [
    (dado.isalpha, '\033[36mAlpha'),
    (dado.isalnum, '\033[36mAlphaNum'),
    (dado.isnumeric, '\033[36mNumerico'),
    (dado.isprintable, '\033[36mPrintavel'),
    (dado.isupper, '\033[36mUpper'),
    (dado.islower, '\033[36mMinusculo'),
]
for predicado, rotulo in verificacoes:
    if predicado():
        print(rotulo)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import os
import sys
import re
import traceback
import argparse
from ruamel.yaml import YAML
yaml = YAML()

# Python 2 only: re-expose sys.setdefaultencoding so implicit str<->unicode
# conversions use UTF-8 (this API was removed in Python 3).
reload(sys)
sys.setdefaultencoding('utf-8')

# Splits a post into its YAML front matter (group 1) and Markdown body (group 2).
content_regex = re.compile(r'---([\s\S]*?)---([\s\S]*)')
# (pattern, replacement) pairs applied to the body text during conversion.
replace_regex_list = [
    # (re.compile(r'^```(.*?)\n(.*?)\n```', re.DOTALL), r'{{< highlight \1 >}}\n\2\n{{< /highlight >}}'),
    (re.compile(r'<!--\smore\s-->'), '<!--more-->'),
    (re.compile(r'\{%\sraw\s%\}(.*)\{%\sendraw\s%\}'), r'\1')
]
# Jekyll post filenames: YYYY-MM-DD-slug (date in group 1, slug in group 2).
filename_regex = re.compile(r'(\d+-\d+-\d+)-(.*)')
# class MyDumper(yaml.RoundTripDumper):
#     def increase_indent(self, flow=False, sequence=None, indentless=False):
#         return super(MyDumper, self).increase_indent(
#             flow=flow,
#             sequence=sequence,
#             indentless=indentless)
def convert_front_matter(front_data, post_date, url):
    """Rewrite a Jekyll front-matter mapping in place into Hugo's schema.

    front_data is the parsed YAML mapping, post_date the date parsed from
    the filename (assumed non-None here — TODO confirm for undated posts),
    and url the Jekyll-style permalink.
    """
    # Jekyll-specific keys with no Hugo equivalent.
    del front_data['id']
    del front_data['layout']
    front_data["date"] = post_date.strftime('%Y-%m-%dT%H:%M:%S+08:00')
    # Jekyll allows space-separated tag/category strings; Hugo expects lists.
    for tag in ['tags', 'categories', 'category']:
        if tag in front_data and isinstance(front_data[tag], basestring):
            front_data[tag] = front_data[tag].split(' ')
    if 'category' in front_data:
        front_data['categories'] = front_data['category']
        del front_data['category']
    # pop-and-reassign moves each key to the end of the (ordered) mapping,
    # controlling the key order in the dumped YAML.
    front_data['title'] = front_data.pop("title")
    # front_data['author'] =
    # front_data['tagline'] = front_data.pop("tagline", None)
    front_data['date'] = front_data.pop("date")
    front_data['draft'] = False
    # front_data['type'] = "single"
    front_data['layout'] = "single"
    front_data['comment'] = True
    front_data['keywords'] = []
    front_data['description'] = None
    # Keep the old Jekyll URLs alive as Hugo aliases so inbound links work.
    _url = url.split('/')
    _url.pop(-1)
    front_data['aliases'] = [
        "/{}{}".format(front_data['categories'][0], url),
        "/{}{}.html".format(front_data['categories'][0], "/".join(_url))
    ]
    front_data['categories'] = front_data.pop("categories")
    front_data['tags'] = front_data.pop("tags")
    front_data['original'] = True
    front_data['reference'] = []
    # Debug trace of the converted mapping (Python 2 print statement).
    print front_data, post_date, url
def convert_body_text(body_text):
    """Apply every (pattern, replacement) pair in replace_regex_list to the body."""
    converted = body_text
    for pattern, replacement in replace_regex_list:
        converted = pattern.sub(replacement, converted)
    return converted
def write_out_file(front_data, body_text, out_file_path):
    """Write a Hugo post: YAML front matter between '---' fences, then the body."""
    # Match Hugo's conventional YAML indentation before dumping.
    yaml.indent(mapping=2, sequence=4, offset=2)
    with open(out_file_path, 'w') as out:
        out.write('---\n')
        yaml.dump(front_data, out)
        out.write('---\n')
        out.write('\n'.join(body_text.splitlines()))
def parse_from_filename(filename):
    """Derive (post_date, url) from a Jekyll post filename.

    '2016-01-02-my-post.md' -> (datetime(2016, 1, 2), '/2016/01/02/my-post/').
    Filenames without a date prefix yield (None, '/<stem>').
    """
    stem = os.path.splitext(filename)[0]
    matched = filename_regex.match(stem)
    if not matched:
        return None, '/' + stem
    post_date = datetime.strptime(matched.group(1), '%Y-%m-%d')
    slug = matched.group(2)
    return post_date, '/%s/%s/' % (post_date.strftime('%Y/%m/%d'), slug)
def convert_post(file_path, out_dir):
    """Convert one Jekyll post and write the Hugo version into out_dir.

    Returns True on success, False when the file cannot be parsed.
    """
    filename = os.path.basename(file_path)
    post_date, url = parse_from_filename(filename)
    content = ''
    with open(file_path, 'r') as f:
        content = f.read()
    # Group 1: YAML front matter; group 2: Markdown body.
    m = content_regex.match(content)
    if not m:
        print 'Error match content: %s' % file_path
        return False
    front_data = yaml.load(m.group(1))
    if not front_data:
        print 'Error load yaml: %s' % file_path
        return False
    '''
    if 'layout' in front_data:
        if post_date:
            out_dir = os.path.join(out_dir, front_data['layout'], str(post_date.year))
        else:
            out_dir = os.path.join(out_dir, front_data['layout'])
    '''
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_file_path = os.path.join(out_dir, filename)
    # Mutates front_data in place into Hugo's front-matter schema.
    convert_front_matter(front_data, post_date, url)
    body_text = convert_body_text(m.group(2))
    write_out_file(front_data, body_text, out_file_path)
    return True
def convert(src_dir, out_dir):
    """Walk src_dir, convert every Markdown post, and mirror the directory
    layout under out_dir.  Prints per-file progress and a final summary."""
    count = 0
    error = 0
    for root, dirs, files in os.walk(src_dir):
        for filename in files:
            try:
                # Only convert Markdown posts; skip repository meta files.
                if os.path.splitext(filename)[1] != '.md' \
                        or filename in ['README.md', 'LICENSE.md']:
                    continue
                file_path = os.path.join(root, filename)
                # Recreate the post's path relative to src_dir under out_dir.
                common_prefix = os.path.commonprefix(
                    [src_dir, file_path])
                rel_path = os.path.relpath(
                    os.path.dirname(file_path), common_prefix)
                real_out_dir = os.path.join(out_dir, rel_path)
                if convert_post(file_path, real_out_dir):
                    print 'Converted: %s' % file_path
                    count += 1
                else:
                    error += 1
            except Exception as e:
                # Keep going on individual failures; they are tallied and
                # reported in the summary line below.
                error += 1
                print 'Error convert: %s \nException: %s' % (file_path, e)
                print traceback.print_exc()
    print '--------\n%d file converted! %s' % (count, 'Error count: %d' % error if error > 0 else 'Congratulation!!!')
if __name__ == '__main__':
    # CLI entry point: <script> <jekyll_post_dir> <hugo_root_dir>
    parser = argparse.ArgumentParser(
        description='Convert Jekyll blog to Hugo')
    parser.add_argument(
        'src_dir', help='jekyll post dir')
    parser.add_argument(
        'out_dir', help='hugo root path')
    args = parser.parse_args()
    convert(
        os.path.abspath(args.src_dir),
        os.path.abspath(args.out_dir))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import json
import argparse
import re
import numpy as np
import csv
import sys
from get_target_stat import load_stat
##print all class with its counts##
if __name__ == '__main__':
    """
    (F)python get_prune_topic.py --mode train --stat stat_train.csv --threshold 30
    (T)python get_prune_topic.py --stat stat_train.csv --threshold 15
    --stat youtube_output.csv --threshold 30
    """
    # CLI: read a topic-occurrence statistics CSV and prune categories whose
    # count falls below the threshold (delegated to load_stat).
    parser = argparse.ArgumentParser()
    parser.add_argument('--stat', default = '', action='store', help='statistic data file to topic-poparity occurrences')
    parser.add_argument('--threshold', default = 10, type=int, help='threshold to prune category')
    # Echo the raw invocation for logging/debugging.
    print(sys.argv)
    try:
        args = parser.parse_args(sys.argv[1:])
        load_stat(args.stat, args.threshold)
    except:
        # NOTE(review): the bare except also swallows SystemExit raised by
        # argparse on bad arguments, so any failure falls through to the
        # help text and the script exits with status 0 — confirm intended.
        parser.print_help()
|
# -*- coding: utf-8 -*-
"""Contains functions for generating and reading expression counts."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import itertools
import shutil
import subprocess
import tempfile
from typing import Any, Iterable
import pathlib2 as pathlib
import numpy as np
import pandas as pd
import toolz
from imfusion.external.feature_counts import feature_counts
# Disable E1101 checks which stumble on pandas classes.
# pylint: disable=E1101
def generate_exon_counts(
        bam_files,  # type: Iterable[pathlib.Path]
        gtf_path,  # type: pathlib.Path
        names=None,  # type: Dict[str, str]
        extra_kws=None,  # type: Dict[str, Iterable[Any]]
        tmp_dir=None,  # type: pathlib.Path
        keep_tmp=False,  # type: bool
):  # type: (...) -> pd.DataFrame
    """Generates exon counts for given bam files using featureCounts.

    This function is used to generate a m-by-n matrix (m = number of samples,
    n = number of exons) of exon expression counts. This matrix is generated
    using featureCounts, whose results are then parsed and returned.

    Parameters
    ----------
    bam_files : list[pathlib.Path]
        List of paths to the bam files for which counts should be generated.
    gtf_path : pathlib.Path
        Path to the GTF file containing gene features.
    names : dict[str, str]
        Alternative names to use for the given bam
        files. Keys of the dict should correspond to bam file paths, values
        should reflect the sample names that should be used in the
        resulting count matrix.
    extra_kws : dict[str, tuple]:
        Dictionary of extra arguments that should be passed to feature counts.
        Keys should correspond to argument names (including dashes),
        values should be tuples containing the argument values.
    tmp_dir : pathlib.Path
        Temp directory to use for generating counts.
    keep_tmp : bool
        Whether to keep the temp directory (default = False).

    Returns
    -------
    pandas.DataFrame
        DataFrame containing counts. The index of the DataFrame contains
        gene ids corresponding to exons in the gff file, the columns
        correspond to samples/bam files. Column names are either the bam
        file paths, or the alternative sample names if given.
    """
    # Set exon-level parameters for feature counts.
    default_kws = {
        '-f': True,  # Feature level
        '-t': 'exonic_part',  # Use 'exonic_part' features.
        '--minOverlap': '1',  # Minimum overlap with exon.
        '-O': True  # Include if spanning 1+ exons.
    }
    # Caller-supplied options override the exon-level defaults above.
    extra_kws = toolz.merge(default_kws, extra_kws or {})

    # Create tmpdir if needed.
    if tmp_dir is None:
        tmp_dir = pathlib.Path(tempfile.mkdtemp())
    elif not tmp_dir.exists():
        tmp_dir.mkdir(parents=True)

    # Run feature counts and read output.
    try:
        output_path = tmp_dir / 'counts.txt'
        feature_counts(
            bam_files=bam_files,
            gtf_path=gtf_path,
            output_path=output_path,
            extra_kws=extra_kws)
        counts = _read_feature_count_output(output_path, names=names)
    finally:
        # Remove the temp output even if featureCounts or parsing failed.
        if not keep_tmp:
            shutil.rmtree(str(tmp_dir))

    # Drop/rename columns.
    counts.drop('Length', axis=1, inplace=True)
    counts.rename(
        columns={
            'Geneid': 'gene_id',
            'Chr': 'chr',
            'Start': 'start',
            'End': 'end',
            'Strand': 'strand'
        },
        inplace=True)

    # Set and sort by index.
    counts.set_index(
        ['gene_id', 'chr', 'start', 'end', 'strand'], inplace=True)
    counts.sort_index(inplace=True)

    return counts
def _read_feature_count_output(file_path, names=None):
# type (pathlib.Path, Dict[str, str]) -> pd.DataFrame
"""Reads counts from featureCounts output.
Parameters
----------
file_path : pathlib.Path
Path to count file.
names : Optional[Dict[str, str]]
Optional dictionary that maps featureCount column names, which are
typically file paths, to more readable sample names.
"""
# Read counts.
counts = pd.read_csv(
str(file_path), sep='\t', comment='#', dtype={'Chr': 'str'})
# If names are given, rename columns.
if names is not None:
for name in names:
if name not in counts.columns:
# TODO: Use logging.
print('Warning: missing sample {} for renaming'.format(name))
counts = counts.rename(columns=names)
return counts
def read_exon_counts(file_path):
    # type: (pathlib.Path) -> pd.DataFrame
    """Reads exon counts from an IM-Fusion expression file.

    Parameters
    ----------
    file_path : pathlib.Path
        Path to the exon count file.

    Returns
    -------
    pd.DataFrame
        Matrix of exon counts, indexed by (gene_id, chr, start, end,
        strand), with one column per sample.
    """
    index_columns = ['gene_id', 'chr', 'start', 'end', 'strand']
    # Keep 'chr' as a string so contigs like 'X' and '1' sort consistently.
    return pd.read_csv(
        str(file_path),
        sep='\t',
        comment='#',
        dtype={'chr': 'str'},
        index_col=index_columns)
def normalize_counts(counts):
    # type: (pd.DataFrame) -> pd.DataFrame
    """Normalizes counts for sequencing depth using median-of-ratios.

    Normalizes gene expression counts for differences in sequencing depth
    between samples using the median-of-ratios approach described in DESeq2.

    Parameters
    ----------
    counts : pd.DataFrame
        Matrix of gene counts, with genes along the rows and samples
        along the columns.

    Returns
    -------
    pd.DataFrame
        Matrix of normalized expression counts.
    """
    # log(0) for all-zero genes is expected inside the size-factor
    # computation; suppress the divide warnings it would trigger.
    with np.errstate(divide='ignore'):
        factors = estimate_size_factors(counts)
        normalized = counts / factors
    return normalized
def estimate_size_factors(counts):
    # type: (pd.DataFrame) -> np.Array
    """Calculates size factors using median-of-ratios.

    Calculates per-sample normalization factors for the median-of-ratios
    approach (DESeq2) used to correct for differences in sequencing depth.

    Parameters
    ----------
    counts : pd.DataFrame
        Matrix of gene counts, with genes along the rows and samples
        along the columns.

    Returns
    -------
    np.Array
        Array of normalization factors, with one entry per sample.
    """

    def _column_factor(column, log_geo_means):
        # type: (pd.DataFrame, np.Array) -> np.Array
        # Only genes with a finite geometric mean (no zero counts anywhere)
        # and a non-zero count in this sample contribute to the median.
        log_column = np.log(column)
        usable = np.isfinite(log_geo_means) & (column > 0)
        return np.exp(np.median((log_column - log_geo_means)[usable]))

    # Work in float so the log/exp arithmetic is well-defined.
    counts = counts.astype(float)

    # Per-gene log geometric mean across samples, then one factor per column.
    log_geo_means = np.mean(np.log(counts), axis=1)
    return np.apply_along_axis(
        _column_factor, axis=0, arr=counts, log_geo_means=log_geo_means)
|
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core.validators import MaxValueValidator, MinValueValidator
from datetime import datetime
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
# Create your models here.
class Stream(models.Model):
    """
    This is a class for the Stream Object

    Attributes:
        title: string title of the stream (defaults to 'New Stream')
        description: string description of the stream
        channel: string that contains the source of the stream
        service: string that contains the website where the source is coming from (youtube, twitch, etc)
        live: boolean if the stream is live or not
        featured: boolean marking the stream as featured
        viewers: integer that holds the amount of current viewers
    """
    title = models.CharField(max_length = 100, default = 'New Stream')
    description = models.TextField()
    channel = models.CharField(max_length = 100, blank = True)
    service = models.CharField(max_length = 100, blank = True)
    live = models.BooleanField(default = False)
    featured = models.BooleanField(default = False)
    viewers = models.PositiveIntegerField(default = 0)
    def __str__(self):
        return self.title
    @property
    def streamer(self):
        # Reverse OneToOne accessor created by Profile.stream
        # (related_name='profile').
        return self.profile
    @property
    def streamer_name(self):
        # Username of the user whose Profile owns this stream.
        return self.profile.user.username
class Profile(models.Model):
    """
    This is a class for the Profiles Object

    Attributes:
        user: The user related to the profile. (FK)
        stream: The stream related to the profile. (FK)
        uploads: reverse FK manager of Note objects uploaded by this
            profile (created by Note.uploader, related_name='uploads').
    """
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name = 'profile')
    stream = models.OneToOneField(Stream, related_name = 'profile', on_delete=models.CASCADE)

    def __str__(self):
        return self.user.username

    def __unicode__(self):
        # Python 2 compatibility; mirrors __str__.
        return self.user.username

    @property
    def name(self):
        return self.user.username

    # NOTE(review): the previous `uploads` property returned `self.uploads`,
    # which recursed infinitely AND shadowed the reverse related manager
    # created by Note.uploader (related_name='uploads'). Removing the
    # property restores the intended `profile.uploads` manager.
class Note(models.Model):
    """
    This is the class for the Notes object

    Attributes:
        title: string that holds the title of the Notes
        description: optional string description of the Notes
        uploader: the Profile who the Notes is uploaded by (FK)
        service: the service in which the Notes is being linked from
        link: the link to the Notes
        featured: boolean marking the Notes as featured
        noteType: the type of Notes (one of NOTES_CHOICES)
    """
    # Allowed values for noteType.
    NOTES_CHOICES = (
        ('video', 'video'),
        ('pdf', 'pdf'),
        ('csv', 'csv'),
        ('audio', 'audio'),
        ('compressed', 'compressed'),
        ('text', 'text')
    )
    title = models.CharField(max_length = 100)
    description = models.TextField(blank = True)
    uploader = models.ForeignKey(Profile, on_delete = models.CASCADE, related_name='uploads')
    service = models.CharField(max_length = 100, blank = True)
    link = models.CharField(max_length = 100, blank = True)
    featured = models.BooleanField(default = False)
    noteType = models.CharField(max_length = 15, choices = NOTES_CHOICES, default = 'text')
    @property
    def name(self):
        # Username of the uploading user, for display purposes.
        return self.uploader.user.username
@receiver(post_save, sender = User)
def create_profile_for_new_user(sender, created, instance, **kwargs):
    """Create a blank Stream and Profile whenever a new User is saved."""
    if not created:
        return
    new_stream = Stream()
    new_stream.save()
    Profile(user = instance, stream = new_stream).save()
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
import os
import torch
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # (B, d1, d2, ...) -> (B, d1*d2*...)
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class InceptionV3(nn.Module):
    """Pretrained InceptionV3 network returning feature maps."""

    # Index of default block of inception to return,
    # corresponds to output of final average pooling
    DEFAULT_BLOCK_INDEX = 3

    # Maps feature dimensionality to their output blocks indices
    BLOCK_INDEX_BY_DIM = {
        64: 0,    # First max pooling features
        192: 1,   # Second max pooling features
        768: 2,   # Pre-aux classifier features
        2048: 3   # Final average pooling features
    }

    def __init__(self,
                 output_blocks=(DEFAULT_BLOCK_INDEX,),
                 resize_input=True,
                 normalize_input=True,
                 requires_grad=False,
                 save_dir='/srv/glusterfs/csevim/datasets/emotione/checkpoints/inception_mood_emo_4d',
                 load_epoch=7):
        """Build pretrained InceptionV3

        Parameters
        ----------
        output_blocks : sequence of int
            Indices of blocks to return features of. Possible values are:
                - 0: corresponds to output of first max pooling
                - 1: corresponds to output of second max pooling
                - 2: corresponds to output which is fed to aux classifier
                - 3: corresponds to output of final average pooling
            Note: was previously a mutable default argument (a list), which
            is a Python anti-pattern; a tuple default is equivalent and safe.
        resize_input : bool
            If true, bilinearly resizes input to width and height 299 before
            feeding input to model. As the network without fully connected
            layers is fully convolutional, it should be able to handle inputs
            of arbitrary size, so resizing might not be strictly needed
        normalize_input : bool
            If true, scales the input from range (0, 1) to the range the
            pretrained Inception network expects, namely (-1, 1).
            NOTE(review): the corresponding scaling in forward() is currently
            commented out, so this flag has no effect.
        requires_grad : bool
            If true, parameters of the model require gradient. Possibly useful
            for finetuning the network
        save_dir : str
            Directory containing the fine-tuned checkpoint to load.
        load_epoch : int
            Epoch number of the checkpoint file ('net_epoch_<N>.pth').
        """
        super(InceptionV3, self).__init__()

        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        self.save_dir = save_dir
        self.load_epoch = load_epoch
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        assert self.last_needed_block <= 3, \
            'Last possible output block index is 3'

        self.blocks = nn.ModuleList()

        # Base architecture; weights come from the fine-tuned checkpoint
        # loaded in _load_network(), not from torchvision's pretrained model.
        self.model = models.inception_v3(pretrained=False)

        # Replace the aux classifier head: 4-dim bounded output (Hardtanh).
        num_ftrs = self.model.AuxLogits.fc.in_features
        layers = list()
        layers.append(nn.Linear(in_features=num_ftrs, out_features=4))
        layers.append(nn.Hardtanh())
        self.model.AuxLogits.fc = nn.Sequential(*layers)

        # Aux emotion head: 4 -> 512 -> 8.
        emo_layers = list()
        emo_layers.append(nn.Linear(in_features=4, out_features=512))
        emo_layers.append(nn.ReLU())
        emo_layers.append(nn.Linear(in_features=512, out_features=8))
        self.aux_emo_layer = nn.Sequential(*emo_layers)

        # Replace the main classifier head the same way.
        num_ftrs = self.model.fc.in_features
        layers = list()
        layers.append(nn.Linear(in_features=num_ftrs, out_features=4))
        layers.append(nn.Hardtanh())
        self.model.fc = nn.Sequential(*layers)

        # Main emotion head: 4 -> 512 -> 8.
        emo_layers = list()
        emo_layers.append(nn.Linear(in_features=4, out_features=512))
        emo_layers.append(nn.ReLU())
        emo_layers.append(nn.Linear(in_features=512, out_features=8))
        self.emo_layer = nn.Sequential(*emo_layers)

        self.model.to(self.device)
        self.aux_emo_layer.to(self.device)
        self.emo_layer.to(self.device)
        self._load_network()

        # Block 0: input to maxpool1
        block0 = [
            self.model.Conv2d_1a_3x3,
            self.model.Conv2d_2a_3x3,
            self.model.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        self.blocks.append(nn.Sequential(*block0))

        # Block 1: maxpool1 to maxpool2
        if self.last_needed_block >= 1:
            block1 = [
                self.model.Conv2d_3b_1x1,
                self.model.Conv2d_4a_3x3,
                nn.MaxPool2d(kernel_size=3, stride=2)
            ]
            self.blocks.append(nn.Sequential(*block1))

        # Block 2: maxpool2 to aux classifier
        if self.last_needed_block >= 2:
            block2 = [
                self.model.Mixed_5b,
                self.model.Mixed_5c,
                self.model.Mixed_5d,
                self.model.Mixed_6a,
                self.model.Mixed_6b,
                self.model.Mixed_6c,
                self.model.Mixed_6d,
                self.model.Mixed_6e,
            ]
            self.blocks.append(nn.Sequential(*block2))

        # Block 3: aux classifier to final avgpool
        if self.last_needed_block >= 3:
            block3 = [
                self.model.Mixed_7a,
                self.model.Mixed_7b,
                self.model.Mixed_7c,
                nn.AdaptiveAvgPool2d(output_size=(1, 1))
            ]
            self.blocks.append(nn.Sequential(*block3))

        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Get Inception feature maps

        Parameters
        ----------
        inp : torch.autograd.Variable
            Input tensor of shape Bx3xHxW. Values are expected to be in
            range (0, 1)

        Returns
        -------
        List of torch.autograd.Variable, corresponding to the selected output
        block, sorted ascending by index
        """
        outp = []
        x = inp

        if self.resize_input:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=False)

        # Normalization to (-1, 1) intentionally disabled; see __init__ note.
        #if self.normalize_input:
            #x = 2 * x - 1  # Scale from range (0, 1) to range (-1, 1)
        #     x = x/255  # Scale from range (0, 1) to range (-1, 1)

        for idx, block in enumerate(self.blocks):
            x = block(x)
            if idx in self.output_blocks:
                outp.append(x)
            # Skip blocks past the deepest requested one.
            if idx == self.last_needed_block:
                break

        return outp

    def _load_network(self):
        """Load model, emo_layer and aux_emo_layer weights from checkpoint."""
        load_filename = 'net_epoch_%s.pth' % self.load_epoch
        load_path = os.path.join(self.save_dir, load_filename)
        # Fixed: the original message had no %s placeholder, so the assert
        # itself raised TypeError when the file was missing.
        assert os.path.exists(load_path), \
            'Weights file not found at %s. Have you trained a model!? We are not providing one' % load_path
        checkpoint = torch.load(load_path, map_location='cuda:0') if torch.cuda.is_available() else torch.load(load_path, map_location='cpu')
        self.model.load_state_dict(checkpoint['model'])
        self.emo_layer.load_state_dict(checkpoint['emo_layer'])
        self.aux_emo_layer.load_state_dict(checkpoint['aux_emo_layer'])
        print('loaded net: %s' % load_path)
|
"""
Command for getting helptext for another command
"""
from ..command import BaseCommand
class HelpCommand(BaseCommand):
    """
    Displays help for a command

    Required arguments:
        If left empty will list all available commands on system
        Otherwise pass in a command,
        Command (i.e. !dankmemes)

    Supported options:
        None
    """

    def __init__(self, message):
        super(HelpCommand, self).__init__(message)
        self._command = "!help"

    def validate(self):
        # Zero args (list commands) and one arg (specific command) are both
        # valid, so validation always passes.
        return True, "OK"

    def run(self):
        # Imported lazily to avoid a circular import with all_commands.
        from ..all_commands import COMMANDS
        if self._args:
            requested = self._args[0]
            # Fixed: an unknown command name used to raise KeyError; report
            # it to the user instead of crashing.
            if requested not in COMMANDS:
                return self.code_wrap(
                    "Unknown command: {}\nUse !help to list available commands".format(requested))
            return self.code_wrap(COMMANDS[requested].help())
        else:
            ret_str = "Usage: !help <command>\nPrint help information for the command specified"
            ret_str += "\n\nAvailable Commands: \n"
            for command, obj in COMMANDS.items():
                # This will skip aliases
                if not isinstance(obj, dict):
                    ret_str += "- " + command + " - " + obj.info() + "\n"
            return self.code_wrap(ret_str)

    @staticmethod
    def info():
        return "Displays help text for a command"

    @staticmethod
    def help():
        return """
        Displays help for a command

        Required arguments:
            Command (i.e. !dankmemes)

        Supported options:
            None
        """
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import re
import numpy as np
from ..fitter import initial_period
class TestFitter(object):
    """Tests for rv.fitter."""

    def setup(self):
        pass

    def teardown(self):
        pass

    def test_initial_period(self):
        """Verify the construction of period vectors.
        """
        periods = initial_period(N=2)
        expected = np.array([6.28318531e-02, 6.28318531e-05])
        assert np.allclose(periods, expected)
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pretrains a recurrent language model.
Computational time:
2 days to train 100000 steps on 1 layer 1024 hidden units LSTM,
256 embeddings, 400 truncated BP, 256 minibatch and on single GPU (Pascal
Titan X, cuDNNv5).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import tensorflow as tf
import graphs
import train_utils
FLAGS = tf.app.flags.FLAGS
def main(_):
    """Trains Language Model."""
    tf.logging.set_verbosity(tf.logging.INFO)
    # Place variables on parameter servers when running distributed.
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
        language_model = graphs.get_model()
        train_op, loss, global_step = language_model.language_model_training()
        train_utils.run_training(train_op, loss, global_step)


if __name__ == '__main__':
    tf.app.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
# Pull in the base settings, then override the publish-specific ones below.
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = 'https://zentropi.org'
# Absolute URLs are used in the published output.
RELATIVE_URLS = False
SITENAME = 'zentropi.org'
SITETITLE = 'Zentropi.org'
SITESUBTITLE = 'Script Your World'
SITEDESCRIPTION = ''
SITELOGO = SITEURL + '/images/logo.png'
FAVICON = SITEURL + '/images/logo.png'
BROWSER_COLOR = '#333'
ROBOTS = 'index, follow'
# Atom feeds are generated only for the published site.
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
# Clean the output directory before each publish build.
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
import unittest
#import Changeling
from mongodec.changeling import Changeling, replace_arg, convert_arg_soup
class TestChangeling(unittest.TestCase):
    """Unit tests for Changeling and its helper functions."""

    def test_Changeling(self):
        """ Simple changeling test """
        # Fixed: a stray `return` at the top of this method made every
        # assertion below it unreachable, silently disabling the test.

        # Test that we can make a simple dummy class
        class Foo(object):
            def __init__(inst, val, prop=None):
                inst.val = val
                inst.prop = prop

            def f(self, arg):
                return self.val + arg

        # and that this class works as expected
        foo_instance = Foo(20, prop='harambe died for our sins')
        self.assertEqual(foo_instance.f(12), 32)

        # now make simple wrapper for Foo.f
        def dummy_wrap(func, cdict, callargs):
            return func(arg=callargs['arg'] + 400)

        c_foo_instance = Changeling(foo_instance,
                                    cdict={'Foo_methods': {'f': dummy_wrap}})
        self.assertEqual(c_foo_instance.f(12), 432)
        self.assertEqual(c_foo_instance.f(12, no_changeling=True), 32)
        self.assertEqual(c_foo_instance.prop, 'harambe died for our sins')

        c_wrap_all = Changeling(foo_instance,
                                cdict={'Foo_methods': {'f': dummy_wrap},
                                       'Foo_wrap_all': dummy_wrap})
        # wrap_all stacks on top of the per-method wrapper: 12 + 400 + 400 + 20
        self.assertEqual(c_wrap_all.f(12), 832)
        self.assertEqual(c_wrap_all.f(12, no_changeling=True), 32)

    def test_convert_arg_soup(self):
        """ Tests we can convert args/kwargs to just kwargs """
        def test_func(arg1, arg2, kwarg1=None, kwarg2=None):
            pass
        callargs_1 = convert_arg_soup(test_func, 1, 2, 3, 4)
        self.assertEqual(callargs_1, {'arg1': 1, 'arg2': 2,
                                      'kwarg1': 3, 'kwarg2': 4})
        callargs_2 = convert_arg_soup(test_func, 1, 2)
        self.assertEqual(callargs_2, {'arg1': 1, 'arg2': 2,
                                      'kwarg1': None, 'kwarg2': None})
        callargs_3 = convert_arg_soup(test_func, 1, 2, kwarg2=4, kwarg1=3)
        self.assertEqual(callargs_3, {'arg1': 1, 'arg2': 2,
                                      'kwarg1': 3, 'kwarg2': 4})

    def test_replace_arg(self):
        """ Tests we can replace args by name """
        def dummy_replacer(argname, callargs=None, cdict=None):
            callargs = callargs or {}
            callargs[argname] = (cdict or {}).get(argname, 420)
            return callargs

        def f(arg1, arg2):
            return arg1 + arg2
        self.assertEqual(f(1, 2), 3)
        wrap_1 = replace_arg('arg1', dummy_replacer)
        self.assertEqual(wrap_1(f, {'arg1': 2, 'arg2': 10}), 430)
        wrap_2 = replace_arg('arg1', dummy_replacer, cdict={'arg1': 990})
        self.assertEqual(wrap_2(f, {'arg1': 2, 'arg2': 10}), 1000)


if __name__ == '__main__':
    unittest.main()
from fastapi import Depends, APIRouter, Request
from starlette.responses import RedirectResponse
from loguru import logger
from app.internal.utils import get_current_user
from app.dependencies import get_db
from app.internal.google_connect import get_credentials, fetch_save_events
from app.routers.profile import router as profile
# Router for the Google Calendar sync endpoint, mounted under /google.
router = APIRouter(
    prefix="/google",
    tags=["sync"],
    responses={404: {"description": "Not found"}},
)
@router.get("/sync")
async def google_sync(request: Request,
                      session=Depends(get_db)) -> RedirectResponse:
    '''Sync with Google - if the user never synced with Google, this function
    takes the user to a consent screen to use their Google Calendar data with
    the app, then redirects back to the profile page.
    '''
    user = get_current_user(session)  # getting active user
    # getting valid credentials
    credentials = get_credentials(user=user, session=session)
    if credentials is None:
        # Fixed: previously the error was logged but fetch_save_events was
        # still called with credentials=None; now the fetch is skipped.
        logger.error("GoogleSync isn't available - missing client_secret.json")
    else:
        # fetch and save the events coming from Google Calendar
        fetch_save_events(credentials=credentials, user=user, session=session)
    url = profile.url_path_for('profile')
    return RedirectResponse(url=url)
|
"""Sample code for training models"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Type
import numpy as np
import torch
from opacus import PrivacyEngine
from opacus.utils.batch_memory_manager import BatchMemoryManager
from opacus.validators import ModuleValidator
from tqdm import tqdm
from .data import IndexCachingBatchMemoryManager
from .utils import Logger, predict, validate_model
from .core import ( # isort:skip
latent_reweigh,
reweigh,
setup_adaptive_clipped_dpsgd,
setup_weighted_dpsgd,
create_teacher_loaders,
create_weighted_teacher_loaders,
laplacian_aggregator,
)
# Use the first available CUDA device, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train_vanilla(
    train_loader: torch.utils.data.DataLoader,
    val_loader: Optional[torch.utils.data.DataLoader],
    model_class: Type[torch.nn.Module],
    optim_class: Type[torch.optim.Optimizer],
    loss_fn: torch.nn.Module,
    epochs: int,
    **kwargs,
) -> Tuple[torch.nn.Module, Mapping[str, Sequence[Any]]]:
    """Train a model in the given environment.

    Args:
        train_loader (torch.utils.data.DataLoader):
            The training data loader.
        val_loader (Optional[torch.utils.data.DataLoader]):
            The validation data loader. If None, validation is not performed.
        model_class (Type[torch.nn.Module]):
            The class of the model to be used during training.
        optim_class (Type[torch.optim.Optimizer]):
            The class of the optimizer to be used during training.
        loss_fn (torch.nn.Module):
            The loss function.
        epochs (int):
            Number of epochs to train for.
        **kwargs:
            Passed to optim_class constructor.

    Returns:
        Tuple[torch.nn.Module, Mapping[str, Sequence[Any]]]:
            The trained model and a dictionary consisting of train-time metrics.
    """
    model = model_class()
    model.to(device)
    # Rebuild the loader over the same dataset so training always shuffles,
    # regardless of how the caller configured the original loader.
    train_loader = torch.utils.data.DataLoader(
        train_loader.dataset, batch_size=train_loader.batch_size, shuffle=True
    )
    criterion = loss_fn
    optimizer = optim_class(model.parameters(), **kwargs)
    logger = Logger()
    for _ in range(epochs):
        model.train()
        epoch_losses = []
        epoch_accs = []
        for batch in tqdm(train_loader):
            optimizer.zero_grad()
            images = batch[0].to(device)
            target = batch[1].to(device)
            output = model(images)
            loss = criterion(output, target)
            # Per-batch top-1 accuracy, computed on CPU copies.
            preds = np.argmax(output.detach().cpu().numpy(), axis=1)
            labels = target.detach().cpu().numpy()
            acc = (preds == labels).mean()
            epoch_losses.append(loss.item())
            epoch_accs.append(acc)
            loss.backward()
            optimizer.step()
        logger.record(epoch_losses, epoch_accs)
        if val_loader is not None:
            logger.record_val(*validate_model(model, val_loader, criterion))
        logger.log()
    return model, logger.get_metrics()
def train_dpsgd(
    train_loader: torch.utils.data.DataLoader,
    val_loader: Optional[torch.utils.data.DataLoader],
    model_class: Type[torch.nn.Module],
    optim_class: Type[torch.optim.Optimizer],
    loss_fn: torch.nn.Module,
    target_epsilon: float,
    target_delta: float,
    max_grad_norm: float,
    epochs: int,
    max_physical_batch_size: int = 128,
    **kwargs,
) -> Tuple[torch.nn.Module, Mapping[str, Sequence[Any]]]:
    """Train a model with DP-SGD in the given environment.

    Args:
        train_loader (torch.utils.data.DataLoader):
            The training data loader.
        val_loader (Optional[torch.utils.data.DataLoader]):
            The validation data loader. If None, validation is not performed.
        model_class (Type[torch.nn.Module]):
            The class of the model to be used during training.
        optim_class (Type[torch.optim.Optimizer]):
            The class of the optimizer to be used during training.
        loss_fn (torch.nn.Module):
            The loss function.
        target_epsilon (float):
            The target epsilon for DP-SGD.
        target_delta (float):
            The target delta for DP-SGD.
        max_grad_norm (float):
            Gradient clipping bound for DP-SGD.
        epochs (int):
            Number of epochs to train for.
        max_physical_batch_size (int, optional):
            Maximum physical batch size for memory manager. Defaults to 128.
        **kwargs:
            Passed to optim_class constructor.

    Returns:
        Tuple[torch.nn.Module, Mapping[str, Sequence[Any]]]:
            The trained model and a dictionary consisting of train-time metrics.
    """
    model = model_class()
    # Replace layers Opacus cannot handle (e.g. unsupported normalization)
    # and verify the result is DP-compatible.
    model = ModuleValidator.fix(model)
    assert not ModuleValidator.validate(model, strict=False)
    model = model.to(device)
    criterion = loss_fn
    optimizer = optim_class(model.parameters(), **kwargs)
    privacy_engine = PrivacyEngine()
    # Wraps model/optimizer/loader so that noise is calibrated to reach
    # target_epsilon after `epochs` passes over the data.
    model, optimizer, train_loader = privacy_engine.make_private_with_epsilon(
        module=model,
        optimizer=optimizer,
        data_loader=train_loader,
        epochs=epochs,
        target_epsilon=target_epsilon,
        target_delta=target_delta,
        max_grad_norm=max_grad_norm,
    )
    logger = Logger()
    for _ in range(epochs):
        model.train()
        epoch_losses = []
        epoch_accs = []
        # Splits logical batches into physical ones that fit in memory while
        # keeping the DP accounting per logical batch.
        with BatchMemoryManager(
            data_loader=train_loader,
            max_physical_batch_size=max_physical_batch_size,
            optimizer=optimizer,
        ) as memory_safe_data_loader:
            for batch in tqdm(memory_safe_data_loader):
                optimizer.zero_grad()
                images = batch[0].to(device)
                target = batch[1].to(device)
                output = model(images)
                loss = criterion(output, target)
                preds = np.argmax(output.detach().cpu().numpy(), axis=1)
                labels = target.detach().cpu().numpy()
                acc = (preds == labels).mean()
                epoch_losses.append(loss.item())
                epoch_accs.append(acc)
                loss.backward()
                optimizer.step()
        # Privacy spent so far at the requested delta.
        epsilon = privacy_engine.get_epsilon(target_delta)
        logger.record(epoch_losses, epoch_accs)
        if val_loader is not None:
            logger.record_val(*validate_model(model, val_loader, criterion))
        logger.log(epsilon=epsilon, delta=target_delta)
    return model, logger.get_metrics()
def train_dpsgd_weighted(
    train_loader: torch.utils.data.DataLoader,
    val_loader: Optional[torch.utils.data.DataLoader],
    model_class: Type[torch.nn.Module],
    optim_class: Type[torch.optim.Optimizer],
    loss_fn: torch.nn.Module,
    target_epsilon: float,
    target_delta: float,
    max_grad_norm: float,
    epochs: int,
    max_physical_batch_size: int = 128,
    weighting: str = "latent",
    vae: Optional[torch.nn.Module] = None,
    weights: Optional[np.ndarray] = None,
    labels: Optional[np.ndarray] = None,
    alpha: float = 0.01,
    k: int = 16,
    **kwargs,
) -> Tuple[torch.nn.Module, Mapping[str, Sequence[Any]]]:
    """Train a model with DP-SGD-W in the given environment.

    Args:
        train_loader (torch.utils.data.DataLoader):
            The training data loader.
        val_loader (Optional[torch.utils.data.DataLoader]):
            The validation data loader. If None, validation is not performed.
        model_class (Type[torch.nn.Module]):
            The class of the model to be used during training.
        optim_class (Type[torch.optim.Optimizer]):
            The class of the optimizer to be used during training.
        loss_fn (torch.nn.Module):
            The loss function.
        target_epsilon (float):
            The target epsilon for DP-SGD-W.
        target_delta (float):
            The target delta for DP-SGD-W.
        max_grad_norm (float):
            Gradient clipping bound for DP-SGD-W.
        weighting (str):
            The scheme to use for weighting the data.
            Can be one of ["custom", "latent", "sensitive_attr"].
            If set to "custom", then the weights must be provided in the `weights`
            argument. If set to "latent", then the weights are computed using the VAE,
            which must be provided in the `vae` argument. If set to "sensitive_attr",
            then the weights are computed in order to rebalance the distribution of
            the labels, which must be provided in the `labels` argument.
            Defaults to "latent".
        epochs (int):
            The number of epochs to train for.
        max_physical_batch_size (int, optional):
            Maximum physical batch size for memory manager. Defaults to 128.
        vae (Optional[torch.nn.Module]):
            The VAE to use for weighting if weighting is set to "latent".
            Defaults to None.
        weights (np.ndarray, optional):
            The weights to use for the reweighing if weighting is set to "custom".
            Defaults to None.
        labels (np.ndarray, optional):
            The labels to use for the reweighing if weighting is set to "sensitive_attr".
        alpha (float):
            The weight smoothing parameter for latent reweighing if weighting is
            set to "latent". Defaults to 0.01.
        k (int):
            The number of latent bins for latent reweighing if weighing is set
            to "latent". Defaults to 16.
        **kwargs:
            Passed to optim_class constructor.

    Returns:
        Tuple[torch.nn.Module, Mapping[str, Sequence[Any]]]:
            The trained model and a dictionary consisting of train-time metrics.
    """
    print("Reweighing...")
    # Resolve per-sample weights according to the chosen scheme; "custom"
    # uses the `weights` argument as-is.
    if weighting == "latent":
        if vae is None:
            raise ValueError("vae cannot be None if weighting is 'latent'")
        weights = latent_reweigh(train_loader, vae, alpha=alpha, k=k)
    elif weighting == "sensitive_attr":
        if labels is None:
            raise ValueError("labels cannot be None if weighting is 'sensitive_attr'")
        weights = reweigh(labels)
    elif weighting != "custom":
        raise ValueError(
            "weighting must be one of ['latent', 'sensitive_attr', 'custom']"
        )
    model = model_class()
    # Make the architecture Opacus-compatible before wrapping it.
    model = ModuleValidator.fix(model)
    assert not ModuleValidator.validate(model, strict=False)
    model.to(device)
    criterion = loss_fn
    optimizer = optim_class(model.parameters(), **kwargs)
    # Project helper that wires weighted sampling into the DP machinery and
    # returns the accountant used for epsilon reporting below.
    train_loader, model, optimizer, accountant = setup_weighted_dpsgd(
        data_loader=train_loader,
        model=model,
        optimizer=optimizer,
        weights=weights,
        target_epsilon=target_epsilon,
        target_delta=target_delta,
        max_grad_norm=max_grad_norm,
        epochs=epochs,
    )
    logger = Logger()
    print("Training Model...")
    for _ in range(epochs):
        model.train()
        epoch_losses = []
        epoch_accs = []
        with BatchMemoryManager(
            data_loader=train_loader,
            max_physical_batch_size=max_physical_batch_size,
            optimizer=optimizer,
        ) as memory_safe_data_loader:
            for batch in tqdm(memory_safe_data_loader):
                optimizer.zero_grad()
                images = batch[0].to(device)
                target = batch[1].to(device)
                output = model(images)
                loss = criterion(output, target)
                preds = np.argmax(output.detach().cpu().numpy(), axis=1)
                labels = target.detach().cpu().numpy()
                acc = (preds == labels).mean()
                epoch_losses.append(loss.item())
                epoch_accs.append(acc)
                loss.backward()
                optimizer.step()
        epsilon = accountant.get_epsilon(target_delta)
        logger.record(epoch_losses, epoch_accs)
        if val_loader is not None:
            logger.record_val(*validate_model(model, val_loader, criterion))
        logger.log(epsilon=epsilon, delta=target_delta)
    return model, logger.get_metrics()
def train_dpsgdf(
    train_loader: torch.utils.data.DataLoader,
    val_loader: Optional[torch.utils.data.DataLoader],
    model_class: Type[torch.nn.Module],
    optim_class: Type[torch.optim.Optimizer],
    loss_fn: torch.nn.Module,
    target_epsilon: float,
    target_delta: float,
    base_clipping_threshold: float,
    epochs: int,
    group_labels: torch.Tensor,
    max_physical_batch_size: int = 512,
    log_thresholds: bool = True,
    **kwargs,
) -> Tuple[torch.nn.Module, Mapping[str, Any]]:
    """Train a model with DPSGD-F in the given environment.

    Note that if the lot size is larger than `max_physical_batch_size`, the
    batch memory manager will split it into two lots which will be processed
    separately by the adaptive clipper.

    Args:
        train_loader (torch.utils.data.DataLoader):
            The training data loader.
        val_loader (Optional[torch.utils.data.DataLoader]):
            The validation data loader. If None, validation is not performed.
        model_class (Type[torch.nn.Module]):
            The class of the model to be used during training.
        optim_class (Type[torch.optim.Optimizer]):
            The class of the optimizer to be used during training.
        loss_fn (torch.nn.Module):
            The loss function.
        target_epsilon (float):
            The target epsilon for DP-SGD.
        target_delta (float):
            The target delta for DP-SGD.
        base_clipping_threshold (float):
            Base gradient clipping bound for DP-SGD.
        epochs (int):
            Number of epochs to train for.
        group_labels (torch.Tensor):
            The group labels for the data.
        max_physical_batch_size (int, optional):
            Maximum physical batch size for memory manager. Defaults to 512.
        log_thresholds (bool, optional):
            Logs the clipping thresholds at each iteration, storing them in class
            variable ``DPSGDFOptimizer.thresholds``. Defaults to True.
        **kwargs:
            Passed to optim_class constructor.

    Returns:
        Tuple[torch.nn.Module, Mapping[str, Any]]:
            The trained model and a dictionary consisting of train-time metrics.
    """
    model = model_class()
    # Make the architecture Opacus-compatible before wrapping it.
    model = ModuleValidator.fix(model)
    assert not ModuleValidator.validate(model, strict=False)
    model = model.to(device)
    criterion = loss_fn
    optimizer = optim_class(model.parameters(), **kwargs)
    # Groups must be labeled 0..n_groups-1 with no gaps, since they index
    # per-group clipping thresholds.
    n_groups = group_labels.max() + 1
    if not torch.all(torch.unique(group_labels) == torch.arange(n_groups)):
        raise ValueError(
            "Group labels must be unique and sequential starting from 0. \
            i.e. 0, 1, 2, ..."
        )
    train_loader, model, optimizer, accountant = setup_adaptive_clipped_dpsgd(
        data_loader=train_loader,
        model=model,
        optimizer=optimizer,
        target_epsilon=target_epsilon,
        target_delta=target_delta,
        base_clipping_threshold=base_clipping_threshold,
        epochs=epochs,
        n_groups=n_groups,
        log_thresholds=log_thresholds,
    )
    logger = Logger()
    print("Training Model...")
    for _ in range(epochs):
        model.train()
        epoch_losses = []
        epoch_accs = []
        # Index-caching variant so each physical batch can be mapped back to
        # its samples' group labels.
        with IndexCachingBatchMemoryManager(
            data_loader=train_loader,
            max_physical_batch_size=max_physical_batch_size,
            optimizer=optimizer,
        ) as memory_safe_data_loader:
            for batch in tqdm(memory_safe_data_loader):
                optimizer.zero_grad()
                idxs = memory_safe_data_loader.get_indices()
                batch_groups = group_labels[idxs].to(device)
                images = batch[0].to(device)
                target = batch[1].to(device)
                output = model(images)
                loss = criterion(output, target)
                preds = np.argmax(output.detach().cpu().numpy(), axis=1)
                labels = target.detach().cpu().numpy()
                acc = (preds == labels).mean()
                epoch_losses.append(loss.item())
                epoch_accs.append(acc)
                loss.backward()
                # The adaptive clipper needs the groups present in this batch
                # before it can take a step.
                optimizer.set_batch_params(group_labels=batch_groups)
                optimizer.step()
        epsilon = accountant.get_epsilon(target_delta)
        logger.record(epoch_losses, epoch_accs)
        if val_loader is not None:
            logger.record_val(*validate_model(model, val_loader, criterion))
        logger.log(epsilon=epsilon, delta=target_delta)
    logger.set_metric(thresholds=optimizer.logged_thresholds)
    return model, logger.get_metrics()
def train_pate(
    train_loader: torch.utils.data.DataLoader,
    val_loader: Optional[torch.utils.data.DataLoader],
    student_loader: torch.utils.data.DataLoader,
    model_class: Type[torch.nn.Module],
    optim_class: Type[torch.optim.Optimizer],
    loss_fn: torch.nn.Module,
    n_teachers: int,
    target_epsilon: float,
    target_delta: float,
    epochs: int,
    **kwargs,
) -> Tuple[torch.nn.Module, Mapping[str, Any]]:
    """Train a model with PATE in the given environment.

    Args:
        train_loader (torch.utils.data.DataLoader):
            The training dataloader used to train the teacher ensemble model.
        val_loader (Optional[torch.utils.data.DataLoader]):
            The validation data loader. If None, validation is not performed.
        student_loader (torch.utils.data.DataLoader):
            The training dataloader for the public data used to train the student model.
        model_class (Type[torch.nn.Module]):
            The class of the model to be used during training.
        optim_class (Type[torch.optim.Optimizer]):
            The class of the optimizer to be used during training.
        loss_fn (torch.nn.Module):
            The loss function.
        n_teachers (int):
            The number of teachers to use in the ensemble.
        target_epsilon (float):
            The target epsilon for the private label aggregation.
        target_delta (float):
            The target delta for the private label aggregation.
        epochs (int):
            The number of epochs to train for.
        **kwargs:
            Passed to optim_class constructor.

    Returns:
        Tuple[torch.nn.Module, Mapping[str, Any]]:
            The trained model and a dictionary consisting of train-time metrics.
    """
    # Partition the private training data into disjoint per-teacher loaders.
    teacher_loaders = create_teacher_loaders(
        dataset=train_loader.dataset,
        n_teachers=n_teachers,
        batch_size=train_loader.batch_size,
    )
    criterion = loss_fn
    teachers = []
    teacher_metrics = []
    print(f"Training {n_teachers} Teacher Models...")
    for i in range(n_teachers):
        model = model_class()
        model.to(device)
        optimizer = optim_class(model.parameters(), **kwargs)
        model.train()
        losses = []
        accs = []
        for _ in tqdm(range(epochs)):
            epoch_losses = []
            epoch_accs = []
            for batch in teacher_loaders[i]:
                optimizer.zero_grad()
                images = batch[0].to(device)
                target = batch[1].to(device)
                output = model(images)
                loss = criterion(output, target)
                preds = np.argmax(output.detach().cpu().numpy(), axis=1)
                labels = target.detach().cpu().numpy()
                acc = (preds == labels).mean()
                epoch_losses.append(loss.item())
                epoch_accs.append(acc)
                loss.backward()
                optimizer.step()
            losses.append(np.mean(epoch_losses))
            accs.append(np.mean(epoch_accs))
        if val_loader is not None:
            val_losses, val_accs = validate_model(model, val_loader, criterion)
            val_loss = np.mean(val_losses)
            val_acc = np.mean(val_accs)
        print(
            f"Teacher Model: {i + 1}",
            f"Loss: {losses[-1]:.2f}",
            f"Acc@1: {accs[-1] * 100:.2f}",
            f"Val Loss: {val_loss:.2f}" if val_loader is not None else "",
            f"Val Acc@1: {val_acc * 100:.2f}" if val_loader is not None else "",
        )
        # Move the trained teacher off the GPU to free memory for the next one.
        teachers.append(model.cpu())
        teacher_metrics.append(
            {
                "loss": losses[-1],
                "acc": accs[-1],
                "val_loss": val_loss if val_loader is not None else None,
                "val_acc": val_acc if val_loader is not None else None,
            }
        )
    print("Aggregating Teachers...")
    n_train_student = len(student_loader.dataset)
    teacher_preds = torch.zeros((n_teachers, n_train_student), dtype=torch.long)
    for i, model in enumerate(tqdm(teachers)):
        teacher_preds[i] = predict(model, student_loader)
    # Noisy (Laplacian) vote aggregation produces the private labels the
    # student is trained on.
    labels = laplacian_aggregator(teacher_preds, target_epsilon)
    def gen_student_loader(student_loader, labels):
        # Pair each student batch with its slice of the aggregated labels.
        for i, batch in enumerate(iter(student_loader)):
            yield batch[0], labels[i * len(batch[0]) : (i + 1) * len(batch[0])]
    student_model = model_class()
    student_model.to(device)
    optimizer = optim_class(student_model.parameters(), **kwargs)
    logger = Logger()
    print("Training Student Model...")
    for _ in range(epochs):
        student_model.train()
        epoch_losses = []
        epoch_accs = []
        generator = gen_student_loader(student_loader, labels)
        for images, target in tqdm(generator, total=len(student_loader)):
            optimizer.zero_grad()
            images = images.to(device)
            target = target.to(device)
            output = student_model(images)
            loss = criterion(output, target)
            preds = np.argmax(output.detach().cpu().numpy(), axis=1)
            lbls = target.detach().cpu().numpy()
            acc = (preds == lbls).mean()
            epoch_losses.append(loss.item())
            epoch_accs.append(acc)
            loss.backward()
            optimizer.step()
        logger.record(epoch_losses, epoch_accs)
        if val_loader is not None:
            logger.record_val(*validate_model(student_model, val_loader, criterion))
        logger.log()
    logger.set_metric(teacher_metrics=teacher_metrics)
    return student_model, logger.get_metrics()
def train_reweighed_sftpate(
    train_loader: torch.utils.data.DataLoader,
    val_loader: Optional[torch.utils.data.DataLoader],
    student_loader: torch.utils.data.DataLoader,
    model_class: Type[torch.nn.Module],
    optim_class: Type[torch.optim.Optimizer],
    loss_fn: torch.nn.Module,
    n_teachers: int,
    target_epsilon: float,
    target_delta: float,
    epochs: int,
    weights: np.ndarray,
    **kwargs,
) -> Tuple[torch.nn.Module, Mapping[str, Any]]:
    """Train a model with SF_T-PATE in the given environment.

    Trains an ensemble of ``n_teachers`` teacher models on weighted splits of
    the private data, aggregates their predictions on the public student data
    with a Laplacian (epsilon-DP) mechanism, and trains a student model on the
    resulting noisy labels.

    Args:
        train_loader (torch.utils.data.DataLoader):
            The training dataloader used to train the teacher ensemble model.
        val_loader (Optional[torch.utils.data.DataLoader]):
            The validation data loader. If None, validation is not performed.
        student_loader (torch.utils.data.DataLoader):
            The training dataloader for the public data used to train the student model.
        model_class (Type[torch.nn.Module]):
            The class of the model to be used during training.
        optim_class (Type[torch.optim.Optimizer]):
            The class of the optimizer to be used during training.
        loss_fn (torch.nn.Module):
            The loss function.
        n_teachers (int):
            The number of teachers to use in the ensemble.
        target_epsilon (float):
            The target epsilon for DP-SGD-W.
        target_delta (float):
            The target delta for DP-SGD-W.
        epochs (int):
            The number of epochs to train for.
        weights (np.ndarray):
            The weights to use for reweighing the teacher ensemble.
        **kwargs:
            Passed to optim_class constructor.

    Returns:
        Tuple[torch.nn.Module, Mapping[str, Any]]:
            The trained model and a dictionary consisting of train-time metrics.
    """
    # NOTE(review): target_delta is accepted but never used below; only
    # target_epsilon reaches the Laplacian aggregator — confirm this is intended.
    # Split the private training data into n_teachers weighted subsets.
    teacher_loaders = create_weighted_teacher_loaders(
        dataset=train_loader.dataset,
        n_teachers=n_teachers,
        batch_size=train_loader.batch_size,
        weights=weights,
    )
    criterion = loss_fn
    teachers = []
    teacher_metrics = []
    print(f"Training {n_teachers} Teacher Models...")
    for i in range(n_teachers):
        model = model_class()
        model.to(device)
        optimizer = optim_class(model.parameters(), **kwargs)
        model.train()
        losses = []
        accs = []
        for _ in tqdm(range(epochs)):
            epoch_losses = []
            epoch_accs = []
            for batch in teacher_loaders[i]:
                optimizer.zero_grad()
                images = batch[0].to(device)
                target = batch[1].to(device)
                output = model(images)
                loss = criterion(output, target)
                # Top-1 accuracy for this batch, computed on CPU copies.
                preds = np.argmax(output.detach().cpu().numpy(), axis=1)
                labels = target.detach().cpu().numpy()
                acc = (preds == labels).mean()
                epoch_losses.append(loss.item())
                epoch_accs.append(acc)
                loss.backward()
                optimizer.step()
            losses.append(np.mean(epoch_losses))
            accs.append(np.mean(epoch_accs))
        if val_loader is not None:
            val_losses, val_accs = validate_model(model, val_loader, criterion)
            val_loss = np.mean(val_losses)
            val_acc = np.mean(val_accs)
        # val_loss/val_acc are only evaluated when val_loader is not None,
        # so the conditional f-strings below never hit an unbound name.
        print(
            f"Teacher Model: {i + 1}",
            f"Loss: {losses[-1]:.2f}",
            f"Acc@1: {accs[-1] * 100:.2f}",
            f"Val Loss: {val_loss:.2f}" if val_loader is not None else "",
            f"Val Acc@1: {val_acc * 100:.2f}" if val_loader is not None else "",
        )
        # Move the trained teacher back to CPU to free device memory.
        teachers.append(model.cpu())
        teacher_metrics.append(
            {
                "loss": losses[-1],
                "acc": accs[-1],
                "val_loss": val_loss if val_loader is not None else None,
                "val_acc": val_acc if val_loader is not None else None,
            }
        )
    print("Aggregating Teachers...")
    n_train_student = len(student_loader.dataset)
    teacher_preds = torch.zeros((n_teachers, n_train_student), dtype=torch.long)
    for i, model in enumerate(tqdm(teachers)):
        teacher_preds[i] = predict(model, student_loader)
    # Noisy majority vote over the teacher predictions (epsilon-DP).
    labels = laplacian_aggregator(teacher_preds, target_epsilon)

    def gen_student_loader(student_loader, labels):
        # Pair each public batch with its slice of the aggregated labels.
        # NOTE(review): assumes student_loader iterates in a fixed order
        # (shuffle=False) so slices align with predict()'s output — confirm.
        for i, batch in enumerate(iter(student_loader)):
            yield batch[0], labels[i * len(batch[0]) : (i + 1) * len(batch[0])]

    student_model = model_class()
    student_model.to(device)
    optimizer = optim_class(student_model.parameters(), **kwargs)
    logger = Logger()
    print("Training Student Model...")
    for _ in range(epochs):
        student_model.train()
        epoch_losses = []
        epoch_accs = []
        generator = gen_student_loader(student_loader, labels)
        for images, target in tqdm(generator, total=len(student_loader)):
            optimizer.zero_grad()
            images = images.to(device)
            target = target.to(device)
            output = student_model(images)
            loss = criterion(output, target)
            preds = np.argmax(output.detach().cpu().numpy(), axis=1)
            lbls = target.detach().cpu().numpy()
            acc = (preds == lbls).mean()
            epoch_losses.append(loss.item())
            epoch_accs.append(acc)
            loss.backward()
            optimizer.step()
        logger.record(epoch_losses, epoch_accs)
        if val_loader is not None:
            logger.record_val(*validate_model(student_model, val_loader, criterion))
        logger.log()
    logger.set_metric(teacher_metrics=teacher_metrics)
    return student_model, logger.get_metrics()
|
# ==================option.test_options.py=====================
# This module adds some test options for the total project.
# Version: 1.0.0
# Date: 2019.08.07
# =============================================================
import os
from option import BaseOptions
###############################################################
# Test Options Class
###############################################################
class TestOptions(BaseOptions):
    """This class includes testing options. It also includes
    shared options defined in BaseOptions.

    Examples:
        use path:
            >>> cfg = option.TestOptions()
            >>> cfg.TEST_DATA_DIR
        use parameter:
            >>> cfg = option.TestOptions()
            >>> cfg.opts.model_name
    """

    @staticmethod
    def _str2bool(value):
        """Parse a command-line boolean flag.

        ``argparse`` with ``type=bool`` treats every non-empty string —
        including ``'False'`` — as True; this converter handles the usual
        textual spellings correctly.
        """
        if isinstance(value, bool):
            return value
        return value.strip().lower() in ('1', 'true', 't', 'yes', 'y')

    def __init__(self):
        super(TestOptions, self).__init__()
        self.mode = "Test"
        self.opts = self.add_parser()
        # Redefine the paths for the selected model / dataset combination.
        self.CHECKPOINT_DIR = os.path.join(self.CHECKPOINT_DIR, self.opts.model_name + '_' + self.opts.dataroot)
        self.DATA_DIR = os.path.join(self.DATA_DIR, self.opts.dataroot)
        self.TEST_DATA_DIR = os.path.join(self.DATA_DIR, 'test')
        # The label file is optional; the literal string 'None' disables it.
        self.TEST_LABEL_DIR = os.path.join(self.DATA_DIR, self.opts.test_label) \
            if self.opts.test_label != 'None' else 'None'
        assert self.opts.load_checkpoint != 'scratch', "[Error] You must load checkpoint file in Test mode!"
        # Display and save the path or parameter of paths
        self.display_options(self.opts, self.opts.display_path, self.opts.display_param)

    def add_parser(self):
        """Register the test-only command-line options and parse them.

        Returns:
            argparse.Namespace: the parsed options.
        """
        # Dataset base options
        self.parser.add_argument('--dataroot', type=str, default='multi_class_demo',
                                 help="your dataset file name")
        self.parser.add_argument('--test_label', type=str, default='test_split.csv',
                                 help="(.csv or .json)test file name if it has a test label")
        # Test base options
        self.parser.add_argument('--num_test', type=int, default=120,
                                 help='how many test images to run')
        # load checkpoints
        self.parser.add_argument('--load_checkpoint', type=str, default='best',
                                 help=">>> testing from load [index | best | checkpoint_name.pth]")
        # Checkpoints save options
        # FIX: was type=bool, which parses '--save_test_log False' as True.
        self.parser.add_argument('--save_test_log', type=self._str2bool, default=True,
                                 help="whether save the training loss and acc log to the disk")
        # TODO(User) >>> add your own base parse
        # parser.add_argument()
        # TODO(User): End
        opt = self.parser.parse_args()
        return opt
|
"""import special form
Examples:
(import std.io.petr)
"""
from types import ModuleType
from typing import List
import os
import importlib
import importlib.util
import inspect
from mplisp import evaluator
from mplisp.structures import env
def import_module(args: List, node):
    """
    Import all functions from a module and add corresponding symbols
    to the parent environment.

    args[0] must evaluate to a module-name string. Resolution order:
    1. a local '<name>.mplisp' file,
    2. '/usr/lib/mplisp/<name>.mplisp',
    3. an importable Python module (preferring an 'mplispstd' variant
       when the name starts with 'std').
    Returns the imported Python module, or None when an .mplisp file
    was evaluated instead.
    """
    module_name = evaluator.evaluate_node(args[0])
    if not isinstance(module_name, str):
        evaluator.error("(special_forms.module_import.import) 1st param must be of type str", node)
    local_path = module_name + ".mplisp"
    absolute_path = os.path.join("/usr/lib/mplisp", local_path)
    path = ''
    if os.path.isfile(local_path):
        path = local_path
    elif os.path.isfile(absolute_path):
        path = absolute_path
    if path:
        # FIX: previously opened local_path unconditionally, which raised
        # FileNotFoundError whenever the module was found at absolute_path.
        with open(path) as file_object:
            list(evaluator.evaluate(file_object.read(), node.getenv()))
        return None
    # Prefer the bundled 'mplispstd' implementation of std.* modules.
    mplispstd = module_name.replace("std", "mplispstd")
    if module_exists(mplispstd):
        module_name = mplispstd
    elif not module_exists(module_name):
        evaluator.error("(special_forms.module_import.import) package {} not found".format(module_name), node)
    mod = importlib.import_module(module_name)
    return mod
def python_getattr(args: List, node):
    """Call python function from mplisp.

    args[0] evaluates to a module (or a name that is imported on the fly);
    args[1] evaluates to the attribute name. Callable attributes are wrapped
    via function_caller so they are invokable from mplisp; other attributes
    are returned as-is.
    """
    if len(args) != 2:
        evaluator.error("(special_forms.module_import.get_attribute) 2 parameters expected, {} given".format(len(args)), node)
    params = evaluator.evaluate_parallel_args(args)
    if not isinstance(params[0], ModuleType):
        # First argument was not yet a module: import it by name.
        params[0] = import_module(args[0:1], node)
    if not isinstance(params[1], str):
        # FIX: user-facing message said "2st"; corrected to "2nd".
        evaluator.error("(special_forms.module_import.get_attribute) 2nd param must be of type str, {} given".format(type(params[1])), node)
    attr = getattr(params[0], params[1])
    if callable(attr):
        return function_caller(attr)
    return attr
def function_caller(func_obj):
    """Wrap a plain Python callable so it can be invoked from mplisp."""
    def _wrapped(local_args: List, _):
        """Evaluate the mplisp arguments, then delegate to the wrapped callable."""
        return func_obj(*evaluator.evaluate_parallel_args(local_args))
    return _wrapped
def load_module_functions(module):
    """Return all (name, function) member pairs of *module*."""
    predicate = inspect.isfunction
    return inspect.getmembers(module, predicate)
def module_exists(module_name):
    """Return True when *module_name* can be located by the import machinery."""
    spec = importlib.util.find_spec(module_name)
    return spec is not None
|
import data
import datetime
import pytest
def test_fetching_activity():
    """Smoke-test data.fetch_issue_activity against a known repository."""
    frame = data.fetch_issue_activity("equinor/gathering-leto")
    assert len(frame) > 0
    columns = tuple(frame.columns)
    for expected in ("closed", "date", "open"):
        assert expected in columns
    # Rows must already be in chronological order.
    assert all(frame["date"].sort_values() == frame["date"])
    prev_open, prev_closed = 0, 0
    for _, row in frame.iterrows():
        assert isinstance(row["date"], datetime.datetime)
        assert row["open"] >= 0
        # Closed count and the open+closed total may only grow over time.
        assert row["closed"] >= prev_closed
        assert row["open"] + row["closed"] >= prev_open + prev_closed
        prev_open, prev_closed = row["open"], row["closed"]
|
# Program that checks whether a typed letter is "F" or "M" and prints:
# F - Feminino, M - Masculino, otherwise an invalid-input message.
# Normalize the input: uppercase and strip surrounding whitespace
# (input() already returns str, so the old str(...) wrapper was redundant).
sex = input('Digite F ou M:\n').upper().strip()
if sex == 'F':
    print('F = Feminino')
elif sex == 'M':
    print('M = Masculino')
else:
    print('Não é uma resposta valida!!')
# -*- coding: utf8 -*-
__title__ = "FreeCAD Autodesk 3DS Max importer"
__author__ = "Jens M. Plonka"
__url__ = "https://www.github.com/jmplonka/Importer3D"
import FreeCAD, triangulate, numpy, zlib, sys, traceback
from importUtils import missingDependency, canImport, newObject, getValidName, getByte, getShorts, getShort, getInts, getInt, getFloats, getFloat, setEndianess, LITTLE_ENDIAN
from math import degrees
from struct import Struct, unpack
from BasicShapes import Shapes, ViewProviderShapes
try:
import olefile
except:
missingDependency("olefile")
# Pre-compiled reader for box records.
UNPACK_BOX_DATA = Struct('<hihhbff').unpack_from # Index, int, short, short, byte, float, Length
DEBUG = False # Dump chunk content to console?
# Chunk type whose payload is a node's name (decoded as UTF-16 by ByteArrayChunk.setData).
TYP_NAME = 0x0962
# Parsed chunk trees of the individual OLE streams, filled in by the read* functions below.
CLS_DATA = []
CONFIG = []
DLL_DIR_LIST = []
CLS_DIR3_LIST = []
VID_PST_QUE = []
SCENE_LIST = []
# Class GUIDs of scene objects that carry no importable mesh geometry and are skipped.
SKIPPABLE = {
	0x0000000000001002: 'Camera',
	0x0000000000001011: 'Omni',
	0x0000000000001013: 'Free Direct',
	0x0000000000001020: 'Camera Target',
	0x0000000000001040: 'Line',
	0x0000000000001065: 'Rectangle',
	0x0000000000001097: 'Ellipse',
	0x0000000000001999: 'Circle',
	0x0000000000002013: 'Point',
	0x0000000000009125: 'Biped Object',
	0x0000000000876234: 'Dummy',
	0x05622B0D69011E82: 'Compass',
	0x12A822FB76A11646: 'CV Surface',
	0x1EB3430074F93B07: 'Particle View',
	0x2ECCA84028BF6E8D: 'Bone',
	0x3BDB0E0C628140F6: 'VRayPlane',
	0x4E9B599047DB14EF: 'Slider',
	0x522E47057BF61478: 'Sky',
	0x5FD602DF3C5575A1: 'VRayLight',
	0x77566F65081F1DFC: 'Plane',
}
class AbstractChunk():
	"""Base node of the 3ds-Max chunk tree: identity, tree links, decode state."""

	def __init__(self, type, size, level, number):
		# Identity and position inside the chunk tree.
		self.type = type
		self.size = size
		self.level = level
		self.number = number
		# Tree links (filled in by the reader).
		self.parent = None
		self.previous = None
		self.next = None
		# Decode state: 'unknown' until setData recognizes the payload.
		self.unknown = True
		self.format = None
		self.data = None
		self.resolved = False

	def __str__(self):
		indent = " " * self.level
		if self.unknown:
			dump = ":".join("%02x" % (c) for c in self.data)
			return "%s[%4x] %04X: %s" % (indent, self.number, self.type, dump)
		return "%s[%4x] %04X: %s=%s" % (indent, self.number, self.type, self.format, self.data)
class ByteArrayChunk(AbstractChunk):
	"""Leaf chunk: decodes its raw payload by chunk type (strings, ints, floats)."""
	# NOTE: the second constructor argument is the chunk *size* (see
	# ChunkReader.getNextChunk); the parameter name 'data' is historical.
	def __init__(self, type, data, level, number): AbstractChunk.__init__(self, type, data, level, number)
	def set(self, data, name, format, start, end):
		"""Decode data[start:end] with a struct format; keep raw bytes on failure."""
		try:
			self.data = unpack(format, data[start:end])
			self.format = name
			self.unknown = False
		except Exception as e:
			self.data = data
	def setStr16(self, data):
		"""Decode the payload as a UTF-16LE string; keep raw bytes on failure."""
		try:
			self.data = data.decode('UTF-16LE')
			self.format = "Str16"
			self.unknown = False
		except:
			self.data = data
	def setLStr16(self, data):
		"""Decode a length-prefixed UTF-16LE string."""
		try:
			l, o = getInt(data, 0)
			self.data = data[o:o+l*2].decode('utf-16-le')
			# NOTE(review): str-vs-bytes comparison is always False on Python 3,
			# so a trailing NUL is never actually stripped here — confirm.
			if (self.data[-1] == b'\0'): self.data = self.data[0:-1]
			self.format = "LStr16"
			self.unknown = False
		except:
			self.data = data
	def setData(self, data):
		"""Dispatch payload decoding on the chunk type; unknown types keep raw bytes."""
		if (self.type in [0x0340, 0x4001, 0x0456, 0x0962]): self.setStr16(data)
		elif (self.type in [0x2034, 0x2035]): self.set(data, "int{}", '<' + 'I'*int(len(data)/4), 0, len(data))
		elif (self.type in [0x2501, 0x2503, 0x2504, 0x2505, 0x2511]): self.set(data, "float[]", '<' + 'f'*int(len(data)/4), 0, len(data))
		elif (self.type == 0x2510): self.set(data, "struct", '<' + 'f'*int(len(data)/4 - 1) + 'i', 0, len(data))
		elif (self.type == 0x0100): self.set(data, "float", '<f' , 0, len(data))
		else:
			self.unknown = True
			self.data = data
		try:
			# Printing may fail on undecodable content; fall back to a hex dump.
			if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
		except Exception as e:
			self.format = None
			self.unknown = True
			self.data = ":".join("%02x"%(c) for c in data)
			if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
class ClsDir3Chunk(ByteArrayChunk):
	"""Chunk of the class-directory stream: class name plus DLL/GUID record."""
	def __init__(self, type, data, level, number):
		AbstractChunk.__init__(self, type, data, level, number)
		self.dll = None  # resolved later by readClassDirectory3()
	def setData(self, data):
		"""Decode class-directory payloads; anything else becomes a hex dump."""
		if (self.type == 0x2042): self.setStr16(data) # ClsName
		elif (self.type == 0x2060): self.set(data, "struct", '<IQI', 0, 16) # DllIndex, ID, SuperID
		else:
			self.unknown = False
			self.data = ":".join("%02x"%(c) for c in data)
		try:
			if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
		except:
			self.format = None
			self.unknown = False
			self.data = data
			if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
class DllDirChunk(ByteArrayChunk):
	"""Chunk of the 'DllDirectory' stream: DLL description/name strings."""
	def __init__(self, type, data, level, number): AbstractChunk.__init__(self, type, data, level, number)
	def setData(self, data):
		"""Decode both known payload types (0x2039, 0x2037) as UTF-16 strings."""
		if (self.type == 0x2039): self.setStr16(data)
		elif (self.type == 0x2037): self.setStr16(data)
		try:
			if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
		except:
			self.format = None
			self.unknown = False
			self.data = ":".join("%02x"%(c) for c in data)
			if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
class ContainerChunk(AbstractChunk):
	"""A chunk whose payload is itself a sequence of child chunks."""

	def __init__(self, type, data, level, number, primitiveReader=ByteArrayChunk):
		AbstractChunk.__init__(self, type, data, level, number)
		self.primitiveReader = primitiveReader

	def __str__(self):
		if (self.unknown == True):
			return "%s[%4x] %04X" %(" "*self.level, self.number, self.type)
		return "%s[%4x] %04X: %s" %(" "*self.level, self.number, self.type, self.format)

	def getFirst(self, type):
		"""Return the first direct child with the given chunk type, or None.

		NOTE(review): self.children only exists after setData() ran — confirm
		callers never query an unparsed container.
		"""
		for child in self.children:
			if (child.type == type): return child
		return None

	def setData(self, data):
		"""Parse the payload into child chunks, one nesting level deeper.

		(Removed two unused locals, 'previous' and 'next', from the original.)
		"""
		reader = ChunkReader()
		if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
		self.children = reader.getChunks(data, self.level + 1, ContainerChunk, self.primitiveReader)
class SceneChunk(ContainerChunk):
	"""Container chunk of the scene stream; children are SceneChunks as well."""

	def __init__(self, type, data, level, number, primitiveReader=ByteArrayChunk):
		AbstractChunk.__init__(self, type, data, level, number)
		self.primitiveReader = primitiveReader
		self.matrix = None  # transformation, attached during scene processing

	def __str__(self):
		if (self.unknown == True):
			return "%s[%4x] %s" %(" "*self.level, self.number, getClsName(self))
		return "%s[%4x] %s: %s" %(" "*self.level, self.number, getClsName(self), self.format)

	def setData(self, data):
		"""Parse the payload into SceneChunk children.

		(Removed two unused locals, 'previous' and 'next', from the original.)
		"""
		if (DEBUG): FreeCAD.Console.PrintMessage("%s\n" %(self))
		reader = ChunkReader()
		self.children = reader.getChunks(data, self.level + 1, SceneChunk, ByteArrayChunk)
class ChunkReader():
	"""Splits a raw stream into chunks: 2-byte type + 4-byte size headers."""
	def __init__(self, name = None): self.name = name
	def getChunks(self, data, level, containerReader, primitiveReader):
		"""Parse all chunks of *data*; at level 0, also handle gzip and progress."""
		chunks = []
		offset = 0
		if (level == 0):
			t, o = getShort(data, 0)
			l, o = getInt(data, o)
			# 0x8B1F is the little-endian gzip magic — decompress first.
			if (t == 0x8B1F):
				t, o = getInt(data, o)
				if (t == 0x0B000000):
					data = zlib.decompress(data, zlib.MAX_WBITS|32)
		if (level==0):
			progressbar = FreeCAD.Base.ProgressIndicator()
			progressbar.start(" reading '%s'..."%self.name, len(data))
		while offset < len(data):
			old = offset
			offset, chunk = self.getNextChunk(data, offset, level, len(chunks), containerReader, primitiveReader)
			if (level==0):
				# Advance the progress bar by the number of bytes consumed.
				for i in range(offset - old):
					progressbar.next()
			chunks.append(chunk)
		if (level==0): progressbar.stop()
		return chunks
	def getNextChunk(self, data, offset, level, number, containerReader, primitiveReader):
		"""Read one chunk at *offset*; returns (next_offset, chunk).

		A size of 0 signals an 8-byte (64-bit) size field instead; a negative
		size (sign bit set) marks a container chunk with nested children.
		"""
		header = 6
		typ, siz, = unpack("<Hi", data[offset:offset+header])
		chunkSize = siz & 0x7FFFFFFF
		if (siz == 0):
			siz, = unpack("<q", data[offset+header:offset+header+8])
			header += 8
			chunkSize = siz & 0x7FFFFFFFFFFFFFFF
		if (siz < 0):
			chunk = containerReader(typ, chunkSize, level, number, primitiveReader)
		else:
			chunk = primitiveReader(typ, chunkSize, level, number)
		chunkData = data[offset + header:offset + chunkSize]
		chunk.setData(chunkData)
		return offset + chunkSize, chunk
class PointNi3s():
	"""Polygon record: a vertex-index list plus flag-driven extra fields."""

	def __init__(self):
		self.points = None  # vertex index list
		self.flags = 0
		self.fH = 0  # grouping key (see getNGons4i)
		self.f1 = 0
		self.f2 = 0
		self.fA = []

	def __str__(self):
		pts = "/".join("%d" % p for p in self.points)
		extras = ",".join("%X" % f for f in self.fA)
		return "[%s] - %X, %X, %X, [%s]" % (pts, self.fH, self.f1, self.f2, extras)
class Material():
	"""Tiny name -> value store for material properties."""

	def __init__(self):
		self.data = {}

	def set(self, name, value):
		self.data[name] = value

	def get(self, name, default=None):
		"""Return the stored value; *default* when absent or stored as None."""
		value = self.data.get(name)
		if value is None:
			return default
		return value
def getNode(index):
	"""Return the index-th top-level child of the scene chunk, or None."""
	global SCENE_LIST
	if (index < len(SCENE_LIST[0].children)):
		return SCENE_LIST[0].children[index]
	return None
def getNodeParent(node):
	"""Resolve a node's parent via its 0x0960 chunk; None for root nodes."""
	parent = None
	if (node):
		chunk = node.getFirst(0x0960)
		if (chunk is not None):
			idx, offset = getInt(chunk.data, 0)
			parent = getNode(idx)
			if (parent is None):
				FreeCAD.Console.PrintError("parent index %X < %X!\n" %(idx, len(SCENE_LIST)))
	return parent
def getNodeName(node):
	"""Return the node's name from its TYP_NAME chunk, or None."""
	if (node):
		name = node.getFirst(TYP_NAME)
		if (name): return name.data
	return None
def getClass(chunk):
	"""Look the chunk's type up in the class directory; None if out of range."""
	global CLS_DIR3_LIST
	if (chunk.type < len(CLS_DIR3_LIST)):
		return CLS_DIR3_LIST[chunk.type]
	return None
def getDll(container):
	"""Return the DLL-directory entry referenced by a 0x2060 record, or None."""
	global DLL_DIR_LIST
	idx = container.getFirst(0x2060).data[0]  # data = (DllIndex, ID, SuperID)
	if (idx < len(DLL_DIR_LIST)):
		return DLL_DIR_LIST[idx]
	return None
def getGUID(chunk):
	"""Return the class GUID of a chunk; falls back to its raw type id."""
	cls = getClass(chunk)
	if (cls): return cls.getFirst(0x2060).data[1]  # data = (DllIndex, ID, SuperID)
	return chunk.type
def getSuperId(chunk):
	"""Return the super-class id of a chunk, or None if unclassified."""
	cls = getClass(chunk)
	if (cls): return cls.getFirst(0x2060).data[2]  # data = (DllIndex, ID, SuperID)
	return None
def getClsName(chunk):
	"""Return a printable class name for a chunk, or its type id as hex."""
	cls = getClass(chunk)
	if (cls):
		clsName = cls.getFirst(0x2042).data
		try:
			return "'%s'" %(clsName)
		except:
			# Fall back to repr for names that don't format cleanly.
			return "'%r'" %(clsName)
	return u"%04X" %(chunk.type)
def getReferences(chunk):
	"""Resolve the node references listed in the chunk's 0x2034 record."""
	references = []
	refs = chunk.getFirst(0x2034)
	if (refs):
		references = [getNode(idx) for idx in refs.data]
	return references
def getTypedRefernces(chunk):
	"""Resolve the keyed node references from the chunk's 0x2035 record.

	The record starts with one header value, followed by (key, node-index)
	pairs. Returns a dict mapping key -> resolved node.
	(Removed the original's unused local 'type', which also shadowed the builtin.)
	"""
	references = {}
	refs = chunk.getFirst(0x2035)
	if (refs):
		# refs.data[0] is a header value; the (key, index) pairs start at 1.
		offset = 1
		while offset < len(refs.data):
			key = refs.data[offset]
			idx = refs.data[offset + 1]
			offset += 2
			references[key] = getNode(idx)
	return references
def readChunks(ole, name, fileName, containerReader=ContainerChunk, primitiveReader=ByteArrayChunk):
	"""Read the named OLE stream and parse it into a chunk tree."""
	with ole.openstream(name) as file:
		scene = file.read()
	# Debug helper: dump the raw stream to disk.
	# with open(fileName, 'wb') as file:
	#	file.write(scene)
	reader = ChunkReader(name)
	return reader.getChunks(scene, 0, containerReader, primitiveReader)
def readClassData(ole, fileName):
	"""Parse the 'ClassData' stream into the global CLS_DATA list."""
	global CLS_DATA
	CLS_DATA = readChunks(ole, 'ClassData', fileName+'.ClsDat.bin')
def readClassDirectory3(ole, fileName):
	"""Parse the class directory ('ClassDirectory3', older files: 'ClassDirectory')."""
	global CLS_DIR3_LIST
	try:
		CLS_DIR3_LIST = readChunks(ole, 'ClassDirectory3', fileName+'.ClsDir3.bin', ContainerChunk, ClsDir3Chunk)
	except:
		# Older file versions use the 'ClassDirectory' stream instead.
		CLS_DIR3_LIST = readChunks(ole, 'ClassDirectory', fileName+'.ClsDir.bin', ContainerChunk, ClsDir3Chunk)
	# Attach the owning DLL to every class-directory entry.
	for clsDir in CLS_DIR3_LIST:
		clsDir.dll = getDll(clsDir)
def readConfig(ole, fileName):
	"""Parse the 'Config' stream into the global CONFIG list."""
	global CONFIG
	CONFIG = readChunks(ole, 'Config', fileName+'.Cnf.bin')
def readDllDirectory(ole, fileName):
	"""Parse the 'DllDirectory' stream into the global DLL_DIR_LIST."""
	global DLL_DIR_LIST
	DLL_DIR_LIST = readChunks(ole, 'DllDirectory', fileName+'.DllDir.bin', ContainerChunk, DllDirChunk)
def readVideoPostQueue(ole, fileName):
	"""Parse the 'VideoPostQueue' stream into the global VID_PST_QUE."""
	global VID_PST_QUE
	VID_PST_QUE = readChunks(ole, 'VideoPostQueue', fileName+'.VidPstQue.bin')
def getPoint(float, default = 0.0):
	"""Extract a scalar value from a float-controller chunk.

	NOTE(review): the parameter is named 'float', shadowing the builtin —
	kept for interface compatibility.
	"""
	uid = getGUID(float)
	if (uid == 0x2007): # Bezier-Float
		f = float.getFirst(0x7127)
		if (f):
			try:
				return f.getFirst(0x2501).data[0]
			except:
				FreeCAD.Console.PrintWarning("SyntaxError: %s - assuming 0.0!\n" %(float))
		return default
	if (uid == 0x71F11549498702E7): # Float Wire
		# Follow the wire to the referenced controller.
		f = getReferences(float)[0]
		return getPoint(f)
	else:
		FreeCAD.Console.PrintError("Unknown float type 0x%04X=%s!\n" %(uid, float))
	return default
def getPoint3D(chunk, default=0.0):
	"""Collect one scalar per referenced float controller (typically 3 values)."""
	floats = []
	if (chunk):
		refs = getReferences(chunk)
		for float in refs:
			f = getPoint(float, default)
			if (f is not None):
				floats.append(f)
	return floats
def getPosition(pos):
	"""Build a 4x4 translation matrix from a position-controller chunk."""
	mtx = numpy.identity(4, numpy.float32)
	if (pos):
		uid = getGUID(pos)
		if (uid == 0xFFEE238A118F7E02): # => Position XYZ
			pos = getPoint3D(pos)
		elif (uid == 0x0000000000442312): # => TCB Position
			pos = pos.getFirst(0x2503).data
		elif (uid == 0x0000000000002008): # => Bezier Position
			pos = pos.getFirst(0x2503).data
		else:
			pos = None
			FreeCAD.Console.PrintError("Unknown position 0x%04X=%s!\n" %(uid, pos))
		if (pos):
			# Write the translation into the last column.
			mtx[0,3] = pos[0]
			mtx[1,3] = pos[1]
			mtx[2,3] = pos[2]
	return mtx
def getRotation(pos):
	"""Build a 4x4 rotation matrix from a rotation-controller chunk."""
	r = None
	mtx = numpy.identity(4, numpy.float32)
	if (pos):
		uid = getGUID(pos)
		if (uid == 0x2012): # => Euler XYZ
			rot = getPoint3D(pos)
			r = FreeCAD.Rotation(degrees(rot[2]), degrees(rot[1]), degrees(rot[0]))
		elif (uid == 0x0000000000442313): #'TCB Rotation'
			rot = pos.getFirst(0x2504).data
			r = FreeCAD.Rotation(rot[0], rot[1], rot[2], rot[3])
		elif (uid == 0x000000004B4B1003): #'Rotation List'
			refs = getReferences(pos)
			if (len(refs) > 3):
				return getRotation(refs[0])
		elif (uid == 0x3A90416731381913): #'Rotation Wire'
			return getRotation(getReferences(pos)[0])
		else:
			FreeCAD.Console.PrintError("Unknown rotation 0x%04X=%s!\n" %(uid, pos))
	if (r):
		# Convert the FreeCAD rotation into a numpy 4x4 matrix.
		m = FreeCAD.Placement(FreeCAD.Vector(), r).toMatrix()
		mtx = numpy.array([
			[m.A11, m.A12, m.A13, m.A14],
			[m.A21, m.A22, m.A23, m.A24],
			[m.A31, m.A32, m.A33, m.A34],
			[m.A41, m.A42, m.A43, m.A44]], numpy.float32)
	return mtx
def getScale(pos):
	"""Build a 4x4 scale matrix from a scale-controller chunk."""
	mtx = numpy.identity(4, numpy.float32)
	if (pos):
		uid = getGUID(pos)
		if (uid == 0x2010): # => Bezier Scale
			scale = pos.getFirst(0x2501)
			if (scale is None): scale = pos.getFirst(0x2505)
			pos = scale.data
		elif (uid == 0x0000000000442315): # 'TCB Zoom'
			scale = pos.getFirst(0x2501)
			if (scale is None): scale = pos.getFirst(0x2505)
			pos = scale.data
		elif (uid == 0xFEEE238B118F7C01): # 'ScaleXYZ'
			pos = getPoint3D(pos, 1.0)
		else:
			FreeCAD.Console.PrintError("Unknown scale 0x%04X=%s!\n" %(uid, pos))
			# Unknown controller: keep the identity matrix.
			return mtx
		# Write the scale factors onto the diagonal.
		mtx[0,0] = pos[0]
		mtx[1,1] = pos[1]
		mtx[2,2] = pos[2]
	return mtx
def createMatrix(prc):
	"""Build a combined 4x4 transform (position * rotation * scale) from a controller."""
	mtx = numpy.identity(4, numpy.float32)
	uid = getGUID(prc)
	scl = None
	rot = None
	pos = None
	if (uid == 0x2005): # Position/Rotation/Scale
		pos = getPosition(getReferences(prc)[0])
		rot = getRotation(getReferences(prc)[1])
		scl = getScale(getReferences(prc)[2])
	elif (uid == 0x9154) : # BipSlave Control
		# Biped controllers nest their sub-controllers one level deeper.
		bipedSubAnim = getReferences(prc)[2]
		refs = getReferences(bipedSubAnim)
		scl = getScale(getReferences(refs[1])[0])
		rot = getRotation(getReferences(refs[2])[0])
		pos = getPosition(getReferences(refs[3])[0])
	# Multiply in P * R * S order.
	if (pos is not None):
		mtx = numpy.dot(mtx, pos)
	if (rot is not None):
		mtx = numpy.dot(mtx, rot)
	if (scl is not None):
		mtx = numpy.dot(mtx, scl)
	return mtx
def getProperty(properties, idx):
	"""Return the 0x100E child whose leading short equals idx, or None."""
	for child in properties.children:
		if (child.type == 0x100E):
			if (getShort(child.data, 0)[0] == idx): return child
	return None
def getColorMax(colors, idx):
	"""Read an (r, g, b) float triple from property idx (payload offset 15), or None."""
	prp = getProperty(colors, idx)
	if (prp is not None):
		c, o = getFloats(prp.data, 15, 3)
		return (c[0], c[1], c[2])
	return None
def getFloatMax(colors, idx):
	"""Read a single float from property idx (payload offset 15), or None."""
	prp = getProperty(colors, idx)
	if (prp is not None):
		f, o = getFloat(prp.data, 15)
		return f
	return None
def getMatStandard(refs):
	"""Extract colors from a 'Standard' material's parameter blocks; None on failure."""
	material = None
	try:
		if (len(refs) > 2):
			colors = refs[2]
			parameters = getReferences(colors)[0] # ParameterBlock2
			material = Material()
			material.set('ambient', getColorMax(parameters, 0x00))
			material.set('diffuse', getColorMax(parameters, 0x01))
			material.set('specular', getColorMax(parameters, 0x02))
			material.set('emissive', getColorMax(parameters, 0x08))
			material.set('shinines', getFloatMax(parameters, 0x0A))
			transparency = refs[4] # ParameterBlock2
			material.set('transparency', getFloatMax(transparency, 0x02))
	except:
		# Best-effort: report and return whatever was collected (possibly None).
		FreeCAD.Console.PrintError(traceback.format_exc())
		FreeCAD.Console.PrintError('\n')
	return material
def getMatVRay(vry):
	"""Extract the known color slots from a 'VRayMtl' parameter chunk."""
	material = Material()
	try:
		material.set('diffuse', getColorMax(vry, 0x01))
		material.set('ambient', getColorMax(vry, 0x02))
		material.set('specular', getColorMax(vry, 0x05))
		# material.set('emissive', getColorMax(vry, 0x05))
		# material.set('shinines', getFloatMax(vry, 0x0B))
		# material.set('transparency', getFloatMax(vry, 0x02))
	except:
		# Best-effort: report and return whatever was collected.
		FreeCAD.Console.PrintError(traceback.format_exc())
		FreeCAD.Console.PrintError('\n')
	return material
def getMatArchDesign(ad):
	"""Extract the diffuse color from an 'Arch & Design' parameter chunk."""
	material = Material()
	try:
		material.set('diffuse', getColorMax(ad, 0x1A))
		# material.set('ambient', getColorMax(ad, 0x02))
		# material.set('specular', getColorMax(ad, 0x05))
		# material.set('emissive', getColorMax(ad, 0x05))
		# material.set('shinines', getFloatMax(ad, 0x0B))
		# material.set('transparency', getFloatMax(ad, 0x02))
	except:
		# Best-effort: report and return whatever was collected.
		FreeCAD.Console.PrintError(traceback.format_exc())
		FreeCAD.Console.PrintError('\n')
	return material
def adjustMaterial(obj, mat):
	"""Resolve a material chunk by GUID and apply its colors to obj's view provider."""
	material = None
	if (mat is not None):
		uid = getGUID(mat)
		if (uid == 0x0002): # 'Standard'
			refs = getReferences(mat)
			material = getMatStandard(refs)
		elif (uid == 0x0000000000000200): # 'Multi/Sub-Object'
			refs = getReferences(mat)
			# Recurse on the last sub-material; the recursive call applies it
			# to obj itself and returns None, so the block below is skipped.
			material = adjustMaterial(obj, refs[-1])
		elif (uid == 0x7034695C37BF3F2F): # 'VRayMtl'
			refs = getTypedRefernces(mat)
			material = getMatVRay(refs[1])
		elif (uid == 0x4A16365470B05735): # 'Arch & Design'
			refs = getReferences(mat)
			material = getMatArchDesign(refs[0])
		else:
			FreeCAD.Console.PrintWarning("Unknown material GUID=%016X (%s) - skipped\n!" %(uid, getClsName(mat)))
		if (obj is not None) and (material is not None):
			obj.ViewObject.ShapeMaterial.AmbientColor = material.get('ambient', (0,0,0))
			obj.ViewObject.ShapeMaterial.DiffuseColor = material.get('diffuse', (0.8,0.8,0.8))
			#obj.ViewObject.ShapeMaterial.EmissiveColor = material.get('emissive', (0,0,0))
			obj.ViewObject.ShapeMaterial.SpecularColor = material.get('specular', (0,0,0))
			obj.ViewObject.ShapeMaterial.Shininess = material.get('shinines', 0.2)
			obj.ViewObject.ShapeMaterial.Transparency = material.get('transparency', 0.0)
def createShape3d(doc, pts, indices, shape, key, prc, mat):
	"""Create a triangulated document object from points and polygon index lists.

	Always returns True (also when the mesh ends up with no faces).
	"""
	name = shape.getFirst(TYP_NAME).data
	cnt = len(pts)
	if (cnt > 0):
		if (key is not None): name = "%s_%d" %(name, key)
		mtx = createMatrix(prc)
		# translate the points according to the transformation matrix
		pt = numpy.ones((cnt, 4), numpy.float32)
		pt[:,:3] = pts
		tpt = numpy.transpose(numpy.dot(mtx, numpy.transpose(pt)))
		data = []
		for pol in indices:
			if (len(pol) > 2): # skip points and lines!
				try:
					# Triangulate each n-gon; drop polygons that fail to triangulate.
					ngon = [tpt[idx][0:3] for idx in pol]
					for triangle in triangulate.getTriangles(ngon):
						data.append(triangle)
				except:
					pass
		if (len(data) > 0):
			obj = newObject(doc, name, data)
			adjustMaterial(obj, mat)
			return True
		FreeCAD.Console.PrintWarning("no faces ... ")
	return True
def calcCoordinates(data):
	"""Decode vertex coordinates: a count, then (int, x, y, z) 16-byte records."""
	l, o = getInt(data, 0)
	cnt = len(data) // 16  # 4-byte word + three 4-byte floats per record
	p = numpy.zeros((cnt, 3), numpy.float32)
	i = 0
	while (o < len(data)):
		w, o = getInt(data, o)  # leading per-record word, not used here
		f, o = getFloats(data, o, 3)
		p[i,] = f
		i += 1
	return p
def calcCoordinatesI(data):
	"""Decode packed coordinates: a count, then 12-byte float triples.

	Fixes two defects in the original:
	* ``cnt = len(data) / 12`` produced a float, which ``numpy.zeros``
	  rejects as a dimension under Python 3 — use floor division.
	* ``p[i:0:3] = f`` assigned into a (usually empty) slice instead of
	  row ``i`` (compare the sibling ``calcCoordinates``).
	"""
	l, o = getInt(data, 0)  # leading element count; the loop is driven by length
	cnt = len(data) // 12  # three 4-byte floats per record
	p = numpy.zeros((cnt, 3), numpy.float32)
	i = 0
	while (o < len(data)):
		f, o = getFloats(data, o, 3)
		p[i,] = f
		i += 1
	return p
def getNGons4i(points):
	"""Group the polygons' vertex-index lists by their fH key."""
	grouped = {}
	for entry in points:
		grouped.setdefault(entry.fH, []).append(entry.points)
	return grouped
def getNGons5i(data):
	"""Decode triangles: count, then per face 3 vertex ints + 8 skipped bytes."""
	count, o = getInt(data, 0)
	ngons = []
	while count > 0:
		p, o = getInts(data, o, 3)
		o += 8  # skip 8 bytes of per-face data, unused here
		ngons.append(p)
		count -= 1
	return ngons
def getNGons6i(data):
	"""Decode polygons stored as 6-int records; trailing negative slots are dropped."""
	cnt, o = getInt(data, 0)
	list = []
	while (o < len(data)):
		l, o = getInts(data, o, 6)
		i = 5
		# Trim trailing negative entries, keeping at least positions 1..3.
		while ((i > 3) and (l[i] < 0)):
			i -= 1
		if (i>2): list.append(l[1:i])
	return list
def getNGonsNi(polys):
	"""Decode counted polygons: [count, idx0..idxN-1]* into vertex lists.

	NOTE(review): 'points' below is not defined in this function or visible at
	module level — as written this raises NameError unless a global 'points'
	exists elsewhere; confirm whether this helper is dead code or needs a
	'points' parameter.
	"""
	vertex = []
	o = 0
	while (o < len(polys)):
		num = polys[o]
		o += 1
		ngon = []
		k = 0
		while (k < num):
			p = points[polys[o]][1]
			ngon.append(p)
			o += 1
			k += 1
		vertex.append(ngon)
	return vertex
def getNGonsInts(chunk):
	"""Decode a sequence of counted int lists: [count, idx0..idxN-1]*."""
	o = 0
	list = []
	data = chunk.data
	while (o < len(data)):
		cnt, o = getInt(data, o)
		points, o = getInts(data, o, cnt)
		list.append(points)
	return list
def calcPointNi3s(chunk):
	"""Decode a 0x011A payload into PointNi3s records.

	Each record: an index count, that many vertex indices, a flags short, then
	flag-dependent extra fields. Records without vertex indices are dropped.
	Raises on malformed data after logging the offset reached.
	"""
	data = chunk.data
	cnt, o = getInt(data, 0)
	list = []
	try:
		while (o < len(data)):
			p = PointNi3s()
			l, o = getInt(data, o)
			p.points, o = getInts(data, o, l)
			p.flags, o= getShort(data, o)
			# Optional fields, present depending on the flag bits.
			if ((p.flags & 0x01) != 0): p.f1, o = getInt(data, o)
			if ((p.flags & 0x08) != 0): p.fH, o = getShort(data, o)
			if ((p.flags & 0x10) != 0): p.f2, o = getInt(data, o)
			if ((p.flags & 0x20) != 0): p.fA, o = getInts(data, o, 2 * (l - 3))
			if (len(p.points) > 0):
				list.append(p)
	except Exception as e:
		FreeCAD.Console.PrintError(traceback.format_exc())
		FreeCAD.Console.PrintError('\n')
		FreeCAD.Console.PrintError("%s: o = %d\n" %(e, o))
		raise e
	return list
def createDocObject(doc, name, creator):
	"""Add a new *creator*-type object to *doc*, labelled with the raw name."""
	new_obj = doc.addObject(creator, getValidName(name))
	new_obj.Label = name
	return new_obj
def createEditablePoly(doc, shape, msh, mat, mtx):
    """Build FreeCAD geometry from an 'Editable Poly' chunk (0x08FE).

    Faces can be stored in several alternative encodings (0x011A preferred,
    then 0x0108, then 0x0310); the first one present wins.  Returns True if
    any geometry was created.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Editible Poly '%s' ... " %(name))
    ply = msh.getFirst(0x08FE)
    indexList = [] # texture groups
    coordListI = [] # texture coordinates
    indicesList = [] # texture indices
    point3i = None
    point4i = None
    point6i = None
    pointNi = None
    coords = None
    created = False
    if (ply):
        # Collect the vertex table and whichever face encodings are present.
        for child in ply.children:
            if (child.type == 0x0100): coords = calcCoordinates(child.data)# #, n x (g=uint16,x=float16,y=float16,z=float16)
            elif (child.type == 0x0108): point6i = child.data
#            elif (child.type == 0x010A): point3i = child.data
            elif (child.type == 0x011A): point4i = calcPointNi3s(child)# comparable with 0x012B!!
#            elif (child.type == 0x0120): pass # Number of groups+1
#            elif (child.type == 0x0124): indexList.append(getInt(child.data, 0)[0])
#            elif (child.type == 0x0128): coordListI.append(calcCoordinatesI(child.data))
#            elif (child.type == 0x012B): indicesList.append(getNGonsInts(child))
#            elif (child.type == 0x0130): pass # always 0
#            elif (child.type == 0x0140): pass # always 0x40
#            elif (child.type == 0x0150): pass
#            elif (child.type == 0x0200): pass
#            elif (child.type == 0x0210): pass # n, i * 1.0
#            elif (child.type == 0x0240): pass
#            elif (child.type == 0x0250): pass
            elif (child.type == 0x0310): pointNi = child.data
#        if (len(indexList) > 0):
#            FreeCAD.Console.PrintMessage(" %s " %(str(indexList)))
#            for i in range(len(indexList)):
#                created |= createShape3d(doc, coords, indicesList[i], shape, indexList[i], mtx, mat)
#        elif (point4i is not None):
        if (point4i is not None):
            # one shape per fH group so materials can be applied per group
            vertex = getNGons4i(point4i)
            if (len(vertex) > 0):
                for key, ngons in vertex.items():
                    FreeCAD.Console.PrintMessage("[%d] " %(key))
                    created |= createShape3d(doc, coords, ngons, shape, key, mtx, mat)
            else:
                created = True
                FreeCAD.Console.PrintWarning("no faces ... ")
        elif (point6i is not None):
            ngons = getNGons6i(point6i)
            created = createShape3d(doc, coords, ngons, shape, None, mtx, mat)
        elif (pointNi is not None):
            ngons = getNGonsNi(pointNi)
            created = createShape3d(doc, coords, ngons, shape, None, mtx, mat)
        else:
            FreeCAD.Console.PrintError("hugh? - no data found for %s?!?" %(ply))
    return created
def getArrayPoint3f(values):
    """Read a count-prefixed array of 3-float points; [] when too short."""
    pts = []
    if len(values) >= 4:
        count, offset = getInt(values, 0)
        for _ in range(count):
            xyz, offset = getFloats(values, offset, 3)
            pts.append(xyz)
    return pts
def createEditableMesh(doc, shape, msh, mat, mtx):
    """Build a FreeCAD shape from an 'Editable Mesh' chunk (0x08FE)."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Editable Mesh '%s' ... "%(name))
    ply = msh.getFirst(0x08FE)
    if not ply:
        return False
    coords = getArrayPoint3f(ply.getFirst(0x0914).data)  # vertex table
    ngons = getNGons5i(ply.getFirst(0x0912).data)        # face indices
    return createShape3d(doc, coords, ngons, shape, None, mtx, mat)
def getMtxMshMatLyr(shape):
    """Return (matrix, mesh, material, layer) references of a scene node."""
    refs = getTypedRefernces(shape)  # [sic] project helper's spelling
    if refs:
        return refs.get(0, None), refs.get(1, None), refs.get(3, None), refs.get(6, None)
    # fall back to plain positional references
    refs = getReferences(shape)
    lyr = refs[6] if len(refs) > 6 else None
    return refs[0], refs[1], refs[3], lyr
def adjustPlacement(obj, node):
    """Apply *node*'s transform matrix to obj.Placement; return the placement."""
    flat = createMatrix(node).flatten()
    placement = FreeCAD.Placement(FreeCAD.Matrix(*flat))
    obj.Placement = placement
    return placement
def createShell(doc, shape, shell, mat, mtx):
    """Build the mesh referenced last by a Shell modifier chunk."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Shell '%s' ... " %(name))
    inner = getReferences(shell)[-1]
    created, uid = createMesh(doc, shape, inner, mtx, mat)
    if not created:
        FreeCAD.Console.PrintError("hugh? %016X: %s - " %(uid, getClsName(inner)))
    return created
def createBox(doc, shape, box, mat, mtx):
    """Create a Part::Box from a 3ds Max Box node.

    Parameters come from the first parameter block; newer files store them as
    child chunks, older files as a packed record.  A negative height shifts
    the placement along the box axis so the box grows downwards.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Box '%s' ... " %(name))
    obj = createDocObject(doc, name, "Part::Box")
    pBlock = getReferences(box)[0]
    try:
        # newer format: each parameter is a child chunk
        obj.Length = pBlock.children[2].getFirst(0x0100).data[0]
        obj.Width = pBlock.children[3].getFirst(0x0100).data[0]
        h = pBlock.children[4].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        obj.Length = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        obj.Width = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
        h = UNPACK_BOX_DATA(pBlock.children[3].data)[6]
    if (h < 0):
        obj.Height = -h
        plc = adjustPlacement(obj, mtx)
        try:
            # shift the box down its own axis to mimic the negative height
            axis = obj.Shape.Faces[4].Surface.Axis * h
            obj.Placement = FreeCAD.Placement(plc.Base + axis, plc.Rotation, FreeCAD.Vector(0, 0, 0))
        except Exception:
            pass  # best effort: keep the unshifted placement
    else:
        obj.Height = h
        adjustPlacement(obj, mtx)
    box.geometry = obj
    return True
def createSphere(doc, shape, sphere, mat, mtx):
    """Create a Part::Sphere from a 3ds Max Sphere node; always returns True."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Sphere '%s' ... "%(name))
    obj = createDocObject(doc, name, "Part::Sphere")
    pBlock = getReferences(sphere)[0]
    try:
        # newer format: parameter stored as a child chunk
        obj.Radius = pBlock.children[2].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameter packed in a fixed record
        obj.Radius = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
    adjustPlacement(obj, mtx)
    sphere.geometry = obj
    return True
def createCylinder(doc, shape, cylinder, mat, mtx):
    """Create a Part::Cylinder from a 3ds Max Cylinder node.

    A negative height shifts the placement along the cylinder axis so the
    solid extends in the negative direction.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Cylinder '%s' ... "%(name))
    obj = createDocObject(doc, name, "Part::Cylinder")
    pBlock = getReferences(cylinder)[0]
    try:
        # newer format: parameters stored as child chunks
        r = pBlock.children[2].getFirst(0x0100).data[0]
        h = pBlock.children[3].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        r = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        h = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
    obj.Radius = abs(r)  # radius sign carries no extra information
    if (h < 0):
        obj.Height = -h
        plc = adjustPlacement(obj, mtx)
        try:
            axis = obj.Shape.Faces[0].Surface.Axis * h
            obj.Placement = FreeCAD.Placement(plc.Base + axis, plc.Rotation, FreeCAD.Vector(0, 0, 0))
        except Exception:
            pass  # best effort: keep the unshifted placement
    else:
        obj.Height = h
        adjustPlacement(obj, mtx)
    cylinder.geometry = obj
    return True
def createTorus(doc, shape, torus, mat, mtx):
    """Create a full Part::Torus from a 3ds Max Torus node."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Torus '%s' ... "%(name))
    obj = createDocObject(doc, name, 'Part::Torus')
    pBlock = getReferences(torus)[0]
    try:
        # newer format: parameters stored as child chunks
        obj.Radius1 = pBlock.children[2].getFirst(0x0100).data[0]
        obj.Radius2 = pBlock.children[3].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        obj.Radius1 = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        obj.Radius2 = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
    # full revolution in both directions
    obj.Angle1 = -180.0
    obj.Angle2 = 180.0
    obj.Angle3 = 360.0
    adjustPlacement(obj, mtx)
    torus.geometry = obj
    return True
def createTube(doc, shape, tube, mat, mtx):
    """Create a custom Tube feature from a 3ds Max Tube node."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Tube '%s' ... "%(name))
    obj = createDocObject(doc, name, 'Part::FeaturePython')
    Shapes.TubeFeature(obj)
    # constructor registers the view provider; the instance itself is unused
    ViewProviderShapes.ViewProviderTube(obj.ViewObject)
    pBlock = getReferences(tube)[0]
    try:
        # newer format: parameters stored as child chunks
        obj.InnerRadius = pBlock.children[2].getFirst(0x0100).data[0]
        obj.OuterRadius = pBlock.children[3].getFirst(0x0100).data[0]
        obj.Height = pBlock.children[4].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        obj.InnerRadius = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        obj.OuterRadius = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
        obj.Height = UNPACK_BOX_DATA(pBlock.children[3].data)[6]
    adjustPlacement(obj, mtx)
    tube.geometry = obj
    return True
def createCone(doc, shape, cone, mat, mtx):
    """Create a Part::Cone from a 3ds Max Cone node.

    BUG FIX: the original called ``adjustPlacement(obj, mtx)`` twice in a row
    on the positive-height path (lines 3089/3090); the duplicate is removed.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Cone '%s' ... "%(name))
    obj = createDocObject(doc, name, 'Part::Cone')
    pBlock = getReferences(cone)[0]
    try:
        # newer format: parameters stored as child chunks
        obj.Radius2 = pBlock.children[2].getFirst(0x0100).data[0]
        obj.Radius1 = pBlock.children[3].getFirst(0x0100).data[0]
        h = pBlock.children[4].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        obj.Radius2 = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        obj.Radius1 = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
        h = UNPACK_BOX_DATA(pBlock.children[3].data)[6]
    obj.Angle = 360.0
    if (h < 0):
        obj.Height = -h
        plc = adjustPlacement(obj, mtx)
        try:
            axis = obj.Shape.Faces[1].Surface.Axis * h
            obj.Placement = FreeCAD.Placement(plc.Base + axis, plc.Rotation, FreeCAD.Vector(0, 0, 0))
        except Exception:
            pass  # best effort: keep the unshifted placement
    else:
        obj.Height = h
        adjustPlacement(obj, mtx)
    cone.geometry = obj
    return True
def createGeoSphere(doc, shape, geo, mat, mtx):
    """Approximate a 3ds Max GeoSphere with a Part::Sphere."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building GeoSphere '%s' ... "%(name))
    obj = createDocObject(doc, name, "Part::Sphere")
    pBlock = getReferences(geo)[0]
    try:
        # newer format: parameter stored as a child chunk
        obj.Radius = pBlock.children[4].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameter packed in a fixed record
        obj.Radius = UNPACK_BOX_DATA(pBlock.children[3].data)[6]
    adjustPlacement(obj, mtx)
    geo.geometry = obj
    return True
def createTeapot(doc, shape, teapot, mat, mtx):
    # Teapots have no FreeCAD counterpart — just log and skip them.
    return createSkippable(doc, shape, teapot, mat, mtx, 'Teapot')
def createPlane(doc, shape, plane, mat, mtx):
    """Create a Part::Plane from a 3ds Max Plane node."""
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Plane '%s' ... "%(name))
    obj = createDocObject(doc, name, 'Part::Plane')
    pBlock = getReferences(plane)[0]
    try:
        # newer format: parameters stored as child chunks
        obj.Length = pBlock.children[2].getFirst(0x0100).data[0]
        obj.Width = pBlock.children[3].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        obj.Length = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        obj.Width = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
    adjustPlacement(obj, mtx)
    plane.geometry = obj
    return True
def createPyramid(doc, shape, pyramid, mat, mtx):
    """Approximate a 3ds Max Pyramid with a Part::Wedge.

    The wedge's top face is collapsed to the center line (X2/Z2 min == max),
    which yields a pyramid-like solid of base l x w and height h.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building Pyramid '%s' ... "%(name))
    obj = createDocObject(doc, name, "Part::Wedge")
    pBlock = getReferences(pyramid)[0]
    try:
        # newer format: parameters stored as child chunks
        l = pBlock.children[2].getFirst(0x0100).data[0]
        w = pBlock.children[3].getFirst(0x0100).data[0]
        h = pBlock.children[4].getFirst(0x0100).data[0]
    except Exception:  # was a bare except: — narrowed so Ctrl-C is not swallowed
        # older format: parameters packed in a fixed record
        l = UNPACK_BOX_DATA(pBlock.children[1].data)[6]
        w = UNPACK_BOX_DATA(pBlock.children[2].data)[6]
        h = UNPACK_BOX_DATA(pBlock.children[3].data)[6]
    obj.Xmin = 0.0
    obj.Ymin = 0.0
    obj.Zmin = 0.0
    obj.X2min = l/2
    obj.Z2min = w/2
    obj.Xmax = l
    obj.Ymax = h
    obj.Zmax = w
    obj.X2max = l/2
    obj.Z2max = w/2
    adjustPlacement(obj, mtx)
    pyramid.geometry = obj
    return True
def createProBoolean(doc, shape, pro, mat, mtx):
    """Stub for ProBoolean compound objects — not implemented yet.

    The notes below document the chunk layouts observed in sample files.
    Always returns True so the caller does not emit a 'skipped' warning.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" building ProBoolean '%s' ... " %(name))
    pBlocks = getReferences(pro)
    # Types:
    # 0x12 - 0x2034=[366], 0x2150(0x100, 0x110(0x120, 0x130))), 0x204B='.', 0x100=0x01
    # 0x11 - 0x2034=[371,375,376], 0x204B '.', 0x7230=0x00000000, 0x7231=0x00000000, 0x2535=0x00000000
    # 0x13 - 0x2034=[378], 0x2150(0x100, 0x110(0x120, 0x130))), 0x204B='.', 0x100=''
    # 0x11 - 0x2034=[383,387,388], 0x204B '.', 0x7230=0x00000000, 0x7231=0x00000000, 0x2535=0x00000000
    # obj = createDocObject(doc, name, "Part::Boolean")
    return True
def createSkippable(doc, shape, msh, mat, mtx, type):
    """Log that an object of the given *type* is intentionally not imported.

    Always returns True so the caller treats it as handled.
    """
    name = shape.getFirst(TYP_NAME).data
    FreeCAD.Console.PrintMessage(" skipping %s '%s'... " %(type, name))
    return True
def createMesh(doc, shape, msh, mtx, mat):
    """Dispatch to the builder matching the mesh chunk's class GUID.

    Returns (created, uid): *created* tells whether geometry was built,
    *uid* is the class GUID that was dispatched on.
    """
    created = False
    uid = getGUID(msh)
    msh.geometry = None
    if (uid == 0x0E44F10B3):
        created = createEditableMesh(doc, shape, msh, mat, mtx)
    elif (uid == 0x192F60981BF8338D):
        created = createEditablePoly(doc, shape, msh, mat, mtx)
    elif (uid == 0x0000000000000010): # Box
        created = createBox(doc, shape, msh, mat, mtx)
    elif (uid == 0x0000000000000011): # Sphere
        created = createSphere(doc, shape, msh, mat, mtx)
    elif (uid == 0x0000000000000012): # Cylinder
        created = createCylinder(doc, shape, msh, mat, mtx)
    elif (uid == 0x0000000000000020): # Torus
        created = createTorus(doc, shape, msh, mat, mtx)
    elif (uid in (0x0000000000002032, 0x0000000000002033)): # Shell (two variants)
        created = createShell(doc, shape, msh, mat, mtx)
    elif (uid == 0x0000000000007B21): # Tube
        created = createTube(doc, shape, msh, mat, mtx)
    elif (uid == 0x00000000A86C23DD): # Cone
        created = createCone(doc, shape, msh, mat, mtx)
    elif (uid == 0x00007F9E00000000): # GeoSphere
        created = createGeoSphere(doc, shape, msh, mat, mtx)
#    elif (uid == 0x2257F99331CEA620): # ProBoolean
#        created = createProBoolean(doc, shape, msh, mat, mtx)
    elif (uid == 0x4BF37B1076BF318A): # Pyramid
        created = createPyramid(doc, shape, msh, mat, mtx)
    elif (uid == 0x77566f65081f1dfc): # Plane
        created = createPlane(doc, shape, msh, mat, mtx)
    elif (uid == 0xACAD26D9ACAD13D3): # Teapot
        created = createTeapot(doc, shape, msh, mat, mtx)
    else:
        # renamed from `type` to avoid shadowing the builtin
        skip_type = SKIPPABLE.get(uid)
        if (skip_type is not None):
            created = createSkippable(doc, shape, msh, mat, mtx, skip_type)
    return created, uid
def createObject(doc, shape):
    """Create the FreeCAD object for a scene node, applying parent transforms.

    BUG FIX: the 'skipped unknown object' warning passed two arguments to a
    one-placeholder format string, raising TypeError instead of warning.
    """
    parent = getNodeParent(shape)
    shape.parent = parent
    name = getNodeName(shape)
    mtx, msh, mat, lyr = getMtxMshMatLyr(shape)
    # walk up to the scene root (GUID 0x0002), accumulating names and matrices
    while ((parent is not None) and (getGUID(parent) != 0x0002)):
        name = "%s/%s" %(getNodeName(parent), name)
        prnMtx = parent.matrix
        if (prnMtx): mtx = mtx.dot(prnMtx)
        parent = getNodeParent(parent)
    created, uid = createMesh(doc, shape, msh, mtx, mat)
    if (not created):
        if (uid is None):
            FreeCAD.Console.PrintWarning("skipped unknown object %s!\n" %(msh,))
        else:
            FreeCAD.Console.PrintWarning("skipped object %016X=%s!\n" %(uid, msh))
    else:
        doc.recompute()
        FreeCAD.Console.PrintMessage("DONE!\n")
def makeScene(doc, parent, level = 0):
    """Create FreeCAD objects for every node chunk under *parent*.

    A progress indicator is shown only at the top level (level == 0).
    """
    progress = None
    if level == 0:
        progress = FreeCAD.Base.ProgressIndicator()
        progress.start(" building objects ...", len(parent.children))
    for scene in parent.children:
        if progress is not None:
            progress.next()
        if not isinstance(scene, SceneChunk):
            continue
        # node chunks have GUID 1 and super-id 1
        if (getGUID(scene) == 0x0001) and (getSuperId(scene) == 0x0001):
            try:
                createObject(doc, scene)
            except Exception:
                # keep importing the remaining nodes after a failure
                FreeCAD.Console.PrintError(traceback.format_exc())
    if progress is not None:
        progress.stop()
def readScene(doc, ole, fileName):
    """Read the 'Scene' OLE stream and build the FreeCAD objects from it."""
    global SCENE_LIST
    scenes = readChunks(ole, 'Scene', fileName+'.Scn.bin', containerReader=SceneChunk)
    SCENE_LIST = scenes
    makeScene(doc, scenes[0], 0)
def read(doc, fileName):
    """Import a 3D Studio Max (.max) OLE compound file into *doc*."""
    if (olefile.isOleFile(fileName)):
        setEndianess(LITTLE_ENDIAN)  # .max streams are little endian
        ole = olefile.OleFileIO(fileName)
        # Property sets are read for side effects only; values are discarded.
        p = ole.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10])
        p = ole.getproperties('\x05SummaryInformation', convert_time=True, no_conversion=[10])
        # The class/config streams must be read before the scene so that
        # GUID lookups work during scene construction.
        if (DEBUG): FreeCAD.Console.PrintMessage("==== ClassData ===\n")
        readClassData(ole, fileName)
        if (DEBUG): FreeCAD.Console.PrintMessage("==== Config ===\n")
        readConfig(ole, fileName)
        if (DEBUG): FreeCAD.Console.PrintMessage("==== DllDirectory ===\n")
        readDllDirectory(ole, fileName)
        if (DEBUG): FreeCAD.Console.PrintMessage("==== ClassDirectory3 ===\n")
        readClassDirectory3(ole, fileName)
        if (DEBUG): FreeCAD.Console.PrintMessage("==== VideoPostQueue ===\n")
        readVideoPostQueue(ole, fileName)
        if (DEBUG): FreeCAD.Console.PrintMessage("==== Scene ===\n")
        readScene(doc, ole, fileName)
    else:
        FreeCAD.Console.PrintError("File seems to be no 3D Studio Max file!")
|
from kivy.config import Config
# Config.set('kivy', 'exit_on_escape', '0')
# Config.set('graphics', 'resizable', '0')
# Fix the window size; must run before any kivy window is created.
Config.set('graphics', 'width', '640')
Config.set('graphics', 'height', '480')
import os
import cv2
from detection import Face_Detector, Landmark_Detector
from faceswap_cam import face_swap
from kivy.app import App
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.uix.screenmanager import Screen, ScreenManager
# from kivy.properties import ObjectProperty
import numpy as np
class MyScreenManager(ScreenManager):
    """Root screen manager; its screens are declared in the kv string."""
    pass
class PreScreen(Screen):
    """Splash screen with a 'GO' button; layout is defined in the kv string."""
    pass
class FrontScreen(Screen):
    """Main screen: choose a portrait, then swap it onto webcam frames."""

    def __init__(self, **kwargs):
        super(FrontScreen, self).__init__(**kwargs)
        self.refresh_dt = 0.05  # seconds between captured frames (~20 fps)

    def on_enter(self, *args): # only works for multiple screens?
        """Create detectors and populate the portrait drop-down menu."""
        self.face_detector = Face_Detector()
        self.lmk_detector = Landmark_Detector()
        self.portraits = os.listdir('./portraits/')
        # drop-down menu bound to the face_selection button
        self.dropdown = DropDown()
        for face in self.portraits:
            btn = Button(text=face, size_hint_y=None, height=32)
            btn.bind(on_release=lambda btn: self.dropdown.select(btn.text))
            self.dropdown.add_widget(btn)
        self.ids.face_selection.bind(on_release=self.dropdown.open)
        self.dropdown.bind(on_select=lambda instance, x: setattr(self.ids.face_selection, 'text', x))

    def initialize(self, target_face):
        """Open the camera and load the selected portrait image."""
        try:
            _source = int(self.ids.cam.text)  # numeric camera index
        except ValueError:  # was `except Exception as ee` with ee unused
            _source = self.ids.cam.text  # treat as a device path / URL
        self.cap = cv2.VideoCapture( _source )
        self.FaceSwap = face_swap( os.path.join('./portraits', target_face) )

    def swap_face(self, *args):
        """Grab one frame, detect the first face and render the swap."""
        ret, frame = self.cap.read()
        if not ret:
            return  # BUG FIX: do not process a frame that failed to capture
        frame = cv2.resize(frame, (480,640))
        bboxes, _ = self.face_detector.detect(frame) # get faces
        if len(bboxes) != 0:
            bbox = bboxes[0] # get the first
            # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int
            bbox = bbox.astype(int)
            lmks, PRY_3d = self.lmk_detector.detect(frame, bbox) # get landmarks
            lmks = lmks.astype(int)
            frame = self.FaceSwap.run(frame,lmks)
        cv2.imshow("Face Swap", frame)

    def update(self,*args):
        """Start the periodic face-swap loop."""
        Clock.schedule_interval(self.swap_face, self.refresh_dt)

    def stop(self):
        """Stop the loop and close the preview window."""
        Clock.unschedule(self.swap_face)
        cv2.destroyWindow('Face Swap')
root_widget = Builder.load_string('''
MyScreenManager:
PreScreen:
FrontScreen:
<PreScreen>:
Image:
source: ''
allow_stretch: True
keep_ratio: False
size: root.size
Button:
text: 'GO'
font_size:40
center: root.center
color: 1,0,1,1
background_color: 0,0,0,0
on_release: app.root.current = 'front'
<FrontScreen>:
name: 'front'
Image:
source: ''
allow_stretch: True
keep_ratio: False
size: root.size
Button:
id: face_selection
center: root.center
text: 'Select a face'
size: 0.25*root.width, root.height//13
# on_press: print(root.portraits)
Label:
text: 'Camera'
color: (1, 0.6, 0, 1)
font_size: 24
center: 0.2*root.width , 0.65*root.height
TextInput:
id: cam
text: '0'
font_size: 12
multiline: False
center: 0.52*root.width , 0.625*root.height
size: (0.3*root.width, root.height//16)
padding: [0.02*root.width,self.height // 2 - (self.line_height//2) * len(self._lines), 0, 0]
font_size: dp(18)
color:(0.11, 0.17, 0.44, 1.0)
Button:
id: start
text: 'START'
center: root.width//2, 0.3*root.height
height: root.height//13
on_release: root.initialize(face_selection.text)
on_release: root.update()
Button:
id: reset
text: 'RESET'
center: 1.5*root.width//2, 0.47*root.height
height: root.height//13
on_release: root.stop()
''')
class faceApp(App):
    """Kivy application entry point for the face-swap demo."""
    def build(self):
        self.title = 'Face Swap'
        return root_widget
# NOTE(review): consider guarding with `if __name__ == '__main__':` so the
# module can be imported without starting the UI.
faceApp().run()
"""External tests associated with doctest_private_tests.py.
>>> my_function(['A', 'B', 'C'], 2)
['A', 'B', 'C', 'A', 'B', 'C']
"""
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Session Dumper."""
from __future__ import absolute_import
from __future__ import division
import abc
import copy
import io
import json
import random
import sys
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr_os
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.resource import yaml_printer
from googlecloudsdk.core.util import files
import six
from six.moves import builtins
from six.moves import StringIO
class _Mock(object):
"""A class to mock and unmock a function."""
def __init__(self, target, function, new=None, return_value=None):
if new is None:
new = lambda *args, **kwargs: return_value
self._target = target
self._function = function
self._real_function = getattr(target, function)
self._new = new
def Start(self):
setattr(self._target, self._function, self._new)
def Stop(self):
setattr(self._target, self._function, self._real_function)
class _StreamCapturerBase(io.IOBase):
"""A base class for input/output stream capturers."""
def __init__(self, real_stream):
self._real_stream = real_stream
self._capturing_stream = StringIO()
def isatty(self, *args, **kwargs):
return True
def flush(self):
self._capturing_stream.flush()
self._real_stream.flush()
def GetValue(self):
return self._capturing_stream.getvalue()
class OutputStreamCapturer(_StreamCapturerBase):
    """A stream wrapper that tees every write to the real stream and buffer."""

    def write(self, *args, **kwargs):
        for stream in (self._capturing_stream, self._real_stream):
            stream.write(*args, **kwargs)

    def writelines(self, *args, **kwargs):
        for stream in (self._capturing_stream, self._real_stream):
            stream.writelines(*args, **kwargs)
class InputStreamCapturer(_StreamCapturerBase):
    """A file-like object that captures all the information read from stream."""

    def read(self, *args, **kwargs):
        result = self._real_stream.read(*args, **kwargs)
        self._capturing_stream.write(result)
        return result

    def readline(self, *args, **kwargs):
        result = self._real_stream.readline(*args, **kwargs)
        self._capturing_stream.writelines([result])
        return result

    def readlines(self, *args, **kwargs):
        # BUG FIX: previously delegated to readline() (singular), so only the
        # first line was returned and captured instead of the whole stream.
        result = self._real_stream.readlines(*args, **kwargs)
        self._capturing_stream.writelines(result)
        return result
@six.add_metaclass(abc.ABCMeta)
class FileIoCapturerBase(object):  # pytype: disable=ignored-abstractmethod
    """A base class to capture fileIO.

    Patches builtins.open and files.OpenForWritingPrivate with wrappers so
    subclasses can tee file traffic through stream capturers.
    """

    def __init__(self):
        self._outputs = []           # [{'name', 'capturer'}] for written files
        self._private_outputs = []   # same, for private (restricted-mode) files
        self._real_open = builtins.open
        self._real_private = files.OpenForWritingPrivate
        self._mocks = (
            _Mock(builtins, 'open', new=self.Open),
            _Mock(files, 'OpenForWritingPrivate', new=self.OpenForWritingPrivate),
        )

    def Mock(self):
        """Install the open() wrappers."""
        for m in self._mocks:
            m.Start()

    @abc.abstractmethod
    def Open(self, name, mode='r', buffering=-1):
        """Replacement for builtins.open; implemented by subclasses."""
        pass

    @abc.abstractmethod
    def OpenForWritingPrivate(self, path, binary=False):
        """Replacement for files.OpenForWritingPrivate; implemented by subclasses."""
        pass

    def Unmock(self):
        """Restore the real open functions."""
        for m in self._mocks:
            m.Stop()

    def GetOutputs(self):
        return self._GetResult(self._outputs)

    def GetPrivateOutputs(self):
        return self._GetResult(self._private_outputs)

    @staticmethod
    def _GetResult(array):
        """Flush each capturer and return [{'name', 'content'}, ...]."""
        result = []
        for f in array:
            f['capturer'].flush()
            result.append({
                'name': f['name'],
                # capturers expose GetValue(); raw StringIO uses getvalue()
                'content': f['capturer'].GetValue() if hasattr(
                    f['capturer'], 'GetValue') else f['capturer'].getvalue()
            })
        return result

    @staticmethod
    def _ShouldCaptureFile(name, frame):
        """Skip the session file itself and linecache's .py source reads."""
        if name == properties.VALUES.core.capture_session_file.Get():
            return False
        if name.endswith('.py'):
            # linecache.updatecache opens source files; interpreter noise
            if frame.f_code.co_name in ('updatecache',):
                return False
        return True

    @staticmethod
    def _Save(array, name, capturer):
        array.append({'name': name, 'capturer': capturer})
class FileIoCapturer(FileIoCapturerBase):
    """A class to capture all the fileIO of the session."""

    def __init__(self):
        super(FileIoCapturer, self).__init__()
        self._inputs = []  # [{'name', 'capturer'}] for files read
        self.Mock()        # start intercepting immediately

    def Open(self, name, mode='r', buffering=-1):
        """open() replacement that tees reads and writes into capturers."""
        # f_back is the caller's frame, used to skip linecache internals.
        if not self._ShouldCaptureFile(name, sys._getframe().f_back): # pylint: disable=protected-access
            return self._real_open(name, mode, buffering)
        if 'w' in mode:
            capturer = OutputStreamCapturer(self._real_open(name, mode, buffering))
            self._Save(self._outputs, name, capturer)
        else:
            # NOTE(review): append ('a') and other non-'w' modes fall into the
            # input branch — confirm that is intended.
            capturer = InputStreamCapturer(self._real_open(name, mode, buffering))
            self._Save(self._inputs, name, capturer)
        return capturer

    def OpenForWritingPrivate(self, path, binary=False):
        """files.OpenForWritingPrivate replacement that tees writes."""
        capturer = OutputStreamCapturer(self._real_private(path, binary))
        self._Save(self._private_outputs, path, capturer)
        return capturer

    def GetInputs(self):
        return self._GetResult(self._inputs)
class SessionDeterminer(object):
    """A class to mock several things that may make session undetermined as is."""

    _mocks = []  # active _Mock instances; empty while unmocked

    @classmethod
    def Mock(cls):
        """Disable progress-tracker autoticking so the output is deterministic."""
        if cls._mocks:
            raise Exception('Mocks already created')
        cls._mocks = [
            _Mock(progress_tracker.ProgressTracker, '_autotick',
                  new=property(lambda self: False)),
            _Mock(progress_tracker.ProgressTracker, 'Tick',
                  new=lambda self: self._done), # pylint: disable=protected-access
        ]
        for m in cls._mocks:
            m.Start()

    @classmethod
    def Unmock(cls):
        """Restore the patched attributes."""
        for m in cls._mocks:
            m.Stop()
        cls._mocks = []
@six.add_metaclass(abc.ABCMeta)
class _StateMock(object):  # pytype: disable=ignored-abstractmethod
    """A class to represent a simple mock.

    Interface for a piece of process state that can be captured during
    recording and replayed later; states equal to *default_value* are
    considered uninteresting and not recorded.
    """

    def __init__(self, default_value):
        self.default_value = default_value

    @abc.abstractmethod
    def Capture(self):
        """Return the current value of the state."""
        pass

    @abc.abstractmethod
    def Mock(self, test, value):
        """Patch the state to *value* inside *test*."""
        pass
class _FunctionStateMock(_StateMock):
    """A class to mock a call to some function.

    Captures the current return value of target.func and, on replay, patches
    the function to return the captured value.
    """

    def __init__(self, target, func, default_value):
        super(_FunctionStateMock, self).__init__(default_value)
        self._func_to_call = getattr(target, func) # pylint: disable=invalid-name
        self._target = target
        self._func = func

    def Capture(self):
        """Return whatever the wrapped function currently returns."""
        return self._func_to_call()

    def Mock(self, test, value):
        """Patch the function on the target to always return *value*."""
        test.StartObjectPatch(self._target, self._func, return_value=value)
class _RandomStateMock(_StateMock):
    """A class to mock random."""

    def __init__(self):
        super(_RandomStateMock, self).__init__(0)

    def Capture(self):
        # Create a new unique random seed: the state is different each run and
        # hashes will be different with high probability
        random_seed = hash(random.getstate())
        # Re-seed now so the rest of this run is reproducible from the seed.
        random.seed(random_seed)
        return random_seed

    def Mock(self, unused_test, value):
        """Replay: seed the RNG with the captured seed."""
        random.seed(value)
class classproperty(object):  # pylint: disable=invalid-name
    """Decorator that can be used to make @classmethod like @properties."""

    def __init__(self, property_fn):
        self.fget = property_fn

    def __get__(self, unused_instance, typ):
        # Always dispatch on the class, even when accessed via an instance.
        return self.fget(typ)
def GetHttpRequestDict(uri, method, body, headers):
    """Return a serializable record of an HTTP request (headers filtered)."""
    record = {'uri': uri, 'method': method, 'body': body}
    record['headers'] = _FilterHeaders(headers)
    return record
def _FilterHeaders(headers):
    """Drop volatile or sensitive headers (see _KeepHeader)."""
    filtered = {}
    for key, value in six.iteritems(headers):
        if _KeepHeader(key):
            filtered[key] = value
    return filtered
def _KeepHeader(header):
if header.startswith('x-google'):
return False
if header in ('user-agent', 'Authorization', 'content-length',):
return False
return True
class SessionCapturer(object):
    """Captures the session to file.

    Records command-line args, properties, process state, HTTP traffic and
    (optionally) stream/file IO as a list of records that can be replayed.
    """

    capturer = None  # is SessionCapturer if session is being captured

    def __init__(self, capture_streams=True):
        """Start capturing; mocks nondeterminism and optionally wraps streams."""
        self._records = []
        SessionDeterminer.Mock()
        if capture_streams:
            self._streams = (OutputStreamCapturer(sys.stdout),
                             OutputStreamCapturer(sys.stderr),)
            sys.stdout, sys.stderr = self._streams  # pylint: disable=unpacking-non-sequence
            log.Reset(*self._streams)
            self._stdin = InputStreamCapturer(sys.stdin)
            sys.stdin = self._stdin
            self._fileio = FileIoCapturer()
        else:
            self._streams = None
            self._stdin = None
            self._fileio = None

    def CaptureHttpRequest(self, uri, method, body, headers):
        """Record an outgoing HTTP request."""
        self._records.append({
            'request': GetHttpRequestDict(uri, method, body, headers)
        })

    def CaptureHttpResponse(self, response, content):
        """Record an HTTP response; content is split into readable parts."""
        self._records.append({
            'response': {
                'response': _FilterHeaders(response),
                'content': self._ToList(content)
            }})

    def CaptureArgs(self, args):
        """Captures command line args."""
        specified_args = {}
        command = args.command_path[1:]
        for k, v in six.iteritems(args.GetSpecifiedArgs()):
            if not k.startswith('--'):
                # positional argument: goes back onto the command line
                if isinstance(v, six.string_types):
                    command.append(v)
                elif isinstance(v, list):
                    command += v
                else:
                    raise Exception('Unknown args type {}'.format(type(v)))
            elif k != '--capture-session-file':
                specified_args[k] = v
        self._records.append({
            'args': {
                'command': ' '.join(command),
                'specified_args': specified_args
            }
        })

    _STATE_MOCKS = None

    @classproperty
    def STATE_MOCKS(cls):  # pylint: disable=invalid-name
        """Lazily built map of state name -> _StateMock instance."""
        if cls._STATE_MOCKS is None:
            cls._STATE_MOCKS = {
                'interactive_console': _FunctionStateMock(
                    console_io, 'IsInteractive', False),
                'random_seed': _RandomStateMock(),
                'term_size': _FunctionStateMock(
                    console_attr_os, 'GetTermSize', (80, 24))
            }
        return cls._STATE_MOCKS

    def CaptureState(self):
        """Record every piece of process state that differs from its default."""
        state = {}
        for k, v in six.iteritems(self.STATE_MOCKS):
            result = v.Capture()
            if result != v.default_value:
                state[k] = result
        self._records.append({
            'state': state
        })

    def CaptureProperties(self, all_values):
        """Record properties, dropping the session-file and account values."""
        values = copy.deepcopy(all_values)
        for k in ('capture_session_file', 'account'):
            if k in values['core']:
                values['core'].pop(k)
        self._records.append({
            'properties': values
        })

    def CaptureException(self, exc):
        """Record an exception's type and message.

        BUG FIX: `exc.message` does not exist on Python 3 exceptions; fall
        back to str(exc) while preserving the Python 2 behavior.
        """
        self._records.append({
            'exception': {
                'type': str(type(exc)),
                'message': getattr(exc, 'message', str(exc))
            }
        })

    def Print(self, stream, printer_class=yaml_printer.YamlPrinter):
        """Finalize the session and print every record to *stream*."""
        self._Finalize()
        printer = printer_class(stream)
        for record in self._FinalizeRecords(self._records):
            printer.AddRecord(record)

    def _Finalize(self):
        """Finalize records, restore state."""
        if self._streams is not None:
            for stream in self._streams + (self._stdin,):
                stream.flush()
            self._fileio.Unmock()
            output = {}
            if self._streams[0].GetValue():
                output['stdout'] = self._streams[0].GetValue()
            if self._streams[1].GetValue():
                output['stderr'] = self._streams[1].GetValue()
            if self._fileio.GetOutputs():
                output['files'] = self._fileio.GetOutputs()
            if self._fileio.GetPrivateOutputs():
                output['private_files'] = self._fileio.GetPrivateOutputs()
            self._records.append({
                'output': output
            })
            inputs = {}
            if self._stdin.GetValue():
                inputs['stdin'] = self._stdin.GetValue()
            if self._fileio.GetInputs():
                inputs['files'] = self._fileio.GetInputs()
            # NOTE(review): index 3 assumes args/properties/state records were
            # captured first — confirm the expected capture call order.
            self._records.insert(3, {
                'input': inputs
            })
        SessionDeterminer.Unmock()

    @staticmethod
    def _FinalizePrimitive(primitive):
        """Scrub the project id out of a scalar; reject unknown types."""
        if isinstance(primitive, six.string_types):
            project = properties.VALUES.core.project.Get()
            if not project:
                return primitive
            return primitive.replace(project, 'fake-project')
        elif (isinstance(primitive, (float, type(None))) or
              isinstance(primitive, six.integer_types)):
            return primitive
        else:
            raise Exception('Unknown primitive type {}'.format(type(primitive)))

    def _FinalizeRecords(self, records):
        """Recursively scrub every primitive inside *records*."""
        if isinstance(records, dict):
            return {
                self._FinalizePrimitive(k):
                    self._FinalizeRecords(v) for k, v in six.iteritems(records)
            }
        elif isinstance(records, (list, tuple)):
            return [
                self._FinalizeRecords(r) for r in records
            ]
        else:
            return self._FinalizePrimitive(records)

    def _ToList(self, response):
        """Transforms a response to a batch request into a list.

        The list is more human-readable than plain response as it contains
        recognized json dicts.

        Args:
          response: str, The response to be transformed.

        Returns:
          list, The result of transformation.
        """
        # Check if the whole response is json
        try:
            return [{'json': json.loads(response)}]
        except ValueError:
            pass
        result = []
        while True:
            json_content_idx = response.find('Content-Type: application/json;')
            if json_content_idx == -1:
                result.append(response)
                break
            json_start_idx = response.find(
                '\r\n\r\n{', json_content_idx) + len('\r\n\r\n')
            json_end_idx = response.find('}\n\r\n', json_start_idx) + 1
            if json_end_idx <= json_start_idx:
                result.append(response)
                break
            try:
                parts = [response[:json_start_idx],
                         {'json': json.loads(response[json_start_idx:json_end_idx])}]
            except ValueError:
                parts = [response[:json_end_idx]]
            result += parts
            response = response[json_end_idx:]
        return result
|
from __future__ import unicode_literals
import keyword
import re
from optparse import make_option
import fnmatch
import json
import pyodbc
from django.db import connections
#from django.conf import settings
#from django.utils import six
from django.core.management.commands.inspectdb import Command as InspectDBCommand
from pug.db.explore import db_meta, get_indexes
from pug.db.sqlserver import get_meta_tuples
from pug.nlp.db import clean_utf8
# NOTE(review): hard-coded DSN with placeholder credentials — load these from
# settings or environment variables before use; never commit real passwords.
connection = pyodbc.connect('Driver=FreeTDS;Server=SERVERNAME;DATABASE=DBNAME;UID=UNAME;PWD=CATCHMEIFYOUCAN;TDS_Version=7.2;PORT=1433')
# >>> cursor = connection.cursor()
def callable_table_name_filter(table_name, filter_string=None, lowercase=True):
    """Return True when *table_name* matches the configured filter.

    The filter may be an exact string, a glob pattern (presence of '[', '*'
    or '?' triggers fnmatch translation) or a precompiled regex.  When no
    filter is given, the default stored on the function attribute is used.
    Python 2 code (relies on ``basestring``).
    """
    if filter_string is None:
        filter_string = callable_table_name_filter.filter_string
    # is the filter a glob expression (rather than a compiled regex)?
    if isinstance(filter_string, basestring):
        if lowercase:
            # NOTE(review): only the filter is lowercased, not table_name —
            # confirm that asymmetry is intended.
            filter_string = filter_string.lower()
        if any(c in filter_string for c in ('[', '*', '?')):
            filter_string = re.compile(fnmatch.translate(filter_string))
        else:
            return table_name == filter_string
    return True if filter_string.match(table_name) else False


# Default: any identifier built from characters valid in MS SQL Server
# (compatibility level 100); unicode identifiers are also allowed by the
# server, but not by this regex.
callable_table_name_filter.filter_string = re.compile(r'[a-zA-Z_@#]+')
#TODO: override django.core.management.commands.Command
class Command(InspectDBCommand):
help = "Introspects the given MS SQL Database and outputs a fully function Django models.py (with primary_keys defined) string to stdout."
option_list = InspectDBCommand.option_list + (
#make_option('--database', action='store', dest='database',
# default=DEFAULT_DB_ALIAS, help='Nominates a database to '
# 'introspect. Defaults to using the "default" database.'),
# make_option('--table', action='store', dest='table',
# default=None, help='Table to compose model for (default = all).'),
# make the stealth option explicit and accessible from the command-line
make_option('--table_name_filter', action='store', dest='table_name_filter',
default=None, help='Table to compose model for (default = all).'),
make_option('--app', action='store', dest='app',
default='crawler', help='App name to examine and compose data model for (default = all).'),
# make_option('--extra', action='store_true', dest='extra',
# default=None, help='Whether to to use custom MS SQL to get extra meta data about tables and fields.'),
)
def handle_inspection(self, options):
verbosity = options.get('verbosity')
app = options.get('app')
connection = connections[options.get('database')]
# use_extra = connections[options.get('extra')]
# 'table_name_filter' is a stealth option -- callable that returns True if table name should be processed
table_name_filter = options.get('table_name_filter')
if table_name_filter is not None and isinstance(table_name_filter, basestring):
callable_table_name_filter.filter_string = table_name_filter
table_name_filter = callable_table_name_filter
# because the parent method checks the stealth option
options['table_name_filter'] = table_name_filter
meta = db_meta(app=app, table=table_name_filter, verbosity=int(verbosity))
if verbosity > 1:
print meta
# 'one_table_name' is a new NONstealth option -- string that indicates the one table (or model) that *should* be processed
#one_table_name = options.get('table')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s.startswith("u'") and s[1:] or s
cursor = connection.cursor()
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
yield ''
known_models = []
print meta.keys()
print json.dumps(meta, indent=2)
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
# if one_table_name is not None and (table_name != one_table_name and table2model(table_name) != one_table_name):
# continue
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = get_indexes(meta, table_name)
used_column_names = [] # Holds column names used in the table so far
table_columns = connection.introspection.get_table_description(cursor, table_name)
table_meta = get_meta_tuples(cursor, table_name)
for i, (row, extra_meta) in enumerate(zip(table_columns, table_meta)):
#print i
#print row
#print extra_meta
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = {} # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
# additional paramters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}:
continue
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
extra_params['blank'] = True
if not field_type in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = models.%s' % (att_name, field_type)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = clean_utf8(col_name)
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = {}
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = row[3]
if field_type == 'DecimalField':
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return [" class Meta:",
" db_table = '%s'" % table_name,
""]
|
# -*- coding: utf-8 -*-
# Copyright 2019 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import marshmallow
from marshmallow import fields
from marshmallow import validate
class Location(marshmallow.Schema):
    """Schema for a link entry (rel, href, type)."""
    rel = fields.Str(required=True)
    href = fields.Url(required=True)
    type = fields.Str(required=True)
class Version(marshmallow.Schema):
    """Schema describing a single API version entry."""
    # FIX: `required` takes a boolean. The original passed the *string*
    # "True", which only worked because non-empty strings are truthy; now
    # consistent with every other schema in this module.
    version = fields.Str(required=True)
    id = fields.Str(required=True)
    links = fields.Nested(Location)
    type = fields.Str()
class Versions(marshmallow.Schema):
    """Wrapper schema holding the list of Version entries."""
    versions = fields.List(fields.Nested(Version))
class Failure(marshmallow.Schema):
    """Schema for an error response carrying a failure message."""
    message = fields.Str(required=True,
                         description="Failure message")
class Prediction(marshmallow.Schema):
    """Schema for a prediction response (status + serialized predictions)."""
    status = fields.String(required=True,
                           description='Response status message')
    predictions = fields.Str(required=True,
                             description='String containing predictions')
class ModelMeta(marshmallow.Schema):
    """Schema describing a model's metadata (identity, authorship, links)."""
    # NOTE(review): marshmallow 3 expects extra field info via metadata={...};
    # bare 'description' kwargs raise warnings there -- confirm the targeted
    # marshmallow version.
    id = fields.Str(required=True, description='Model identifier')  # noqa
    name = fields.Str(required=True, description='Model name')
    description = fields.Str(required=True,
                             description='Model description')
    license = fields.Str(required=False, description='Model license')
    author = fields.Str(required=False, description='Model author')
    version = fields.Str(required=False, description='Model version')
    url = fields.Str(required=False, description='Model url')
    links = fields.List(fields.Nested(Location))
class Training(marshmallow.Schema):
    """Schema for a single training run and its lifecycle status."""
    uuid = fields.UUID(required=True, description='Training identifier')
    date = fields.DateTime(required=True, description='Training start time')
    status = fields.Str(
        required=True,
        description='Training status',
        enum=["running", "error", "completed", "cancelled"],
        validate=validate.OneOf(["running", "error", "completed", "cancelled"])
    )
    message = fields.Str(description="Optional message explaining status")
class TrainingList(marshmallow.Schema):
    """Wrapper schema holding the list of Training entries."""
    trainings = fields.List(fields.Nested(Training))
|
from fact.io import read_h5py
from fact.analysis.statistics import li_ma_significance
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from functools import lru_cache, partial
from scipy.optimize import minimize
from numdifftools import Hessian
INVALID = np.finfo(np.float32).max  # sentinel likelihood for infeasible (negative) parameters
EPS = 1e-10  # small offset to avoid log(0)
# mc information
scatter_radius = 270  # maximum simulated distance of the showers from the telescope (units presumably meters -- confirm)
sample_fraction = 0.7  # fraction of all simulated showers contained in the test dataset
area = np.pi * scatter_radius**2
# binning (energies in GeV, consistent with the axis labels below)
n_bins_true = 5
n_bins_est = 10
e_min_est = 700
e_min_true = 700
e_max_est = 15e3
e_max_true = 15e3
@lru_cache()
def C_matrix(n):
    """Return the cached n x n curvature (second-difference) operator.

    Built from the identity by subtracting the flattened-array rolls by
    +1 and -1, exactly as in the Tikhonov regularization term below.
    """
    identity = np.eye(n)
    return 2.0 * identity - np.roll(identity, 1) - np.roll(identity, -1)
def llh_poisson(f_est, A, g, b):
    """Negative Poisson log-likelihood (up to constants) of spectrum f_est.

    A is the response matrix, g the observed counts, b the background.
    Returns the INVALID sentinel when any component of f_est is negative.
    """
    if np.any(f_est < 0):
        return INVALID
    expected = A @ f_est + b
    return np.sum(expected - g * np.log(expected + EPS))
def tikhonov_reg(f_est, tau, effective_area):
    """Tikhonov curvature penalty, scaled by the regularization strength tau.

    Applied to the log of the acceptance-corrected spectrum (only that is
    expected to be flat); the under- and overflow bins are excluded.
    """
    corrected_log = np.log(f_est[1:-1] / effective_area[1:-1] + EPS)
    curvature = C_matrix(len(f_est) - 2)
    return tau * np.sum((curvature @ corrected_log) ** 2)
def llh_poisson_tikhonov(f_est, A, g, b, tau, effective_area):
    """Regularized negative log-likelihood: Poisson term plus Tikhonov penalty.

    Returns the INVALID sentinel when any component of f_est is negative.
    """
    if np.any(f_est < 0):
        return INVALID
    penalty = tikhonov_reg(f_est, tau, effective_area)
    return llh_poisson(f_est, A, g, b) + penalty
def mean_correlation(cov):
    """Mean global-correlation coefficient of a covariance matrix.

    For each parameter i: sqrt(1 - 1 / (cov[i, i] * inv(cov)[i, i])).
    """
    precision = np.linalg.inv(cov)
    rho = np.sqrt(1 - 1 / (np.diag(cov) * np.diag(precision)))
    return np.mean(rho)
def unfold(A, g, b, tau, a_eff):
    """Unfold the spectrum by minimizing the regularized Poisson likelihood.

    Parameters: response matrix A, observed counts g, background b,
    regularization strength tau, effective area a_eff (sets the dimension).
    Returns (best-fit spectrum, covariance = inverse numerical Hessian).
    Raises AssertionError if the minimizer reports failure.
    """
    # allow only positive values
    bounds = [[1e-15, None] for _ in range(len(a_eff))]
    # uniform initial guess
    initial_guess = np.full(len(a_eff), 50)
    nllh = partial(
        llh_poisson_tikhonov,
        A=A, g=g, b=b,
        tau=tau, effective_area=a_eff
    )
    result = minimize(nllh, x0=initial_guess, bounds=bounds)
    # FIX: fail fast -- previously the success check ran only *after* the
    # expensive numerical Hessian had already been evaluated.
    assert result.success
    hesse = Hessian(nllh)
    cov = np.linalg.inv(hesse(result.x))
    return result.x, cov
if __name__ == '__main__':
    # logarithmic energy binning (true = simulated, est = reconstructed),
    # extended with under-/overflow bins on both sides
    bins_e_true = np.logspace(np.log10(e_min_true), np.log10(e_max_true), n_bins_true + 1)
    bins_e_est = np.logspace(np.log10(e_min_est), np.log10(e_max_est), n_bins_est + 1)
    bins_e_true = np.concatenate([[-np.inf], bins_e_true, [np.inf]])
    bins_e_est = np.concatenate([[-np.inf], bins_e_est, [np.inf]])
    # centers/widths of the regular (non-overflow) true-energy bins
    bin_centers = 0.5 * (bins_e_true[1:-2] + bins_e_true[2:-1])
    bin_width = np.diff(bins_e_true)[1:-1]
    print('Reading in data')
    gammas = read_h5py('build/inverse-problems/gamma_test_dl3.hdf5', key='events', columns=[
        'gamma_energy_prediction',
        'gamma_prediction',
        'theta_deg',
        'corsika_event_header_event_number',
        'corsika_event_header_total_energy',
    ])
    gammas_corsika = read_h5py(
        'build/inverse-problems/gamma_corsika_headers.hdf5',
        key='corsika_events',
        columns=['total_energy'],
    )
    crab_events = read_h5py('build/inverse-problems/open_crab_sample_dl3.hdf5', key='events', columns=[
        'gamma_prediction',
        'gamma_energy_prediction',
        'theta_deg',
        'theta_deg_off_1',
        'theta_deg_off_2',
        'theta_deg_off_3',
        'theta_deg_off_4',
        'theta_deg_off_5',
    ])
    crab_runs = read_h5py('build/inverse-problems/open_crab_sample_dl3.hdf5', key='runs')
    print('Applying event selection')
    on_time = crab_runs['ontime'].sum()
    prediction_threshold = 0.8
    theta_cut = np.sqrt(0.025)
    on_query = f'gamma_prediction > {prediction_threshold} and theta_deg <= {theta_cut}'
    gammas = gammas.query(on_query).copy()
    crab_on = crab_events.query(on_query).copy()
    # concatenate each of the off regions
    crab_off = []
    for i in range(1, 6):
        off_query = f'gamma_prediction > {prediction_threshold} and theta_deg_off_{i} <= {theta_cut}'
        crab_off.append(crab_events.query(off_query))
    crab_off = pd.concat(crab_off)
    # 5 off regions -> each off event is weighted by 1/5
    off_weights = np.full(len(crab_off), 0.2)
    n_on = len(crab_on)
    n_off = len(crab_off)
    print(f"n_on={n_on}, n_off={n_off}, σ={li_ma_significance(n_on, n_off, 0.2):.1f}")
    print('Calculating response')
    M, _, _ = np.histogram2d(
        gammas['gamma_energy_prediction'],
        gammas['corsika_event_header_total_energy'],
        bins=[bins_e_est, bins_e_true],
    )
    # normalize each true-energy column to 1 -> migration matrix
    M = M / M.sum(axis=0)
    print('Calculating effective area')
    n_detected, _ = np.histogram(gammas['corsika_event_header_total_energy'], bins=bins_e_true)
    n_simulated, _ = np.histogram(gammas_corsika['total_energy'], bins=bins_e_true)
    a_eff = (n_detected / sample_fraction) / n_simulated * area
    print('Plotting response')
    fig, (ax1, ax2) = plt.subplots(1, 2)
    img = ax1.matshow(M, cmap='inferno')
    ax1.set_xlabel(r'$E$-bin')
    ax1.xaxis.set_label_position('top')
    ax1.set_ylabel(r'$\hat{E}$-bin')
    fig.colorbar(img, ax=ax1)
    ax2.errorbar(bin_centers, a_eff[1:-1], xerr=bin_width / 2, linestyle='')
    ax2.set_xscale('log')
    ax2.set_yscale('log')
    # FIX: the math string was missing its closing '$' (unbalanced math
    # delimiters break TeX rendering at savefig); also add the
    # '\mathbin{/}' separator for consistency with the x-label below.
    ax2.set_ylabel(r'$A_\text{eff} \mathbin{/} \si{\meter\squared}$')
    ax2.set_xlabel(r'$E \mathbin{/} \si{\GeV}$')
    ax2.set_ylim(1e3, 1e5)
    fig.savefig('build/inverse-problems/fact_response.pdf')
    plt.close('all')
    g, _ = np.histogram(crab_on['gamma_energy_prediction'], bins=bins_e_est)
    # reuse the precomputed per-event off weights (was rebuilt inline before)
    b, _ = np.histogram(crab_off['gamma_energy_prediction'], bins=bins_e_est, weights=off_weights)
    print('Unfolding for many taus to find best')
    taus = np.logspace(-1.5, 1.5, 100)
    correlations = []
    results = []
    covs = []
    for tau in taus:
        f, cov = unfold(M, g, b, tau, a_eff)
        results.append(f)
        covs.append(cov)
        correlations.append(mean_correlation(cov))
    # pick the tau minimizing the mean global correlation
    # best_index = np.argmin(np.abs(taus - 0.1))
    best_index = np.argmin(correlations)
    f = results[best_index]
    cov = covs[best_index]
    print('plotting best result')
    fig, ax = plt.subplots()
    ax.plot(taus, correlations, '.')
    ax.axvline(taus[best_index], color='C1')
    ax.set_xlabel(r'$\tau$')
    ax.set_ylabel('Mean Correlation')
    ax.set_xscale('log')
    fig.savefig('build/inverse-problems/tau_vs_correlation.pdf')
    plt.close('all')
    # counts -> flux: per area (1e4 converts m^2 to cm^2), per on-time,
    # per energy-bin width.
    # NOTE(review): bin_width / 1000 makes the flux per TeV while the
    # y-label below says GeV⁻¹ -- confirm the intended unit.
    norm = 1 / (a_eff[1:-1] * 1e4) / on_time / (bin_width / 1000)
    e_plot = np.logspace(2.7, 4.2, 100)
    fig, ax = plt.subplots()
    # MAGIC log-parabola reference spectrum for the Crab Nebula
    ax.plot(
        e_plot,
        3.23e-11 * (e_plot / 1000)**(-2.47 - 0.24 * np.log10(e_plot / 1000)),
        label='MAGIC, JHEAP 2015',
        color='k'
    )
    ax.errorbar(
        bin_centers,
        f[1:-1] * norm,
        xerr=bin_width / 2,
        yerr=np.sqrt(np.diag(cov))[1:-1] * norm,
        ls='none',
        label='Unfolding',
        zorder=10,
    )
    ax.legend()
    ax.set_xlabel('E / GeV')
    ax.set_ylabel('Flux / (cm⁻² s⁻¹ GeV⁻¹)')
    ax.set_yscale('log')
    ax.set_xscale('log')
    fig.savefig('build/inverse-problems/fact_unfolding.pdf')
|
from chainer_chemistry.dataset.preprocessors.atomic_number_preprocessor import AtomicNumberPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.base_preprocessor import BasePreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.common import construct_adj_matrix # NOQA
from chainer_chemistry.dataset.preprocessors.common import construct_atomic_number_array # NOQA
from chainer_chemistry.dataset.preprocessors.common import construct_discrete_edge_matrix # NOQA
from chainer_chemistry.dataset.preprocessors.common import construct_supernode_feature # NOQA
from chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms # NOQA
from chainer_chemistry.dataset.preprocessors.ecfp_preprocessor import ECFPPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.relgat_preprocessor import RelGATPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.ggnn_preprocessor import GGNNPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gnnfilm_preprocessor import GNNFiLMPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gin_preprocessor import GINPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gwm_preprocessor import GGNNGWMPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gwm_preprocessor import GINGWMPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gwm_preprocessor import NFPGWMPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.gwm_preprocessor import RSGCNGWMPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.nfp_preprocessor import NFPPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.relgcn_preprocessor import RelGCNPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.rsgcn_preprocessor import RSGCNPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.schnet_preprocessor import SchNetPreprocessor # NOQA
from chainer_chemistry.dataset.preprocessors.weavenet_preprocessor import WeaveNetPreprocessor # NOQA
# Maps the method name used in configuration/CLI to its preprocessor class.
preprocess_method_dict = {
    'ecfp': ECFPPreprocessor,
    'nfp': NFPPreprocessor,
    'nfp_gwm': NFPGWMPreprocessor,
    'ggnn': GGNNPreprocessor,
    'ggnn_gwm': GGNNGWMPreprocessor,
    'gin': GINPreprocessor,
    'gin_gwm': GINGWMPreprocessor,
    'schnet': SchNetPreprocessor,
    'weavenet': WeaveNetPreprocessor,
    'relgcn': RelGCNPreprocessor,
    'rsgcn': RSGCNPreprocessor,
    'rsgcn_gwm': RSGCNGWMPreprocessor,
    'relgat': RelGATPreprocessor,
    'gnnfilm': GNNFiLMPreprocessor,
}
|
"""
Django settings for trywq project.
Based on the Django 1.9 template, with wq-specific modifications noted as such.
Generated by 'wq start' 1.0.0-dev.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
For more information about wq.db's Django settings see
http://wq.io/docs/settings
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# wq: extra dirname() to account for db/ folder
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# wq: SECRET_KEY and DEBUG are defined in local_settings.py
ALLOWED_HOSTS = ["try.wq.io"]
# Application definition
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'social.apps.django_app.default',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.gis',
    'rest_framework',
    'reversion',
    'wq.db.rest',
    'wq.db.rest.auth',
    'wq.db.patterns.identify',
    'vera.params',
    'vera.series',
    'vera.results',
    'campaigns',
)
# Swappable vera/wq model settings -- point the framework at the
# project-local campaign models.
WQ_EVENT_MODEL = 'campaigns.Event'
WQ_REPORT_MODEL = 'campaigns.Report'
WQ_PARAMETER_MODEL = 'campaigns.Parameter'
WQ_EVENTRESULT_MODEL = 'campaigns.EventResult'
# NOTE(review): MIDDLEWARE_CLASSES matches the Django 1.9 target of this
# file; it was replaced by MIDDLEWARE in Django 1.10+.
MIDDLEWARE_CLASSES = (
    'reversion.middleware.RevisionMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = "trywq.urls"
# wq: Recommended settings for Django, rest_framework, and social auth
from wq.db.default_settings import (
    TEMPLATES,
    SESSION_COOKIE_HTTPONLY,
    REST_FRAMEWORK,
    SOCIAL_AUTH_PIPELINE,
)
REST_FRAMEWORK['UPLOADED_FILES_USE_URL'] = False
TEMPLATES[0]['DIRS'] = [os.path.join(BASE_DIR, 'templates')]
# wq: Recommended settings unique to wq.db
from wq.db.default_settings import (
    ANONYMOUS_PERMISSIONS,
    SRID,
    DEFAULT_AUTH_GROUP,
)
WSGI_APPLICATION = 'trywq.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# wq: DATABASES is defined in local_settings.py
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# wq: Put social auth backends here (see http://psa.matiasaguirre.net/docs/backends/)
AUTHENTICATION_BACKENDS = [
    'social.backends.google.GoogleOAuth2',
    'social.backends.twitter.TwitterOAuth',
    'social.backends.github.GithubOAuth2',
    'django.contrib.auth.backends.ModelBackend',
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# wq: Configure paths for default project layout
STATIC_ROOT = os.path.join(BASE_DIR, 'htdocs', 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
VERSION_TXT = os.path.join(BASE_DIR, 'version.txt')
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = '/login'
# wq: Import local settings (SECRET_KEY, DEBUG, DATABASES, ...);
# silently skipped when the module does not exist.
try:
    from .local_settings import *
except ImportError:
    pass
# wq: Determine if we are running off django's testing server
import sys
DEBUG_WITH_RUNSERVER = 'manage.py' in sys.argv[0]
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import logging
from esrally import exceptions
from esrally.utils import io, git, console, versions
class RallyRepository:
    """
    Manages Rally resources (e.g. teams or tracks).
    """
    def __init__(self, remote_url, root_dir, repo_name, resource_name, offline, fetch=True):
        """Set up (and, when online with a remote, refresh) the local repo.

        :param remote_url: URL of the remote git repository; None/blank means local-only.
        :param root_dir: Directory containing all repositories.
        :param repo_name: Directory name of this repository below ``root_dir``.
        :param resource_name: Human-readable resource name used in messages.
        :param offline: If True, never contact the remote.
        :param fetch: If False, skip fetching even when remote and online.
        :raises exceptions.SystemSetupError: for local-only repos that are
            missing or not git working copies.
        """
        # If no URL is found, we consider this a local only repo (but still require that it is a git repo)
        self.url = remote_url
        self.remote = self.url is not None and self.url.strip() != ""
        self.repo_dir = os.path.join(root_dir, repo_name)
        self.resource_name = resource_name
        self.offline = offline
        self.logger = logging.getLogger(__name__)
        if self.remote and not self.offline and fetch:
            # a normal git repo with a remote
            if not git.is_working_copy(self.repo_dir):
                git.clone(src=self.repo_dir, remote=self.url)
            else:
                try:
                    git.fetch(src=self.repo_dir)
                except exceptions.SupplyError:
                    # best-effort: stale local state is acceptable
                    console.warn("Could not update %s. Continuing with your locally available state." % self.resource_name)
        else:
            if not git.is_working_copy(self.repo_dir):
                if io.exists(self.repo_dir):
                    raise exceptions.SystemSetupError("[{src}] must be a git repository.\n\nPlease run:\ngit -C {src} init"
                                                      .format(src=self.repo_dir))
                else:
                    raise exceptions.SystemSetupError("Expected a git repository at [{src}] but the directory does not exist."
                                                      .format(src=self.repo_dir))

    def update(self, distribution_version):
        """Check out (and, for remote repos, rebase) the branch best matching
        ``distribution_version``; falls back to local branches when no remote
        match exists.

        :raises exceptions.SystemSetupError: if no matching branch is found.
        :raises exceptions.DataError: if the underlying git operations fail.
        """
        try:
            if self.remote:
                branch = versions.best_match(git.branches(self.repo_dir, remote=self.remote), distribution_version)
                if branch:
                    # Allow uncommitted changes iff we do not have to change the branch
                    self.logger.info(
                        "Checking out [%s] in [%s] for distribution version [%s].", branch, self.repo_dir, distribution_version)
                    git.checkout(self.repo_dir, branch=branch)
                    self.logger.info("Rebasing on [%s] in [%s] for distribution version [%s].", branch, self.repo_dir, distribution_version)
                    try:
                        git.rebase(self.repo_dir, branch=branch)
                    except exceptions.SupplyError:
                        self.logger.exception("Cannot rebase due to local changes in [%s]", self.repo_dir)
                        console.warn(
                            "Local changes in [%s] prevent %s update from remote. Please commit your changes." %
                            (self.repo_dir, self.resource_name))
                    return
                else:
                    msg = "Could not find %s remotely for distribution version [%s]. Trying to find %s locally." % \
                          (self.resource_name, distribution_version, self.resource_name)
                    self.logger.warning(msg)
            branch = versions.best_match(git.branches(self.repo_dir, remote=False), distribution_version)
            if branch:
                if git.current_branch(self.repo_dir) != branch:
                    self.logger.info("Checking out [%s] in [%s] for distribution version [%s].", branch, self.repo_dir, distribution_version)
                    git.checkout(self.repo_dir, branch=branch)
            else:
                raise exceptions.SystemSetupError("Cannot find %s for distribution version %s" % (self.resource_name, distribution_version))
        except exceptions.SupplyError as e:
            tb = sys.exc_info()[2]
            # FIX: exceptions have no `.message` attribute on Python 3; the
            # old `e.message` raised AttributeError and masked the DataError.
            # %-formatting the exception itself uses str(e).
            raise exceptions.DataError("Cannot update %s in [%s] (%s)." % (self.resource_name, self.repo_dir, e)).with_traceback(tb)

    def checkout(self, revision):
        """Check out the given git revision in the local repository."""
        self.logger.info("Checking out revision [%s] in [%s].", revision, self.repo_dir)
        git.checkout(self.repo_dir, revision)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/11 0:14
# @Author : ganliang
# @File : setup.py.py
# @Desc : 安装包
from setuptools import setup, find_packages
"""
编译 python setup.py build
安装 python setup.py install
打包(源代码发布) python setup.py sdist
将项目上传到pypi python setup.py sdist upload
打包成可执行(exe、rpm) python setup.py bdist
--formats=rpm RPM distribution
--formats=gztar gzip'ed tar file
--formats=bztar bzip2'ed tar file
--formats=ztar compressed tar file
--formats=tar tar file
--formats=wininst Windows executable installer
--formats=zip ZIP file
"""
# setup(
# name='mmscrapy',
# version='1.0',
# packages=find_packages(),
# entry_points={'scrapy': ['settings = mmscrapy.settings']}
# )
setup(name="mmscrapy",
version="0.0.1",
description="mmscrapy爬虫程序是使用scrapy框架搭建的爬虫项目,解scrapy的使用方式和学习scrapy的使用技巧、编写自己的爬虫程序、分布式爬虫功能支持,scrapy支持很多特性,不必要自己创轮子",
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
author="甘亮",
author_email="lovercws@gmail.com",
keywords="python版本的爬虫程序",
# py_modules=["main"], #将一个模块打成包
packages=find_packages(),
license='Apache License',
include_package_data=True,
platforms="any",
url="https://github.com/mumupy/mmscrapy.git",
install_requires=[],
scripts=[],
entry_points={'scrapy': ['settings = mmscrapy.settings']}
)
|
#!/usr/bin/env python3
# _*_coding:utf-8_*_
# Created by "LiuXin"
# Time 2016/5/25
# Base handler class
import tornado.web
from setting import client
class BaseHandler(tornado.web.RequestHandler):
    """Common base class for this application's request handlers.

    Exposes the shared database handle ``client.smartSearch`` (``client``
    comes from setting.py -- presumably a MongoDB client; confirm) and
    cookie-based current-user lookup.
    """
    def __init__(self, application, request, **kwargs):
        super(BaseHandler, self).__init__(application, request, **kwargs)
        # database handle shared by all subclass handlers
        self.db = client.smartSearch

    def data_received(self, chunk):
        # streamed request bodies are not used; required abstract override
        pass

    def get_current_user(self):
        """Tornado hook that populates self.current_user from the secure cookie."""
        return self.get_secure_cookie("user")

    def on_finish(self):
        # no per-request cleanup needed
        pass
|
from django.contrib import admin
from .models import Project, Tag, Skill, Type, Profile, AppForProject
class SkillInline(admin.TabularInline):
    """Inline editor for the Project<->Skill many-to-many through table."""
    model = Project.skill.through
    extra = 0
class TagsInline(admin.TabularInline):
    """Inline editor for the Project<->Tag many-to-many through table."""
    model = Project.tag.through
    extra = 0
class ProjectAdmin(admin.ModelAdmin):
    """Admin configuration for Project with inline skill/tag editing."""
    list_display = ['project_name', 'pub_date', 'type']
    list_filter = ['head', 'type']
    fields = ['project_name', 'pub_date', ('start_date', 'end_date'), 'head', 'brief_summary', 'content',
              'app_deadline', 'num_places', 'type', 'members', 'status'
              ]
    inlines = [SkillInline, TagsInline]
    # NOTE(review): 'exclude' expects model *field* names (presumably
    # 'skill'/'tag', per the through-model attributes above); these
    # capitalized class names are unlikely to match any field, and 'fields'
    # already restricts the form -- confirm whether this line is needed.
    exclude = ['Skill', 'Tag']
# Project gets the customized admin; the remaining models use the default.
admin.site.register(Project, ProjectAdmin)
admin.site.register(Skill)
admin.site.register(Tag)
admin.site.register(Type)
admin.site.register(Profile)
admin.site.register(AppForProject)
|
class NodeOp:
    """A (grouped) dataflow-operator node identified by its pact name."""
    # Grouped operator name -> the concrete pact names it covers.
    OPERATORS = {
        "Data Source": ["Data Source"],
        "Filter": ["Filter"],
        "Map": ["Map", "FlatMap", "CoMap", "CoFlatMap", "MapPartition"],
        "Reduce": ["Reduce"],
        "Group by": ["GroupReduce", "CoGroup", "GroupCombine"],
        "Join": ["Join", "Outer Join"],
        "Bulk Iteration": ["Bulk Iteration", "Workset Iteration"],
        "Partition": ["Partition"],
        "Sort-Partition": ["Sort-Partition"],
        "Data Sink": ["Data Sink"]
    }
    # Display/feature-name mapping for plan fields, including per-pact
    # parent/child counter column names.
    FIELDS = {
        "Plan Id": "plan_id",
        "Grouped Pact": "g_pact",
        "Parent Grouped Pact": "p_g_pact",
        "Children Grouped Pact": "c_g_pact",
        "parents": {
            "Data Source": "#_p_source",
            "Map": "#_p_map",
            "Filter": "#_p_filter",
            "Reduce": "#_p_reduce",
            "Join": "#_p_join",
            "Group by": "#_p_groupby",
            "Partition": "#_p_partition",
            "Sort-Partition": "#_p_sort",
            "Bulk Iteration": "#_p_iteration",
        },
        "children": {
            "Data Sink": "#_c_source",
            "Map": "#_c_map",
            "Filter": "#_c_filter",
            "Reduce": "#_c_reduce",
            "Join": "#_c_join",
            "Group by": "#_c_groupby",
            "Partition": "#_c_partition",
            "Sort-Partition": "#_c_sort",
            "Bulk Iteration": "#_c_iteration",
        }
    }

    @classmethod
    def get_operator_color(cls, node):
        """Return the display color for a raw plan-node dict (keyed by 'pact')."""
        # FIX (idiom): the first classmethod parameter was named `c`; the
        # conventional name is `cls`. No caller-visible change.
        pact = node["pact"]
        if pact == "Bulk Iteration":
            return "purple"
        if pact in ("Data Source", "Data Sink"):
            return "green"
        if pact == "Partition":
            return "orange"
        return "lightblue"

    @classmethod
    def get_operator_pacts(cls):
        """Return a flat list of every concrete pact name in OPERATORS."""
        return [v for g in cls.OPERATORS.values() for v in g]

    def __init__(self, pact, n_id):
        """Create a node for operator `pact` with numeric/plan id `n_id`."""
        self.pact = pact
        self.n_id = n_id
        self.node_id = f"{pact}_{n_id}"
        self.n_children = 1
        # meta-data
        # NOTE(review): unlike get_operator_color(), this duplicate logic
        # does NOT color "Partition" nodes "orange" -- confirm whether the
        # difference is intentional before unifying (kept as-is here).
        if self.pact == "Bulk Iteration":
            self.color = "purple"
        elif self.pact in ("Data Source", "Data Sink"):
            self.color = "green"
        else:
            self.color = "lightblue"

    def __str__(self):
        return self.node_id
|
"""
Tests for mobile API utilities.
"""
import ddt
from django.test import TestCase
from ..decorators import mobile_course_access, mobile_view
@ddt.ddt
class TestMobileAPIDecorators(TestCase):
    """
    Basic tests for mobile api decorators to ensure they retain the docstrings.
    """

    @ddt.data(mobile_view, mobile_course_access)
    def test_function_decorator(self, decorator):
        @decorator()
        def decorated_func():
            """
            Test docstring of decorated function.
            """
            pass  # lint-amnesty, pylint: disable=unnecessary-pass

        # The decorator must preserve the wrapped function's metadata.
        self.assertIn('Test docstring of decorated function.', decorated_func.__doc__)
        self.assertEqual(decorated_func.__name__, 'decorated_func')
        self.assertTrue(decorated_func.__module__.endswith('test_decorator'))
|
from base64_decode_popup.EncodingAnalyzer import EncodingAnalyzer
from base64_decode_popup.EncodingAnalysis import EncodingAnalysis
class FilteredBaseEncodingAnalyzer(EncodingAnalyzer):
    """
    Analyzer which considers only results passing the
    supplied string filter.
    """

    def __init__(self, delegate, encoded_string_filter):
        self.delegate = delegate
        self.encoded_string_filter = encoded_string_filter

    def analyze(self, encoded_string):
        """Run the delegate analyzer only for strings the filter accepts."""
        if not self.encoded_string_filter.should_evaluate_encoded_string(encoded_string):
            return EncodingAnalysis(None)
        return self.delegate.analyze(encoded_string)
|
# Collatz chain-length solver (Project Euler 14 style): precompute the chain
# length of every n <= limit with memoisation, then answer queries asking for
# the starting number <= n with the longest chain.
import time
t= time.time()
limit = 5000000
# collatz[n] = number of terms in the Collatz chain starting at n (1 counts as 1).
collatz = {1:1,0:0}
tmp = []
num = 1
cnt = 0
# Seed all powers of two: the chain of 2*num is one step longer than num's.
while 2*num <= limit:
    collatz[2*num] = collatz[num] + 1
    num *= 2
for i in range(3,limit+1):
    # Only walk odd, not-yet-memoised starting points; evens are filled below.
    if i & 1 and i not in collatz:
        tmp.clear()
        num = i
        # Descend until we hit a value with a known chain length, recording the path.
        while num != 1 and num not in collatz:
            tmp.append(num)
            if num & 1:
                # Shortcut: odd n -> (3n+1)/2 combines two Collatz steps into one.
                num = (num*3+1)//2
            else:
                num >>= 1
        tmp.append(num)
        bound = len(tmp)-1
        # print(tmp)
        # input()
        # Back-fill chain lengths along the recorded path (tmp[bound] is known).
        for j in range(bound-1, -1, -1):
            # if tmp[j] <= limit:
            if tmp[j] & 1:
                # The odd shortcut above accounted for two steps.
                collatz[tmp[j]] = collatz[tmp[j+1]] + 2
            else:
                collatz[tmp[j]] = collatz[tmp[j + 1]] + 1
            k = tmp[j]
            # Propagate the new value to even multiples within the limit.
            while 2*k <= limit:
                collatz[2*k] = 1+collatz[k]
                k *= 2
        cnt += 1
print(len(collatz))
print(max(collatz.values()))
print(time.time()-t)
# ans[n] = starting value <= n with the longest chain (strict '<' keeps the
# smallest such value on ties).
ans = []
curr_max = 0
curr = 0
for i in range(0,limit+1):
    if curr_max < collatz[i]:
        curr_max = collatz[i]
        curr = i
    ans.append(curr)
# Answer each query from the precomputed prefix maxima.
t = int(input())
for tc in range(t):
    n = int(input())
    print(ans[n])
|
'''
Based on the paper: Xoodoo cookbook
found at: https://eprint.iacr.org/2018/767.pdf
C X.Y is reference to the paper at chapter X, subchapter Y
'''
# C 2 (Table 1). NOTE: the bottom four round constants were deliberately
# removed by the author so the length of the key would be the same as Sparkle.
HASH_CONST = 0x00000058000000D000000060000000F000000038000001200000002C000001A0
def cyclic(plane, x, y):
    'rotate a plane: rows down by y (mod 8) and each row right by x (mod 4)'
    x, y = x % 4, y % 8
    rolled = plane[-y:] + plane[:-y]
    return [row[-x:] + row[:-x] for row in rolled]
def hex_not(n):
    'return the 4-bit complement of a nibble (0xF - (n mod 16))'
    return (n % 0x10) ^ 0xF


def bitwise_not(plane):
    'apply hex_not to every nibble of a plane'
    return [list(map(hex_not, row)) for row in plane]
def bitwise_xor(plane1, plane2):
    'nibble-wise XOR of two planes (plane1 drives the iteration)'
    out = []
    for j, row in enumerate(plane1):
        out.append([v ^ plane2[j][i] for i, v in enumerate(row)])
    return out
def data2planes(data):
    'split a 384-bit integer into 3 planes of 8 rows x 4 nibbles, MSB first'
    # nibble i holds bits 4*(95-i)..4*(95-i)+3, i.e. most significant first
    nibbles = [(data >> (4 * (95 - i))) & 0xF for i in range(96)]
    return [[[nibbles[z * 32 + y * 4 + x] for x in range(4)]
             for y in range(8)]
            for z in range(3)]
def planes2data(planes):
    'pack nested nibble planes back into one integer, MSB first'
    data = 0
    for nibble in (v for plane in planes for row in plane for v in row):
        data = (data << 4) | nibble
    return data
def key2planes(key):
    'spread a 256-bit key, one nibble per row (column 0), over 8 planes of 8 rows'
    planes = [[[0] * 4 for _ in range(8)] for _ in range(8)]
    for plane in planes:
        for row in plane:
            row[0] = key & 0xF  # low nibble first
            key >>= 4
    return planes
def xoodyak_enc(in_data, key=None):
    '''
    Runs a round of xoodoo as explained in algorithm 1 C2
    Params:
        in_data (384-bit data): the input to be encrypted
        key (256-bit data): the key used for encryption, if left empty will use the default hash const
    Returns:
        data (384-bit data): the encrypted output
    '''
    # if no key is given, then take the default constant (PEP 8: 'is None', not '== None')
    if key is None:
        key = HASH_CONST
    data = data2planes(in_data)
    state = data2planes(key)
    key = key2planes(key)
    for c in key:  # one pass per round-constant plane (8 rounds)
        # theta: column parity of the three planes
        p = bitwise_xor(state[0], state[1])
        p = bitwise_xor(p, state[2])
        e = cyclic(p, 1, 5)
        p = cyclic(p, 1, 14)  # same as (1,6) the algorithm asks for (1, 14) for some reason
        e = bitwise_xor(e, p)
        # NOTE(review): 'e' is never XORed back into the planes below, so the
        # theta effect is dropped -- looks like a transcription gap from the
        # paper; kept as-is to preserve current behavior.
        state[0] = bitwise_xor(state[0], c)
        state[1] = cyclic(state[1], 1, 0)
        state[2] = cyclic(state[1], 0, 11)  # again same as (1, 3)
        # NOTE(review): the line above rotates state[1] into state[2]; the
        # spec's rho step rotates state[2] itself -- confirm before changing.
        # chi: complement-and-xor nonlinear layer
        b0 = bitwise_not(state[1])
        b1 = bitwise_not(state[2])
        b2 = bitwise_not(state[0])
        b0 = bitwise_xor(b0, state[2])
        b1 = bitwise_xor(b1, state[0])
        b2 = bitwise_xor(b2, state[1])
        state[0] = bitwise_xor(state[0], b0)
        state[1] = bitwise_xor(state[1], b1)
        state[2] = bitwise_xor(state[2], b2)
        state[1] = cyclic(state[1], 0, 1)
        state[2] = cyclic(state[1], 2, 8)  # again same as (2, 0)
        # whiten the data with the evolving state, then rotate the data planes
        data[0] = bitwise_xor(data[0], state[0])
        data[1] = bitwise_xor(data[1], state[1])
        data[2] = bitwise_xor(data[2], state[2])
        data[0], data[1], data[2] = data[1], data[2], data[0]
    return planes2data(data)
def xoodyak_dec(in_data, key=None):
    '''
    Reverses a round of xoodoo as explained in algorithm 1 C2
    Params:
        in_data (384-bit data): the input to be decrypted
        key (256-bit data): the key used for encryption, if left empty will use the default hash const
    Returns:
        data (384-bit data): the decrypted output
    '''
    # if no key is given, then take the default constant (PEP 8: 'is None', not '== None')
    if key is None:
        key = HASH_CONST
    data = data2planes(in_data)
    state = data2planes(key)
    key = key2planes(key)
    # Recompute the per-round keystream states, then undo the whitening in reverse.
    xors = []
    for c in key:
        p = bitwise_xor(state[0], state[1])
        p = bitwise_xor(p, state[2])
        e = cyclic(p, 1, 5)
        p = cyclic(p, 1, 14)  # same as (1,6) the algorithm asks for (1, 14) for some reason
        e = bitwise_xor(e, p)
        # NOTE(review): 'e' unused below, mirroring the same apparent gap in
        # xoodyak_enc -- kept for symmetry so enc/dec stay inverses.
        state[0] = bitwise_xor(state[0], c)
        state[1] = cyclic(state[1], 1, 0)
        state[2] = cyclic(state[1], 0, 11)  # again same as (1, 3)
        b0 = bitwise_not(state[1])
        b1 = bitwise_not(state[2])
        b2 = bitwise_not(state[0])
        b0 = bitwise_xor(b0, state[2])
        b1 = bitwise_xor(b1, state[0])
        b2 = bitwise_xor(b2, state[1])
        state[0] = bitwise_xor(state[0], b0)
        state[1] = bitwise_xor(state[1], b1)
        state[2] = bitwise_xor(state[2], b2)
        state[1] = cyclic(state[1], 0, 1)
        state[2] = cyclic(state[1], 2, 8)  # again same as (2, 0)
        # shallow copy is safe: each round rebinds the plane lists above
        xors.append(state.copy())
    xors.reverse()
    for xor in xors:
        # undo the plane rotation, then strip the round's whitening
        data[1], data[2], data[0] = data[0], data[1], data[2]
        data[0] = bitwise_xor(data[0], xor[0])
        data[1] = bitwise_xor(data[1], xor[1])
        data[2] = bitwise_xor(data[2], xor[2])
    return planes2data(data)
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# More complex Hazards Tests
#
# Author:
# ----------------------------------------------------------------------------
# GFE/AWIPS automated test steps for the Hazard_CFW_Local product.
# Each dict drives one TestScript step:
#   name              -- unique test id
#   productType       -- text product to format (None = grid setup/cleanup only)
#   commentary        -- human-readable description of the scenario
#   createGrids / deleteGrids -- hazard grid edits applied before formatting;
#       tuples presumably (db, element, type, startHr, endHr, value, zones)
#       -- confirm against TestScript.generalTestScript
#   checkStrings      -- substrings that must appear in the generated product
#   clearHazardsTable -- when 1, reset the VTEC/hazards table first
scripts = [
    {
        "name": "Hazards0",
        "productType": None,
        "commentary": "Deleting hazard grids.",
        "deleteGrids": [
            ("Fcst", "Hazards", "SFC", "all", "all"),
        ],
        "clearHazardsTable": 1,
        "checkStrings": [],
    },
    {
        "name": "Hazards1",
        "commentary": "Setting up CF.Y for three zones to generate a NEW vtec.",
        "productType": "Hazard_CFW_Local",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 22, 28, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 22, 28, "CF.Y", ["FLZ148", "FLZ149", "FLZ050"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ050-148-149-010800-",
            "/O.NEW.KTBW.CF.Y.0001.100101T2200Z-100102T0400Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY IN EFFECT FROM 5 PM TO 11 PM EST FRIDAY...",
            "The National Weather Service in Tampa Bay Ruskin has issued a Coastal Flood Advisory, which is in effect from 5 PM to 11 PM EST Friday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards2",
        "productType": "Hazard_CFW_Local",
        "commentary": "No grid changes, CF.Y in three zones to generate a CON vtec.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 22, 28, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 22, 28, "CF.Y", ["FLZ148", "FLZ149", "FLZ050"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ050-148-149-010800-",
            "/O.CON.KTBW.CF.Y.0001.100101T2200Z-100102T0400Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY REMAINS IN EFFECT FROM 5 PM TO 11 PM EST FRIDAY...",
            # "A Coastal Flood Advisory remains in effect from 5 PM to 11 PM EST Friday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards3",
        "productType": "Hazard_CFW_Local",
        "commentary": "Extending ending time of the CF.Y in three zones to generate a EXT vtec.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "CF.Y", ["FLZ148", "FLZ149", "FLZ050"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ050-148-149-010800-",
            "/O.EXT.KTBW.CF.Y.0001.100101T2200Z-100102T1000Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY NOW IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            # "The Coastal Flood Advisory is now in effect from 5 PM Friday to 5 AM EST Saturday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards4",
        "productType": "Hazard_CFW_Local",
        "commentary": "Moving starting time earlier for the CF.Y in three zones to generate a EXT vtec.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 16, 28, "CF.Y", ["FLZ148", "FLZ149", "FLZ050"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ050-148-149-010800-",
            "/O.EXT.KTBW.CF.Y.0001.100101T1600Z-100102T0400Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY NOW IN EFFECT FROM 11 AM TO 11 PM EST FRIDAY...",
            # "The Coastal Flood Advisory is now in effect from 11 AM to 11 PM EST Friday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards5",
        "productType": "Hazard_CFW_Local",
        "commentary": "Adding the CF.Y to one more zone to generate a EXA with CON vtec in two segments.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 16, 28, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 16, 28, "CF.Y", ["FLZ148", "FLZ149", "FLZ050", "FLZ165"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ165-010800-",
            "/O.EXA.KTBW.CF.Y.0001.100101T1600Z-100102T0400Z/",
            "Coastal Lee-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY IN EFFECT FROM 11 AM TO 11 PM EST FRIDAY...",
            "The National Weather Service in Tampa Bay Ruskin has issued a Coastal Flood Advisory, which is in effect from 11 AM to 11 PM EST Friday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "$$",
            "FLZ050-148-149-010800-",
            "/O.CON.KTBW.CF.Y.0001.100101T1600Z-100102T0400Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY REMAINS IN EFFECT FROM 11 AM TO 11 PM EST FRIDAY...",
            # "A Coastal Flood Advisory remains in effect from 11 AM to 11 PM EST Friday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards6",
        "productType": "Hazard_CFW_Local",
        "commentary": "Changing the ending time of the CF.Y and adding another zone to generate a EXB with EXT vtec in two segments.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 16, 28, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "CF.Y", ["FLZ148", "FLZ149", "FLZ050", "FLZ165", "FLZ139"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ139-010800-",
            "/O.EXB.KTBW.CF.Y.0001.100101T2200Z-100102T1000Z/",
            "Levy-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            "The National Weather Service in Tampa Bay Ruskin has issued a Coastal Flood Advisory, which is in effect from 5 PM Friday to 5 AM EST Saturday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "$$",
            "FLZ050-148-149-165-010800-",
            "/O.EXT.KTBW.CF.Y.0001.100101T2200Z-100102T1000Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-Coastal Lee-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY NOW IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            # "The Coastal Flood Advisory is now in effect from 5 PM Friday to 5 AM EST Saturday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards7",
        "productType": "Hazard_CFW_Local",
        "commentary": "Removing the CF.Y from two zones, but leaving it in the other three zones to generate a CAN and CON vtec in two segments.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "CF.Y", ["FLZ148", "FLZ149", "FLZ050"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ139-165-010100-",
            "/O.CAN.KTBW.CF.Y.0001.100101T2200Z-100102T1000Z/",
            "Coastal Levy-Coastal Lee-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY IS CANCELLED...",
            "The National Weather Service in Tampa Bay Ruskin has cancelled the Coastal Flood Advisory.",
            "$$",
            "FLZ050-148-149-010800-",
            "/O.CON.KTBW.CF.Y.0001.100101T2200Z-100102T1000Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD ADVISORY REMAINS IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            # "A Coastal Flood Advisory remains in effect from 5 PM Friday to 5 AM EST Saturday.",
            # "|* SEGMENT TEXT GOES HERE *|.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards8",
        "productType": "Hazard_CFW_Local",
        "commentary": "Upgrading the CF.Y to a CF.W to geenerate a UPG/NEW vtec.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 22, 34, "CF.W", ["FLZ148", "FLZ149", "FLZ050"]),
        ],
        "checkStrings": [
            "URGENT - IMMEDIATE BROADCAST REQUESTED",
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ050-148-149-010800-",
            "/O.UPG.KTBW.CF.Y.0001.100101T2200Z-100102T1000Z/",
            "/O.NEW.KTBW.CF.W.0001.100101T2200Z-100102T1000Z/",
            "Pinellas-Coastal Hernando-Coastal Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD WARNING IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            "The National Weather Service in Tampa Bay Ruskin has issued a Coastal Flood Warning, which is in effect from 5 PM Friday to 5 AM EST Saturday. The Coastal Flood Advisory is no longer in effect.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Warning means that flooding is occurring or imminent. Coastal residents in the warned area should be alert for rising water, and take appropriate action to protect life and property.",
            "&&",
            "$$",
        ],
    },
    {
        "name": "Hazards9",
        "productType": "Hazard_CFW_Local",
        "commentary": "Adding a new CF.Y event in one zone, while leaving the existing CF.W in the other zones, to generate two segments with a NEW/CON CF.Y/CF.W and CON CF.W.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 35, 41, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 35, 41, "CF.Y", ["FLZ149"]),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ149-010800-",
            "/O.NEW.KTBW.CF.Y.0002.100102T1100Z-100102T1700Z/",
            "/O.CON.KTBW.CF.W.0001.100101T2200Z-100102T1000Z/",
            "Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD WARNING REMAINS IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            "...COASTAL FLOOD ADVISORY IN EFFECT FROM 6 AM TO NOON EST SATURDAY...",
            # "The National Weather Service in Tampa Bay Ruskin has issued a Coastal Flood Advisory, which is in effect from 6 AM to noon EST Saturday. A Coastal Flood Warning remains in effect from 5 PM Friday to 5 AM EST Saturday.",
            "The National Weather Service in Tampa Bay Ruskin has issued a Coastal Flood Advisory, which is in effect from 6 AM to noon EST Saturday.",
            "PRECAUTIONARY/PREPAREDNESS ACTIONS...",
            "A Coastal Flood Warning means that flooding is occurring or imminent. Coastal residents in the warned area should be alert for rising water, and take appropriate action to protect life and property.",
            "A Coastal Flood Advisory indicates that onshore winds and tides will combine to generate flooding of low areas along the shore.",
            "&&",
            "$$",
            "FLZ050-148-010800-",
            "/O.CON.KTBW.CF.W.0001.100101T2200Z-100102T1000Z/",
            "Pinellas-Coastal Hernando-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD WARNING REMAINS IN EFFECT FROM 5 PM FRIDAY TO 5 AM EST SATURDAY...",
            # "A Coastal Flood Warning remains in effect from 5 PM Friday to 5 AM EST Saturday.",
            "A Coastal Flood Warning means that flooding is occurring or imminent. Coastal residents in the warned area should be alert for rising water, and take appropriate action to protect life and property.",
            "$$",
        ],
    },
    {
        "name": "Hazards10",
        "productType": "Hazard_CFW_Local",
        "commentary": "Removing all hazards, to generate CAN statements for CF.W and CF.Y events, in separate segments.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 16, 41, "<None>", "all"),
        ],
        "checkStrings": [
            "Coastal Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "700 PM EST Thu Dec 31 2009",
            "FLZ149-",
            "/O.CAN.KTBW.CF.W.0001.100101T2200Z-100102T1000Z/",
            "/O.CAN.KTBW.CF.Y.0002.100102T1100Z-100102T1700Z/",
            "Pasco-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD WARNING IS CANCELLED...",
            "...COASTAL FLOOD ADVISORY IS CANCELLED...",
            "The National Weather Service in Tampa Bay Ruskin has cancelled the Coastal Flood Warning. The Coastal Flood Advisory has been cancelled.",
            "FLZ050-148",
            "/O.CAN.KTBW.CF.W.0001.100101T2200Z-100102T1000Z/",
            "Pinellas-Coastal Hernando-",
            "700 PM EST Thu Dec 31 2009",
            "...COASTAL FLOOD WARNING IS CANCELLED...",
            "The National Weather Service in Tampa Bay Ruskin has cancelled the Coastal Flood Warning.",
        ],
    },
    {
        "name": "Hazards11",
        "productType": "Hazard_CFW_Local",
        "commentary": "Generating a LS.S in one zone, to generate NEW vtec.",
        "createGrids": [
            ("Fcst", "Hazards", "DISCRETE", 20, 28, "<None>", "all"),
            ("Fcst", "Hazards", "DISCRETE", 20, 28, "LS.S", ["FLZ139"]),
        ],
        "checkStrings": [
            "Lakeshore Hazard Message",
            "National Weather Service Tampa Bay Ruskin FL",
            "/O.NEW.KTBW.LS.S.0001.100101T2000Z-100102T0400Z/",
            "Levy-",
            # "|* STATEMENT TEXT GOES HERE *|.",
            "$$",
        ],
    },
    {
        "name": "HazardsCleanup",
        "commentary": "Cleanup of hazards grids and hazard table",
        "productType": None,
        "deleteGrids": [
            ("Fcst", "Hazards", "SFC", "all", "all"),
        ],
        "clearHazardsTable": 1,
        "checkStrings": [],
    },
]
import TestScript
def testScript(self, dataMgr):
    """Run the hazard test scripts above with the standard formatter defaults."""
    default_settings = dict(
        cmdLineVars="{('Issued By', 'issuedBy'): None}",
        gridsStartTime="20100101_0000",
        database="<site>_GRID__Fcst_00000000_0000",
        publishGrids=0,
        decodeVTEC=1,
        orderStrings=1,
    )
    return TestScript.generalTestScript(self, dataMgr, scripts, default_settings)
|
import copy
import json
import logging
import math
import csv
from mrtarget.Settings import Config, file_or_resource
from mrtarget.constants import Const
from mrtarget.common.DataStructure import JSONSerializable, PipelineEncoder
from mrtarget.common.IO import check_to_open, URLZSource
from mrtarget.modules import GeneData
from mrtarget.modules.ECO import ECO, load_eco_scores_table
from mrtarget.modules.EFO import EFO, get_ontology_code_from_url
from mrtarget.modules.GeneData import Gene
logger = logging.getLogger(__name__)
class DataNormaliser(object):
    """Callable that linearly rescales values from one range to another."""

    def __init__(self, min_value, max_value, old_min_value=0., old_max_value=1., cap=True):
        '''just set all initial values and ranges'''
        self.min = float(min_value)
        self.max = float(max_value)
        self.old_min = old_min_value
        self.old_max = old_max_value
        self.cap = cap

    def __call__(self, value):
        '''apply method to wrap the normalization function'''
        return self.renormalize(value,
                                (self.old_min, self.old_max),
                                (self.min, self.max),
                                self.cap)

    @staticmethod
    def renormalize(n, start_range, new_range, cap=True):
        '''apply the function f(x) to n using and old (start_range) and a new range
        where f(x) = (dNewRange / dOldRange * (n - old_range_lower_bound)) + new_lower
        if cap is True then f(n) will be capped to new range boundaries
        '''
        n = float(n)
        old_span = start_range[1] - start_range[0]
        new_span = new_range[1] - new_range[0]
        if not old_span and not new_span:
            # both ranges are degenerate: pass the value through untouched
            scaled = n
        else:
            try:
                scaled = new_span * (n - start_range[0]) / old_span + new_range[0]
            except ZeroDivisionError:
                # degenerate source range: collapse onto the new lower bound
                scaled = new_range[0]
        if not cap:
            return scaled
        upper = max(new_range)
        lower = min(new_range)
        if scaled > upper:
            return upper
        if scaled < lower:
            return lower
        return scaled
class ExtendedInfo():
    """Base holder for a minimal JSON-serialisable projection of a domain object.

    Subclasses define a `root` key and implement extract_info() to populate
    self.data from the wrapped object.
    """
    # NOTE(review): class-level mutable default shared by all instances until
    # extract_info()/load_json() rebinds it on the instance -- confirm intended.
    data = dict()
    def extract_info(self, obj):
        # Subclasses must populate self.data from the given domain object.
        raise NotImplementedError()
    def to_json(self):
        # Serialise the collected projection.
        return json.dumps(self.data)
    def load_json(self, data):
        # Replace the instance projection from a JSON string.
        self.data = json.loads(data)
class ExtendedInfoGene(ExtendedInfo):
    '''minimal info from Gene class'''
    root = "gene_info"

    def __init__(self, gene):
        if not isinstance(gene, Gene):
            raise AttributeError("you need to pass a Gene not a: " + str(type(gene)))
        self.extract_info(gene)

    def extract_info(self, gene):
        """Keep only id, display symbol and display name, with ensembl fallbacks."""
        self.data = {
            'geneid': gene.id,
            'symbol': gene.approved_symbol or gene.ensembl_external_name,
            'name': gene.approved_name or gene.ensembl_description,
        }
class ExtendedInfoEFO(ExtendedInfo):
    '''extract from an EFO object its label, id and two sets of
    therapeutic-area codes and labels
    '''
    root = "efo_info"

    def __init__(self, efo):
        if not isinstance(efo, EFO):
            raise AttributeError("you need to pass a EFO not a: " + str(type(efo)))
        self.extract_info(efo)

    def extract_info(self, efo):
        """Collect label, ontology paths and therapeutic-area codes/labels."""
        ta_codes = set()
        ta_labels = set()
        # the therapeutic area is the first element of any path longer than one
        for idx, codes in enumerate(efo.path_codes):
            if len(codes) > 1:
                ta_codes.add(codes[0])
                ta_labels.add(efo.path_labels[idx][0])
        self.data = dict(efo_id=efo.get_id(),
                         label=efo.label,
                         path=efo.path_codes,
                         therapeutic_area=dict(codes=list(ta_codes),
                                               labels=list(ta_labels)))
class ExtendedInfoECO(ExtendedInfo):
    """Minimal info (id and label) from an ECO evidence-code object."""
    root = "evidence_codes_info"

    def __init__(self, eco):
        if not isinstance(eco, ECO):
            raise AttributeError("you need to pass a ECO not a: " + str(type(eco)))
        self.extract_info(eco)

    def extract_info(self, eco):
        """Store the ECO id and label in self.data.

        Bug fix: the original assignment ended with a stray trailing comma,
        which made self.data a 1-tuple containing the dict rather than the
        dict itself -- breaking to_json() and diverging from the sibling
        ExtendedInfo subclasses.
        """
        self.data = dict(eco_id=eco.get_id(),
                         label=eco.label)
class EvidenceManager():
    def __init__(self, lookup_data, eco_scores_uri, excluded_biotypes, datasources_to_datatypes):
        """Cache lookup tables and scoring configuration used to fix/validate evidence.

        Args:
            lookup_data: object exposing available_genes/available_efos/
                available_ecos, uni2ens and non_reference_genes lookup tables.
            eco_scores_uri: location of the ECO score table consumed by
                _get_eco_scoring_values (defined elsewhere in this class).
            excluded_biotypes: mapping datasource -> biotypes to exclude.
            datasources_to_datatypes: mapping datasource -> datatype.
        """
        self.logger = logging.getLogger(__name__)
        self.available_genes = lookup_data.available_genes
        self.available_efos = lookup_data.available_efos
        self.available_ecos = lookup_data.available_ecos
        self.uni2ens = lookup_data.uni2ens
        self.non_reference_genes = lookup_data.non_reference_genes
        # presumably populates self.eco_scores used by fix_evidence -- confirm
        self._get_eco_scoring_values(self.available_ecos, eco_scores_uri)
        self.uni_header = GeneData.UNI_ID_ORG_PREFIX
        self.ens_header = GeneData.ENS_ID_ORG_PREFIX
        self.excluded_biotypes = excluded_biotypes
        self.datasources_to_datatypes = datasources_to_datatypes
        # presumably populates score-modifier state -- helper defined elsewhere
        self._get_score_modifiers()
# @do_profile()#follow=[])
def fix_evidence(self, evidence):
evidence = evidence.evidence
fixed = False
# fix errors in data here so nobody needs to ask corrections to the data provider
# fix missing version in gwas catalog data
if 'variant2disease' in evidence:
try:
float(evidence['evidence']['variant2disease']['provenance_type']['database']['version'])
except:
evidence['evidence']['variant2disease']['provenance_type']['database']['version'] = ''
fixed = True
try:
float(evidence['evidence']['variant2disease']['provenance_type']['database']['dbxref']['version'])
except:
evidence['evidence']['variant2disease']['provenance_type']['database']['dbxref']['version'] = ''
fixed = True
if 'gene2variant' in evidence:
try:
float(evidence['evidence']['gene2variant']['provenance_type']['database']['version'])
except:
evidence['evidence']['gene2variant']['provenance_type']['database']['version'] = ''
fixed = True
try:
float(evidence['evidence']['gene2variant']['provenance_type']['database']['dbxref']['version'])
except:
evidence['evidence']['gene2variant']['provenance_type']['database']['dbxref']['version'] = ''
fixed = True
# Split EVA in two datasources depending on the datatype
if (evidence['sourceID'] == 'eva') and \
(evidence['type'] == 'somatic_mutation'):
evidence['sourceID'] = 'eva_somatic'
fixed = True
# Move genetic_literature to genetic_association
if evidence['type'] == 'genetic_literature':
evidence['type'] = 'genetic_association'
if 'provenance_type' in evidence and \
'database' in evidence['provenance_type'] and \
'version' in evidence['provenance_type']['database']:
evidence['provenance_type']['database']['version'] = str(evidence['provenance_type']['database']['version'])
# Enforce eco-based score for genetic_association evidencestrings
if evidence['type'] == 'genetic_association':
available_score = None
eco_uri = None
try:
available_score = evidence['evidence']['gene2variant']['resource_score']['value']
except KeyError:
if 'resource_score' in evidence['evidence'] and \
'value' in evidence['evidence']['resource_score']:
available_score = evidence['evidence']['resource_score']['value']
try:
eco_uri = evidence['evidence']['gene2variant']['functional_consequence']
if 'evidence_codes' in evidence['evidence']:
eco_uri = evidence['evidence']['evidence_codes']
except KeyError:
if 'evidence_codes' in evidence['evidence']:
eco_uri = evidence['evidence']['evidence_codes'][0]
eco_uri.rstrip()
if eco_uri in self.eco_scores:
if 'gene2variant' in evidence['evidence']:
if 'resource_score' not in evidence['evidence']['gene2variant']:
evidence['evidence']['gene2variant']['resource_score'] = {}
evidence['evidence']['gene2variant']['resource_score']['value'] = self.eco_scores[eco_uri]
evidence['evidence']['gene2variant']['resource_score']['type'] = 'probability'
if available_score != self.eco_scores[eco_uri]:
fixed = True
else:
self.logger.warning("Cannot find a score for eco code %s in evidence id %s" % (eco_uri, evidence['id']))
# Remove identifiers.org from genes and map to ensembl ids
EvidenceManager.fix_target_id(evidence,
self.uni2ens,
self.available_genes,
self.non_reference_genes
)
# Remove identifiers.org from cttv activity and target type ids
if 'target_type' in evidence['target']:
evidence['target']['target_type'] = evidence['target']['target_type'].split('/')[-1]
if 'activity' in evidence['target']:
evidence['target']['activity'] = evidence['target']['activity'].split('/')[-1]
# Remove identifiers.org from efos
EvidenceManager.fix_disease_id(evidence)
# Remove identifiers.org from ecos
new_eco_ids = []
if 'evidence_codes' in evidence['evidence']:
eco_ids = evidence['evidence']['evidence_codes']
elif 'variant2disease' in evidence['evidence']:
if 'variant2disease' in evidence['evidence']:
eco_ids = evidence['evidence']['variant2disease']['evidence_codes']
if 'gene2variant' in evidence['evidence']:
eco_ids.extend(evidence['evidence']['gene2variant']['evidence_codes'])
elif 'target2drug' in evidence['evidence']:
eco_ids = evidence['evidence']['target2drug']['evidence_codes']
eco_ids.extend(evidence['evidence']['drug2clinic']['evidence_codes'])
elif 'biological_model' in evidence['evidence']:
eco_ids = evidence['evidence']['biological_model']['evidence_codes']
else:
eco_ids = [] # something wrong here...
eco_ids = list(set(eco_ids))
for idorg_eco_uri in eco_ids:
code = get_ontology_code_from_url(idorg_eco_uri.strip())
if code is not None:
# if len(code.split('_')) != 2:
# self.logger.warning("could not recognize evidence code: %s in id %s | added anyway" %(evidence['id'],
# idorg_eco_uri))
new_eco_ids.append(code)
evidence['evidence']['evidence_codes'] = list(set(new_eco_ids))
if not new_eco_ids:
self.logger.warning("No valid ECO could be found in evidence: %s. original ECO mapping: %s" % (
evidence['id'], str(eco_ids)[:100]))
return Evidence(evidence,self.datasources_to_datatypes), fixed
    @staticmethod
    def normalise_target_id(evidence, uni2ens, available_genes, non_reference_genes):
        """Map a target.id URI (uniprot/ensembl identifiers.org form) to a
        reference Ensembl gene id.

        Returns:
            (new_target_id, id_not_in_ensembl): the mapped id (or None) and a
            bool set when no Ensembl mapping could be established.
        """
        target_id = evidence['target']['id']
        new_target_id = None
        id_not_in_ensembl = False
        try:
            if target_id.startswith(GeneData.UNI_ID_ORG_PREFIX):
                # uniprot URI; drop any isoform suffix ("...P12345-2" -> "...P12345")
                if '-' in target_id:
                    target_id = target_id.split('-')[0]
                uniprotid = target_id.split(GeneData.UNI_ID_ORG_PREFIX)[1].strip()
                ensemblid = uni2ens[uniprotid]  # may raise KeyError (caught below)
                # get_reference_ensembl_id is defined elsewhere in this class
                new_target_id = EvidenceManager.get_reference_ensembl_id(ensemblid,
                                                                         available_genes=available_genes,
                                                                         non_reference_genes=non_reference_genes)
            elif target_id.startswith(GeneData.ENS_ID_ORG_PREFIX):
                # ensembl URI; strip the identifiers.org prefix
                ensemblid = target_id.split(GeneData.ENS_ID_ORG_PREFIX)[1].strip()
                new_target_id = EvidenceManager.get_reference_ensembl_id(ensemblid,
                                                                         available_genes=available_genes,
                                                                         non_reference_genes=non_reference_genes)
            else:
                # note: target_id may have been trimmed above before logging
                logger.warning("could not recognize target.id: %s | not added" % target_id)
                id_not_in_ensembl = True
        except KeyError:
            logger.error("cannot find an ensembl ID for: %s" % target_id)
            id_not_in_ensembl = True
        return new_target_id, id_not_in_ensembl
def is_excluded_by_biotype(self, datasource, gene_id):
is_excluded = False
if datasource in self.excluded_biotypes:
gene_obj = self.available_genes[gene_id]
if gene_obj['biotype'] in self.excluded_biotypes[datasource]:
is_excluded = True
return is_excluded
@staticmethod
def fix_target_id(evidence,uni2ens, available_genes, non_reference_genes, logger=logging.getLogger(__name__)) :
target_id = evidence['target']['id']
try:
new_target_id, id_not_in_ensembl = EvidenceManager.normalise_target_id(evidence,
uni2ens,
available_genes,
non_reference_genes)
except KeyError:
logger.error("cannot find an ensembl ID for: %s" % target_id)
id_not_in_ensembl = True
new_target_id = target_id
if id_not_in_ensembl:
logger.warning("cannot find any ensembl ID for evidence for: %s. Offending target.id: %s",
evidence['target']['id'], target_id)
evidence['target']['id'] = new_target_id
@staticmethod
def fix_disease_id(evidence, logger=logging.getLogger(__name__)):
    """Rewrite evidence['disease']['id'] in place with its short ontology code."""
    original_id = evidence['disease']['id']
    short_code = get_ontology_code_from_url(original_id)
    # a well-formed code looks like PREFIX_NUMBER (e.g. EFO_0000270)
    if len(short_code.split('_')) != 2:
        logger.warning("could not recognize disease.id: %s | added anyway" % original_id)
    evidence['disease']['id'] = short_code
    if not short_code:
        logger.warning("No valid disease.id could be found in evidence: %s. Offending disease.id: %s" % (
            evidence['id'], original_id))
def check_is_valid_evs(self, evidence, datasource):
"""check consistency of the data in the evidence and returns a tuple with (is_valid, problem_str)"""
ev = evidence.evidence
evidence_id = ev['id']
if not ev['target']['id']:
problem_str = "%s Evidence %s has no valid gene in target.id" % (datasource, evidence_id)
return False, problem_str
gene_id = ev['target']['id']
if gene_id not in self.available_genes:
problem_str = "%s Evidence %s has an invalid gene id in target.id: %s" % (datasource, evidence_id, gene_id)
return False, problem_str
if not ev['disease']['id']:
problem_str = "%s Evidence %s has no valid efo id in disease.id" % (datasource, evidence_id)
return False, problem_str
efo_id = ev['disease']['id']
if efo_id not in self.available_efos:
problem_str = "%s Evidence %s has an invalid efo id in disease.id: %s" % (datasource, evidence_id, efo_id)
return False, problem_str
if self.is_excluded_by_biotype(datasource, gene_id):
problem_str = "%s Evidence %s gene_id %s is an excluded biotype" % \
(datasource, evidence_id, gene_id)
return False, problem_str
# well, it seems this evidence is probably valid
return True, ''
def is_valid(self, evidence, datasource):
'''check consistency of the data in the evidence'''
ev = evidence.evidence
evidence_id = ev['id']
if not ev['target']['id']:
self.logger.error("%s Evidence %s has no valid gene in target.id" % (datasource, evidence_id))
return False
gene_id = ev['target']['id']
if gene_id not in self.available_genes:
self.logger.error(
"%s Evidence %s has an invalid gene id in target.id: %s" % (datasource, evidence_id, gene_id))
return False
if not ev['disease']['id']:
self.logger.error("%s Evidence %s has no valid efo id in disease.id" % (datasource, evidence_id))
return False
efo_id = ev['disease']['id']
if efo_id not in self.available_efos:
self.logger.error(
"%s Evidence %s has an invalid efo id in disease.id: %s" % (datasource, evidence_id, efo_id))
return False
return True
def get_extended_evidence(self, evidence):
    """Return a new Evidence whose payload is enriched with annotations.

    Copies the raw payload, attaches extended gene (target), EFO (disease)
    and ECO (evidence codes) info blocks, and fills the 'private' section
    (efo/eco codes, datasource/datatype and facet data) used only for
    faceting in the search index.
    """
    extended_evidence = copy.copy(evidence.evidence)
    extended_evidence['private'] = dict()
    # Get generic gene info
    genes_info = []
    pathway_data = dict(pathway_type_code=[],
                        pathway_code=[])
    GO_terms = dict(biological_process=[],
                    cellular_component=[],
                    molecular_function=[],
                    )
    target_class = dict(level1=[],
                        level2=[])
    uniprot_keywords = []
    # TODO: handle domains
    geneid = extended_evidence['target']['id']
    # try:
    gene = self._get_gene_obj(geneid)
    genes_info = ExtendedInfoGene(gene)
    # collect reactome pathway codes for the reactome facet
    if 'reactome' in gene._private['facets']:
        pathway_data['pathway_type_code'].extend(gene._private['facets']['reactome']['pathway_type_code'])
        pathway_data['pathway_code'].extend(gene._private['facets']['reactome']['pathway_code'])
    # except Exception:
    #     self.logger.warning("Cannot get generic info for gene: %s" % aboutid)
    if gene.go:
        for go in gene.go:
            go_code, data = go['id'], go['value']
            try:
                # data['term'] looks like "P:some label": first char is the GO
                # aspect (P/F/C), the label starts after the "X:" prefix
                category, term = data['term'][0], data['term'][2:]
                if category == 'P':
                    GO_terms['biological_process'].append(dict(code=go_code,
                                                               term=term))
                elif category == 'F':
                    GO_terms['molecular_function'].append(dict(code=go_code,
                                                               term=term))
                elif category == 'C':
                    GO_terms['cellular_component'].append(dict(code=go_code,
                                                               term=term))
            except:
                # NOTE(review): bare except silently drops malformed GO entries
                pass
    if gene.uniprot_keywords:
        uniprot_keywords = gene.uniprot_keywords
    if genes_info:
        extended_evidence["target"][ExtendedInfoGene.root] = genes_info.data
    if pathway_data['pathway_code']:
        # de-duplicate the collected pathway codes
        pathway_data['pathway_type_code'] = list(set(pathway_data['pathway_type_code']))
        pathway_data['pathway_code'] = list(set(pathway_data['pathway_code']))
    if 'chembl' in gene.protein_classification and gene.protein_classification['chembl']:
        target_class['level1'].append([i['l1'] for i in gene.protein_classification['chembl'] if 'l1' in i])
        target_class['level2'].append([i['l2'] for i in gene.protein_classification['chembl'] if 'l2' in i])
    # Get generic efo info
    # can it happen you get no efo codes but just one disease?
    all_efo_codes = []
    diseaseid = extended_evidence['disease']['id']
    efo = self._get_efo_obj(diseaseid)
    efo_info = ExtendedInfoEFO(efo)
    if efo_info:
        # flatten every ancestry path into one list of EFO codes
        for path in efo_info.data['path']:
            all_efo_codes.extend(path)
        extended_evidence["disease"][ExtendedInfoEFO.root] = efo_info.data
    all_efo_codes = list(set(all_efo_codes))
    # Get generic eco info
    try:
        all_eco_codes = extended_evidence['evidence']['evidence_codes']
        try:
            # gene2variant functional consequence is an ECO code too
            all_eco_codes.append(
                get_ontology_code_from_url(extended_evidence['evidence']['gene2variant']['functional_consequence']))
        except KeyError:
            pass
        ecos_info = []
        for eco_id in all_eco_codes:
            eco = self._get_eco_obj(eco_id)
            if eco is not None:
                ecos_info.append(ExtendedInfoECO(eco))
            else:
                self.logger.warning("eco uri %s is not in the ECO LUT so it will not be considered as included", eco_id)
        if ecos_info:
            data = []
            for eco_info in ecos_info:
                data.append(eco_info.data)
            extended_evidence['evidence'][ExtendedInfoECO.root] = data
    except Exception as e:
        # any failure clears the eco annotations rather than aborting
        extended_evidence['evidence'][ExtendedInfoECO.root] = None
        all_eco_codes = []
        # self.logger.exception("Cannot get generic info for eco: %s:"%str(e))
    # Add private objects used just for faceting
    extended_evidence['private']['efo_codes'] = all_efo_codes
    extended_evidence['private']['eco_codes'] = all_eco_codes
    extended_evidence['private']['datasource'] = evidence.datasource
    extended_evidence['private']['datatype'] = evidence.datatype
    extended_evidence['private']['facets'] = {}
    if pathway_data['pathway_code']:
        extended_evidence['private']['facets']['reactome'] = pathway_data
    if uniprot_keywords:
        extended_evidence['private']['facets']['uniprot_keywords'] = uniprot_keywords
    if GO_terms['biological_process'] or \
            GO_terms['molecular_function'] or \
            GO_terms['cellular_component']:
        extended_evidence['private']['facets']['go'] = GO_terms
    if target_class['level1']:
        extended_evidence['private']['facets']['target_class'] = target_class
    return Evidence(extended_evidence, self.datasources_to_datatypes)
def _get_gene_obj(self, geneid):
    """Build a Gene object hydrated from the locally cached gene JSON."""
    gene_obj = Gene(geneid)
    gene_obj.load_json(self.available_genes[geneid])
    return gene_obj
def _get_efo_obj(self, efoid):
    """Build an EFO object hydrated from the locally cached EFO JSON."""
    efo_obj = EFO(efoid)
    efo_obj.load_json(self.available_efos[efoid])
    return efo_obj
def _get_eco_obj(self, ecoid):
    """Return an ECO object for *ecoid*, or None when it is not in the LUT."""
    try:
        eco_obj = ECO(ecoid)
        eco_obj.load_json(self.available_ecos[ecoid])
    except KeyError:
        # unknown ECO id: caller treats None as "not included"
        return None
    return eco_obj
def _get_non_reference_gene_mappings(self):
    """Populate ``self.non_reference_genes`` from the bundled TSV.

    Each data row is ``symbol  ensg  assembly  chr  is_ref``: rows with
    ``is_ref == 't'`` set the gene's reference ENSG id, all other rows are
    collected as alternative-assembly ids.
    """
    self.non_reference_genes = {}
    skip_header = True
    # use open() + with (file() is Python-2-only and the handle was leaked)
    with open(file_or_resource('genes_with_non_reference_ensembl_ids.tsv')) as tsv:
        for line in tsv:
            if skip_header:
                skip_header = False
                continue  # BUGFIX: the header row used to fall through and be parsed as data
            symbol, ensg, assembly, chr, is_ref = line.split()
            if symbol not in self.non_reference_genes:
                self.non_reference_genes[symbol] = dict(reference='',
                                                        alternative=[])
            if is_ref == 't':
                self.non_reference_genes[symbol]['reference'] = ensg
            else:
                self.non_reference_genes[symbol]['alternative'].append(ensg)
@staticmethod
def _map_to_reference_ensembl_gene(ensg, non_reference_genes, logger=logging.getLogger(__name__)):
for symbol, data in non_reference_genes.items():
if ensg in data['alternative']:
logger.warning(
"Mapped non reference ensembl gene id %s to %s for gene %s" % (ensg, data['reference'], symbol))
return data['reference']
@staticmethod
def get_reference_ensembl_id(ensemblid, available_genes, non_reference_genes):
if ensemblid not in available_genes:
ensemblid = EvidenceManager._map_to_reference_ensembl_gene(ensemblid, non_reference_genes) or ensemblid
return ensemblid
def _get_eco_scoring_values(self, eco_lut_obj, eco_scores_uri):
    """Load the ECO score lookup table into ``self.eco_scores``."""
    self.eco_scores = load_eco_scores_table(eco_lut_obj=eco_lut_obj,
                                            filename=eco_scores_uri)
#TODO remove this
def _get_score_modifiers(self):
self.score_modifiers = {}
class Evidence(JSONSerializable):
    """One evidence string plus its datasource/datatype metadata.

    Wraps the raw payload (a dict, or a JSON string parsed on construction)
    and computes the per-evidence association score for every supported
    evidence type.
    """

    def __init__(self, evidence, datasources_to_datatypes):
        """
        :param evidence: evidence payload as a dict, or a JSON string to parse
        :param datasources_to_datatypes: mapping of datasource name -> datatype
        :raises AttributeError: if evidence is neither a dict nor a string
        """
        self.logger = logging.getLogger(__name__)
        # NOTE(review): the `unicode` check implies this module targets Python 2
        if isinstance(evidence, str) or isinstance(evidence, unicode):
            self.load_json(evidence)
        elif isinstance(evidence, dict):
            self.evidence = evidence
        else:
            raise AttributeError(
                "the evidence should be a dict or a json string to parse, not a " + str(type(evidence)))
        self.datasource = self.evidence['sourceID']
        self.datatype = datasources_to_datatypes[self.datasource]

    def get_doc_name(self):
        """Return the Elasticsearch document name for this datasource."""
        return Const.ELASTICSEARCH_DATA_DOC_NAME + '-' + self.datasource.lower()

    def get_id(self):
        """Return the evidence's unique id."""
        return self.evidence['id']

    def to_json(self):
        """Serialise the payload to a deterministic (sorted-keys) JSON string."""
        return json.dumps(self.evidence,
                          sort_keys=True,
                          # indent=4,
                          cls=PipelineEncoder)

    def load_json(self, data):
        """Replace the payload with the parsed JSON string *data*."""
        self.evidence = json.loads(data)

    def score_evidence(self, modifiers={}):
        """Compute ``self.evidence['scores']['association_score']`` in [0, 1].

        The formula depends on ``self.evidence['type']`` (known_drug,
        rna_expression, genetic_association, animal_model, somatic_mutation,
        literature, affected_pathway). Scoring failures are logged and leave
        the score at 0.

        :param modifiers: mapping of datasource name -> rescaling callable
            applied after the type-specific score.
            NOTE(review): mutable default; it is never mutated here, but a
            None default would be safer.
        """
        self.evidence['scores'] = dict(association_score=0.,
                                       )
        try:
            if self.evidence['type'] == 'known_drug':
                # product of the clinical and the target-to-drug scores
                self.evidence['scores']['association_score'] = \
                    float(self.evidence['evidence']['drug2clinic']['resource_score']['value']) * \
                    float(self.evidence['evidence']['target2drug']['resource_score']['value'])
            elif self.evidence['type'] == 'rna_expression':
                pvalue = self._get_score_from_pvalue_linear(self.evidence['evidence']['resource_score']['value'])
                log2_fold_change = self.evidence['evidence']['log2_fold_change']['value']
                fold_scale_factor = abs(log2_fold_change) / 10.
                rank = self.evidence['evidence']['log2_fold_change']['percentile_rank'] / 100.
                score = pvalue * fold_scale_factor * rank
                # cap at 1 since fold_scale_factor can exceed 1
                if score > 1:
                    score = 1.
                self.evidence['scores']['association_score'] = score
            elif self.evidence['type'] == 'genetic_association':
                score = 0.
                if 'gene2variant' in self.evidence['evidence']:
                    if self.evidence['sourceID'] in ['phewas_catalog', 'twentythreeandme']:
                        no_of_cases = self.evidence['unique_association_fields']['cases']
                        score = self._score_phewas_data(self.evidence['sourceID'],
                                                        self.evidence['evidence']['variant2disease']['resource_score'][
                                                            'value'],
                                                        no_of_cases)
                    else:
                        g2v_score = self.evidence['evidence']['gene2variant']['resource_score']['value']
                        # variant2disease score may be either a p-value or a probability
                        if self.evidence['evidence']['variant2disease']['resource_score']['type'] == 'pvalue':
                            v2d_score = self._get_score_from_pvalue_linear(
                                self.evidence['evidence']['variant2disease']['resource_score']['value'])
                        elif self.evidence['evidence']['variant2disease']['resource_score']['type'] == 'probability':
                            v2d_score = self.evidence['evidence']['variant2disease']['resource_score']['value']
                        else:
                            # this should not happen?
                            v2d_score = 0.
                        if self.evidence['sourceID'] == 'gwas_catalog':
                            sample_size = self.evidence['evidence']['variant2disease']['gwas_sample_size']
                            p_value = self.evidence['evidence']['variant2disease']['resource_score']['value']
                            # this is something to take into account for postgap data when I refactor this
                            r2_value = float(1)
                            if 'r2' in self.evidence['unique_association_fields']:
                                r2_value = float(self.evidence['unique_association_fields']['r2'])
                            score = self._score_gwascatalog(p_value, sample_size, g2v_score, r2_value)
                        else:
                            score = g2v_score * v2d_score
                else:
                    # no gene2variant block: fall back to the plain resource score
                    if self.evidence['evidence']['resource_score']['type'] == 'probability':
                        score = self.evidence['evidence']['resource_score']['value']
                    elif self.evidence['evidence']['resource_score']['type'] == 'pvalue':
                        score = self._get_score_from_pvalue_linear(self.evidence['evidence']['resource_score']['value'])
                self.evidence['scores']['association_score'] = score
            elif self.evidence['type'] == 'animal_model':
                self.evidence['scores']['association_score'] = float(
                    self.evidence['evidence']['disease_model_association']['resource_score']['value'])
            elif self.evidence['type'] == 'somatic_mutation':
                frequency = 1.
                if 'known_mutations' in self.evidence['evidence'] and self.evidence['evidence']['known_mutations']:
                    sample_total_coverage = 1.
                    max_sample_size = 1.
                    for mutation in self.evidence['evidence']['known_mutations']:
                        if 'number_samples_with_mutation_type' in mutation:
                            sample_total_coverage += int(mutation['number_samples_with_mutation_type'])
                            if int(mutation['number_mutated_samples']) > max_sample_size:
                                max_sample_size = int(mutation['number_mutated_samples'])
                    # coverage cannot exceed the largest observed sample size
                    if sample_total_coverage > max_sample_size:
                        sample_total_coverage = max_sample_size
                    frequency = DataNormaliser.renormalize(sample_total_coverage / max_sample_size, [0., 9.], [.5, 1.])
                self.evidence['scores']['association_score'] = float(
                    self.evidence['evidence']['resource_score']['value']) * frequency
            elif self.evidence['type'] == 'literature':
                score = float(self.evidence['evidence']['resource_score']['value'])
                if self.evidence['sourceID'] == 'europepmc':
                    # europepmc scores arrive on a 0-100 scale
                    score = score / 100.
                    if score > 1:
                        score = 1.
                self.evidence['scores']['association_score'] = score
            elif self.evidence['type'] == 'affected_pathway':
                # TODO: Implement two types of scoring for sysbio - based on p-value range & based on rank-based score range
                if self.evidence['sourceID'] == 'sysbio':
                    score = float(self.evidence['evidence']['resource_score']['value'])
                elif self.evidence['evidence']['resource_score']['type'] == 'pvalue':
                    score = self._get_score_from_pvalue_linear(float(self.evidence['evidence']['resource_score']['value']),
                                                               range_min=1e-4, range_max=1e-14,
                                                               out_range_min=0.5, out_range_max=1.0)
                else:
                    score = float(
                        self.evidence['evidence']['resource_score']['value'])
                self.evidence['scores']['association_score'] = score
        except Exception as e:
            # scoring is best-effort: log and keep association_score at 0
            self.logger.error(
                "Cannot score evidence %s of type %s. Error: %s" % (self.evidence['id'], self.evidence['type'], e))
        # Apply rescaling to scores
        if self.evidence['sourceID'] in modifiers:
            self.evidence['scores']['association_score'] = modifiers[self.evidence['sourceID']](
                self.evidence['scores']['association_score'])

    @staticmethod
    def _get_score_from_pvalue_linear(pvalue, range_min=1, range_max=1e-10, out_range_min=0., out_range_max=1.):
        """rescale transformed p-values from [range_min, range_max] to [out_range_min, out_range_max]"""
        def get_log(n):
            try:
                return math.log10(n)
            except ValueError:
                # log10 of 0 or a negative value: clamp to the strongest p-value
                return math.log10(range_max)

        min_score = get_log(range_min)
        max_score = get_log(range_max)
        score = get_log(pvalue)
        return DataNormaliser.renormalize(score, [min_score, max_score], [out_range_min, out_range_max])

    def _score_gwascatalog(self, pvalue, sample_size, g2v_value, r2_value):
        """Combine p-value, sample size, gene2variant score and r2 into one score."""
        normalised_pvalue = self._get_score_from_pvalue_linear(pvalue, range_min=1, range_max=1e-15)
        # sample sizes are normalised against a 5000-participant ceiling
        normalised_sample_size = DataNormaliser.renormalize(sample_size, [0, 5000], [0, 1])
        score = normalised_pvalue * normalised_sample_size * g2v_value * r2_value
        return score

    def _score_phewas_data(self, source, pvalue, no_of_cases):
        """Score phewas_catalog / twentythreeandme evidence from p-value and case count.

        NOTE(review): an unrecognised *source* leaves range_min/range_max/max_cases
        unbound and raises UnboundLocalError; callers only pass the two sources
        handled below.
        """
        if source == 'phewas_catalog':
            max_cases = 8800
            range_min = 0.05
            range_max = 1e-25
        elif source == 'twentythreeandme':
            max_cases = 297901
            range_min = 0.05
            range_max = 1e-30
        normalised_pvalue = self._get_score_from_pvalue_linear(float(pvalue), range_min, range_max)
        normalised_no_of_cases = DataNormaliser.renormalize(no_of_cases, [0, max_cases], [0, 1])
        score = normalised_pvalue * normalised_no_of_cases
        return score
|
from fidget.backend.QtWidgets import QLineEdit, QHBoxLayout
from fidget.core.__util__ import first_valid
from fidget.widgets.rawstring import FidgetRawString
class FidgetLine(FidgetRawString):
    """
    A string Fidget, in the form of a QLineEdit
    """
    # do not build the indicator/title/plaintext sub-widgets for this fidget
    MAKE_INDICATOR = MAKE_TITLE = MAKE_PLAINTEXT = False
    # class-level default consulted by first_valid when no placeholder argument is given
    PLACEHOLDER = True

    def __init__(self, title: str, placeholder=None,
                 **kwargs):
        """
        :param title: the fidget's title
        :param placeholder: whether to show placeholder text in the line edit;
            falls back to the PLACEHOLDER class attribute when None
        :param kwargs: forwarded to FidgetRawString
        """
        super().__init__(title, **kwargs)
        placeholder = first_valid(placeholder=placeholder, PLACEHOLDER=self.PLACEHOLDER, _self=self)
        self.edit: QLineEdit = None
        # NOTE(review): whenever `placeholder` is truthy the *title* is used as
        # the placeholder text — a custom placeholder string is never shown
        # as-is; confirm this is intended.
        self.init_ui(placeholder=placeholder and self.title)
        self.fill_initial()

    def init_ui(self, placeholder=None):
        """Build the QLineEdit, wire its change signal, and return the layout."""
        super().init_ui()
        layout = QHBoxLayout(self)
        with self.setup_provided(layout):
            self.edit = QLineEdit()
            if placeholder:
                self.edit.setPlaceholderText(placeholder)
            # propagate user edits into the fidget's value machinery
            self.edit.textChanged.connect(self.change_value)
            layout.addWidget(self.edit)
        self.setFocusProxy(self.edit)
        return layout

    def parse(self):
        """Return the raw text currently in the line edit."""
        return self.edit.text()

    def fill(self, v: str = ''):
        """Set the line edit's text to *v*."""
        self.edit.setText(v)

    def fill_stylesheet(self, style):
        """Apply *style* as the line edit's Qt stylesheet."""
        self.edit.setStyleSheet(style)
|
# ██╗ ██╗██████╗ ███╗ ███╗███████╗ █████╗ ██╗
# ██║ ██║██╔══██╗████╗ ████║██╔════╝██╔══██╗██║
# ███████║██║ ██║██╔████╔██║█████╗ ███████║██║
# ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ ██╔══██║██║
# ██║ ██║██████╔╝██║ ╚═╝ ██║███████╗██║ ██║███████╗
# ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝
# Copyright 2019-2020, Hyungyo Seo
# schedule_parser.py - NEIS 서버에 접속하여 학사일정을 파싱해오는 스크립트입니다.
import datetime
import json
import urllib.error
import urllib.request
from itertools import groupby
from modules.common import conf, log
# Load configuration
NEIS_OPENAPI_TOKEN = conf.configs['Tokens']['NEIS']  # NEIS open API auth token
ATPT_OFCDC_SC_CODE = conf.configs['School']['NEIS']['ATPT_OFCDC_SC_CODE']  # provincial/metropolitan education office code
SD_SCHUL_CODE = conf.configs['School']['NEIS']['SD_SCHUL_CODE']  # standard school code
def parse(year, month, req_id, debugging):
    """Fetch and cache the school schedule for (year, month) from the NEIS API.

    When any schedule entries exist, writes them to
    ``data/cache/Cal-<year>-<month>.json`` (keyed by day-of-month) and
    returns 0; with no entries it implicitly returns None (kept for
    backward compatibility).

    :param year: target year (int or int-like string)
    :param month: target month (int or int-like string)
    :param req_id: request id used only for log correlation
    :param debugging: unused here; kept for interface compatibility
    :raises ConnectionError: when the NEIS API cannot be reached
    """
    year, month = int(year), int(month)
    schdls = {}
    log.info("[#%s] parse@schedule_parser.py: Started Parsing Schedule(%s-%s)" % (req_id, year, month))
    date_from = datetime.date(year, month, 1)
    # first day of the next month minus one day == last day of this month
    date_to = (date_from + datetime.timedelta(days=40)).replace(day=1) - datetime.timedelta(days=1)
    try:
        req = urllib.request.urlopen("https://open.neis.go.kr/hub/SchoolSchedule?KEY=%s&Type=json&ATPT_OFCDC_SC_CODE"
                                     "=%s&SD_SCHUL_CODE=%s&AA_FROM_YMD=%s&AA_TO_YMD=%s"
                                     % (NEIS_OPENAPI_TOKEN, ATPT_OFCDC_SC_CODE, SD_SCHUL_CODE,
                                        date_from.strftime("%Y%m%d"), date_to.strftime("%Y%m%d")), timeout=2)
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        log.err("[#%s] parse@schedule_parser.py: Failed to Parse Schedule(%s-%s) because %s" % (
            req_id, year, month, e))
        raise ConnectionError
    with req:  # BUGFIX: close the HTTP response instead of leaking the socket
        data = json.loads(req.read())
    schedules = []
    for i in data["SchoolSchedule"][1]["row"]:
        if i["EVENT_NM"] == "토요휴업일":  # skip "Saturday closure" placeholder events
            continue
        date = datetime.datetime.strptime(i["AA_YMD"], "%Y%m%d").date()
        # collect the grades (1-6) each event applies to
        related_grade = []
        if i["ONE_GRADE_EVENT_YN"] == "Y": related_grade.append(1)
        if i["TW_GRADE_EVENT_YN"] == "Y": related_grade.append(2)
        if i["THREE_GRADE_EVENT_YN"] == "Y": related_grade.append(3)
        if i["FR_GRADE_EVENT_YN"] == "Y": related_grade.append(4)
        if i["FIV_GRADE_EVENT_YN"] == "Y": related_grade.append(5)
        if i["SIX_GRADE_EVENT_YN"] == "Y": related_grade.append(6)
        schedules.append([date, i["EVENT_NM"], related_grade])
    # merge same-day events into one text blob per day
    # NOTE(review): groupby only merges *adjacent* rows — this assumes the API
    # returns rows ordered by date; confirm.
    for k, v in groupby(schedules, lambda k: k[0]):
        schedule_text = ""
        for item in v:
            schedule_text = "%s%s(%s)\n" % (schedule_text, item[1], ", ".join("%s학년" % i for i in item[2]))
        schedule_text = schedule_text[:-1].replace("()", "")  # drop trailing newline and empty grade lists
        schdls[str(k.day)] = schedule_text
    if schdls:
        # cache the parsed month so later lookups can skip the API call
        with open('data/cache/Cal-%s-%s.json' % (year, month), 'w',
                  encoding="utf-8") as make_file:
            json.dump(schdls, make_file, ensure_ascii=False)
            print("File Created")
        log.info("[#%s] parse@schedule_parser.py: Succeeded(%s-%s)" % (req_id, year, month))
        return 0
# Debug entry point: fetch a sample month directly
if __name__ == "__main__":
    log.init()
    parse(2019, 8, "****DEBUG****", True)
|
# SPDX-FileCopyrightText: 2021 Luke Granger-Brown <git@lukegb.com>
#
# SPDX-License-Identifier: MIT
from typing import Optional, TextIO
from . import der_x509
from . import enums
from . import types
class CertStoreOutput:
    """Writes certificates and their trust metadata to a text stream."""

    def __init__(self, fp: TextIO):
        self.fp = fp

    def output(self, cert: Optional[types.Certificate], trust: types.Trust) -> None:
        """Emit one certificate (when present) together with its trust data."""
        if not cert:
            return
        fp = self.fp
        print(cert.label, file=fp)
        if trust.trust_server_auth == enums.TrustType.TRUSTED_DELEGATOR:
            # Plain CERTIFICATE form for applications that only understand that;
            # emitted only when the cert is affirmatively trusted as a CA for
            # server-side auth.
            print(cert.as_pem().encode(), file=fp)
        else:
            print(
                "Traditional PEM block omitted: this certificate is not trusted for authenticating servers.",
                file=fp,
            )
        # Human-readable usage lists followed by an OpenSSL-style
        # TRUSTED CERTIFICATE entry.
        for heading, usages in (
            ("Trusted for:", trust.trusted_key_usages),
            ("Rejected for:", trust.untrusted_key_usages),
        ):
            if not usages:
                continue
            print(heading, file=fp)
            for usage in usages:
                print(" - {} ({})".format(str(usage), usage.name), file=fp)
        aux_der = der_x509.OpenSSLCertAux(
            trust=trust.trusted_key_usages,
            reject=trust.untrusted_key_usages,
        ).as_der()
        trusted_block = der_x509.PEMBlock(
            name="TRUSTED CERTIFICATE", content=cert.value + aux_der
        )
        print(trusted_block.encode(), file=fp)
|
# Generated by Django 3.2.5 on 2021-10-22 08:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add group-related notification types and new locale choices.

    Drops and recreates the ``notification_type_valid`` check constraint so
    it accepts the new JOIN/LEAVE/REMOVE/GROUP_* notification types, and
    extends the ``preferred_language`` choices on the user model.
    """

    dependencies = [
        ("bookwyrm", "0111_merge_0107_auto_20211016_0639_0110_auto_20211015_1734"),
    ]

    operations = [
        # the old constraint must be removed before the choice list changes
        migrations.RemoveConstraint(
            model_name="notification",
            name="notification_type_valid",
        ),
        migrations.AlterField(
            model_name="notification",
            name="notification_type",
            field=models.CharField(
                choices=[
                    ("FAVORITE", "Favorite"),
                    ("REPLY", "Reply"),
                    ("MENTION", "Mention"),
                    ("TAG", "Tag"),
                    ("FOLLOW", "Follow"),
                    ("FOLLOW_REQUEST", "Follow Request"),
                    ("BOOST", "Boost"),
                    ("IMPORT", "Import"),
                    ("ADD", "Add"),
                    ("REPORT", "Report"),
                    ("INVITE", "Invite"),
                    ("ACCEPT", "Accept"),
                    ("JOIN", "Join"),
                    ("LEAVE", "Leave"),
                    ("REMOVE", "Remove"),
                    ("GROUP_PRIVACY", "Group Privacy"),
                    ("GROUP_NAME", "Group Name"),
                    ("GROUP_DESCRIPTION", "Group Description"),
                ],
                max_length=255,
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="preferred_language",
            field=models.CharField(
                blank=True,
                choices=[
                    ("en-us", "English"),
                    ("de-de", "Deutsch (German)"),
                    ("es-es", "Español (Spanish)"),
                    ("fr-fr", "Français (French)"),
                    ("pt-br", "Português - Brasil (Brazilian Portuguese)"),
                    ("zh-hans", "简体中文 (Simplified Chinese)"),
                    ("zh-hant", "繁體中文 (Traditional Chinese)"),
                ],
                max_length=255,
                null=True,
            ),
        ),
        # recreate the constraint to match the extended choice list above
        migrations.AddConstraint(
            model_name="notification",
            constraint=models.CheckConstraint(
                check=models.Q(
                    (
                        "notification_type__in",
                        [
                            "FAVORITE",
                            "REPLY",
                            "MENTION",
                            "TAG",
                            "FOLLOW",
                            "FOLLOW_REQUEST",
                            "BOOST",
                            "IMPORT",
                            "ADD",
                            "REPORT",
                            "INVITE",
                            "ACCEPT",
                            "JOIN",
                            "LEAVE",
                            "REMOVE",
                            "GROUP_PRIVACY",
                            "GROUP_NAME",
                            "GROUP_DESCRIPTION",
                        ],
                    )
                ),
                name="notification_type_valid",
            ),
        ),
    ]
|
def solution(prices):
    """For each second i, return how long prices[i] went without dropping.

    Classic monotonic-stack solution: indices of prices that have not yet
    seen a strictly lower price are kept on a stack; when a lower price
    arrives, every higher pending price is "closed" at that moment.
    """
    n = len(prices)
    periods = [0] * n
    pending = []  # indices whose drop time is not yet known
    for now, price in enumerate(prices):
        while pending and price < prices[pending[-1]]:
            opened = pending.pop()
            periods[opened] = now - opened
        pending.append(now)
    # prices never undercut survive until the final second
    last = n - 1
    for opened in pending:
        periods[opened] = last - opened
    return periods
# Demo entry point; expected output: [1, 0]
if __name__ == '__main__':
    print(solution([2, 1]))
"""
def solution(prices):
result=[]
for i, v in enumerate(prices):
cnt=0
for j in range(i+1, len(prices)):
if v>prices[j]:
cnt+=1
break
else:
cnt+=1
result.append(cnt)
return result
""" |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List, Optional
import numpy as np
import torch
from mmcv.runner import auto_fp16
from mmcv.utils import ConfigDict
from mmdet.core import bbox2roi
from mmdet.models.builder import DETECTORS
from torch import Tensor
from .query_support_detector import QuerySupportDetector
@DETECTORS.register_module()
class AttentionRPNDetector(QuerySupportDetector):
    """Implementation of `AttentionRPN <https://arxiv.org/abs/1908.01998>`_.

    Args:
        backbone (dict): Config of the backbone for query data.
        neck (dict | None): Config of the neck for query data and
            probably for support data. Default: None.
        support_backbone (dict | None): Config of the backbone for
            support data only. If None, support and query data will
            share same backbone. Default: None.
        support_neck (dict | None): Config of the neck for support
            data only. Default: None.
        rpn_head (dict | None): Config of rpn_head. Default: None.
        roi_head (dict | None): Config of roi_head. Default: None.
        train_cfg (dict | None): Training config of the detector.
            Default: None.
        test_cfg (dict | None): Testing config of the detector.
            Default: None.
        pretrained (str | None): model pretrained path. Default: None.
        init_cfg (dict | list[dict] | None): Initialization config dict.
            Default: None.
    """

    def __init__(self,
                 backbone: ConfigDict,
                 neck: Optional[ConfigDict] = None,
                 support_backbone: Optional[ConfigDict] = None,
                 support_neck: Optional[ConfigDict] = None,
                 rpn_head: Optional[ConfigDict] = None,
                 roi_head: Optional[ConfigDict] = None,
                 train_cfg: Optional[ConfigDict] = None,
                 test_cfg: Optional[ConfigDict] = None,
                 pretrained: Optional[ConfigDict] = None,
                 init_cfg: Optional[ConfigDict] = None) -> None:
        super().__init__(
            backbone=backbone,
            neck=neck,
            support_backbone=support_backbone,
            support_neck=support_neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
        self.is_model_init = False
        # save support template features for model initialization,
        # `_forward_saved_support_dict` used in :func:`forward_model_init`.
        self._forward_saved_support_dict = {
            'gt_labels': [],
            'res4_roi_feats': [],
            'res5_roi_feats': []
        }
        # save processed support template features for inference,
        # the processed support template features are generated
        # in :func:`model_init`
        self.inference_support_dict = {}

    @auto_fp16(apply_to=('img', ))
    def extract_support_feat(self, img: Tensor) -> List[Tensor]:
        """Extract features of support data.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.

        Returns:
            list[Tensor]: Features of support images, each item with shape
                (N, C, H, W).
        """
        feats = self.support_backbone(img)
        if self.support_neck is not None:
            feats = self.support_neck(feats)
        return feats

    def forward_model_init(self,
                           img: Tensor,
                           img_metas: List[Dict],
                           gt_bboxes: List[Tensor] = None,
                           gt_labels: List[Tensor] = None,
                           **kwargs) -> Dict:
        """Extract and save support features for model initialization.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: `img_shape`, `scale_factor`, `flip`, and may also contain
                `filename`, `ori_shape`, `pad_shape`, and `img_norm_cfg`.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box.

        Returns:
            dict: A dict contains following keys:

                - `gt_labels` (Tensor): class indices corresponding to each
                    feature.
                - `res4_roi_feat` (Tensor): roi features of res4 layer.
                - `res5_roi_feat` (Tensor): roi features of res5 layer.
        """
        # extract support template features will reset `is_model_init` flag
        self.is_model_init = False
        assert gt_bboxes is not None and gt_labels is not None, \
            'forward support template require gt_bboxes and gt_labels.'
        # BUGFIX: the previous message ("Support instance have more than two
        # labels") did not describe this check at all.
        assert len(gt_labels) == img.size(0), \
            'the number of support labels must match the number of support images.'

        feats = self.extract_support_feat(img)
        # gt_bboxes is already a list of per-image bbox tensors; no need to
        # copy it element by element before converting to rois
        rois = bbox2roi(gt_bboxes)
        res4_roi_feat = self.rpn_head.extract_roi_feat(feats, rois)
        res5_roi_feat = self.roi_head.extract_roi_feat(feats, rois)
        self._forward_saved_support_dict['gt_labels'].extend(gt_labels)
        self._forward_saved_support_dict['res4_roi_feats'].append(
            res4_roi_feat)
        self._forward_saved_support_dict['res5_roi_feats'].append(
            res5_roi_feat)

        return {
            'gt_labels': gt_labels,
            'res4_roi_feats': res4_roi_feat,
            'res5_roi_feats': res5_roi_feat
        }

    def model_init(self) -> None:
        """process the saved support features for model initialization."""
        self.inference_support_dict.clear()
        gt_labels = torch.cat(self._forward_saved_support_dict['gt_labels'])
        # used for attention rpn head
        res4_roi_feats = torch.cat(
            self._forward_saved_support_dict['res4_roi_feats'])
        # used for multi relation head
        res5_roi_feats = torch.cat(
            self._forward_saved_support_dict['res5_roi_feats'])
        class_ids = set(gt_labels.data.tolist())
        # average the saved roi features per class to build class templates
        for class_id in class_ids:
            self.inference_support_dict[class_id] = {
                'res4_roi_feats':
                res4_roi_feats[gt_labels == class_id].mean([0, 2, 3], True),
                'res5_roi_feats':
                res5_roi_feats[gt_labels == class_id].mean([0], True)
            }
        # set the init flag
        self.is_model_init = True
        # clear support dict
        for k in self._forward_saved_support_dict.keys():
            self._forward_saved_support_dict[k].clear()

    def simple_test(self,
                    img: Tensor,
                    img_metas: List[Dict],
                    proposals: Optional[List[Tensor]] = None,
                    rescale: bool = False) -> List[List[np.ndarray]]:
        """Test without augmentation.

        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): list of image info dict where each dict
                has: `img_shape`, `scale_factor`, `flip`, and may also contain
                `filename`, `ori_shape`, `pad_shape`, and `img_norm_cfg`.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
            proposals (list[Tensor] | None): override rpn proposals with
                custom proposals. Use when `with_rpn` is False. Default: None.
            rescale (bool): If True, return boxes in original image space.

        Returns:
            list[list[np.ndarray]]: BBox results of each image and classes.
                The outer list corresponds to each image. The inner list
                corresponds to each class.
        """
        assert self.with_bbox, 'Bbox head must be implemented.'
        assert len(img_metas) == 1, 'Only support single image inference.'
        if (self.inference_support_dict == {}) or (not self.is_model_init):
            # process the saved support features
            self.model_init()

        results_dict = {}
        query_feats = self.extract_feat(img)
        # run detection once per support class against its class template
        for class_id in self.inference_support_dict.keys():
            support_res4_roi_feat = \
                self.inference_support_dict[class_id]['res4_roi_feats']
            support_res5_roi_feat = \
                self.inference_support_dict[class_id]['res5_roi_feats']
            if proposals is None:
                proposal_list = self.rpn_head.simple_test(
                    query_feats, support_res4_roi_feat, img_metas)
            else:
                proposal_list = proposals
            results_dict[class_id] = self.roi_head.simple_test(
                query_feats,
                support_res5_roi_feat,
                proposal_list,
                img_metas,
                rescale=rescale)
        results = [
            results_dict[i][0][0] for i in sorted(results_dict.keys())
            if len(results_dict[i])
        ]
        return [results]
|
import torch
from trains.benchmark_trains.mhkd_training import train_mhkd_grid
from models.resnet_cifar import resnet8_cifar, resnet110_cifar,resnet20_cifar
from models.Middle_Logit_Gen import Model_Wrapper
from general_utils import test_data_evaluation
from dataloader import get_test_loader_cifar
from models.middle_header_generator_mhkd import Middle_Logit_Generator_mhkd
import os
from models.OOG_resnet import ResNet34
from general_utils import get_optimizer_scheduler
from models.wide_resnet import get_Wide_ResNet_28_2
# Grid-search configuration for the MHKD distillation runs below.
DATASETS = ["cifar100"]  # datasets to sweep
DEVICE = "cuda:1"  # CUDA device used for training
log_path = "/home/aasadian/res20/teacher_vgg11/mhkd/"  # output/log directory
seeds = [30,50,67]  # random seeds for repeated runs
mhkd_beta = 0.5  # MHKD loss weight
#temperatures = [2,4,5,]
temperatures = [4,]  # distillation temperatures to sweep
alphas = [0.1]  # KD alpha values to sweep
SPATIAL_SIZE = 32  # input resolution (CIFAR); overridden to 64 for tiny
BATCH_SIZE = 64
EPOCHS= 200
SERVER = 2
TEST_MODE = True
for dataset in DATASETS:
if dataset == "cifar10":
NUM_CLASSES = 10
test_loader = get_test_loader_cifar(batch_size=BATCH_SIZE, dataset=dataset)
teacher_path = "/home/aasadian/saved/shufflenetv2cifar10_seed30/_.pth"
elif dataset == "cifar100":
NUM_CLASSES = 100
test_loader = get_test_loader_cifar(batch_size=BATCH_SIZE, dataset=dataset)
# teacher_path = "/home/aasadian/saved/ce/cifar100/res110_cifar100.pth" Res110
teacher_path = "/home/aasadian/virtualvenvs/gputestvenv/fitnes_from_scratch/codistillation/bests/ce/vgg11_cifar100.pth" #WRES_28_2
elif dataset == "tiny":
NUM_CLASSES = 200
SPATIAL_SIZE = 64
#TODO saved path for Tiny Image-Net teacher, and the test loader
print("TINY IMAGE NET")
for seed in seeds:
virtual_input = torch.rand((1, 3, SPATIAL_SIZE, SPATIAL_SIZE))
student = resnet20_cifar(seed=seed,num_classes=NUM_CLASSES)
student_outs = student(virtual_input)
student_head_1_model = Middle_Logit_Generator_mhkd(student_outs[1], num_classes=NUM_CLASSES, seed=seed)
student_head_2_model = Middle_Logit_Generator_mhkd(student_outs[2], num_classes=NUM_CLASSES, seed=seed)
#student_head_3_model = Middle_Logit_Generator_mhkd(student_outs[3], num_classes=NUM_CLASSES, seed=seed)
student_headers_dict = {}
student_headers_dict[1] = student_head_1_model
student_headers_dict[2] = student_head_2_model
#student_headers_dict[3] = student_head_3_model
from models.VGG_models import VGG_Intermediate_Branches
teacher_core = VGG_Intermediate_Branches("VGG11",seed=seed,num_classes=NUM_CLASSES)
full_modules_state_dict = {}
saved_state_dict = torch.load(teacher_path)
testing_state_dict = {}
for (key, value), (key_saved, value_saved) in zip(teacher_core.state_dict().items(), saved_state_dict.items()):
testing_state_dict[key] = value_saved
full_modules_state_dict["core."+key] = value_saved
teacher_core.load_state_dict(testing_state_dict)
teacher_core.eval()
teachers_outs = teacher_core(virtual_input)
# for VGG 16 and 11
# branch_1_model = Middle_Logit_Generator(outs[1][0],num_classes=10)
# for the rest
teacher_head_1_model = Middle_Logit_Generator_mhkd(teachers_outs[0],num_classes=NUM_CLASSES,seed=seed)
teacher_head_2_model = Middle_Logit_Generator_mhkd(teachers_outs[1],num_classes=NUM_CLASSES,seed=seed)
#teacher_head_3_model = Middle_Logit_Generator_mhkd(teachers_outs[3], num_classes=NUM_CLASSES,seed=seed)
#For Res 34
#teacher_head_4_model = Middle_Logit_Generator_mhkd(teachers_outs[4], num_classes=NUM_CLASSES,seed=seed,padding=1)
from torchsummary import summary
#print("Shape",(teachers_outs[3])[0].shape)
#summary(teacher_head_3_model,input_size=(teachers_outs[3])[0].shape,device="cpu")
teacher_headers_dict = {}
#teacher_headers_dict[1] = teacher_head_1_model
teacher_headers_dict[1] = teacher_head_1_model
teacher_headers_dict[2] = teacher_head_2_model
#teacher_headers_dict[3] = teacher_head_3_model
#for Res 34
#teacher_headers_dict[4] = teacher_head_4_model
params = list(student.parameters()) + \
list(student_head_1_model.parameters()) + \
list(student_head_2_model.parameters()) + \
list(teacher_head_1_model.parameters()) + \
list(teacher_head_2_model.parameters())
# list(student_head_3_model.parameters()) + \
#list(teacher_head_3_model.parameters()) #+ \
#list(teacher_head_4_model.parameters())
optimizer, scheduler = get_optimizer_scheduler(params, params_sent=True)
for temperature in temperatures:
for alpha in alphas:
kd_alpha = {}
# FINAL LOGITS
kd_alpha[0] = alpha
# WEAKEST CLASSIFER
kd_alpha[1] = alpha
kd_alpha[2] = alpha
#kd_alpha[3] = alpha
#kd_alpha[4] = alpha
experiment_name = dataset + "_seed_" + str(seed) + "_temp_" + str(temperature) + "_alpha_" + str(alpha)
test_acc,time_elapsed = train_mhkd_grid(student=student,
trained_core_teacher=teacher_core,
teacher_headers_dict=teacher_headers_dict,
student_headers_dict=student_headers_dict,
mhkd_beta = mhkd_beta,
optimizer=optimizer,
dataset=dataset,
path_to_save=log_path + experiment_name+".pth",
epochs=EPOCHS,
train_on=DEVICE,
server=SERVER,
input_sample_size=(BATCH_SIZE, SPATIAL_SIZE, SPATIAL_SIZE),
scheduler=scheduler,
kd_alpha=kd_alpha,
kd_temperature=temperature)
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60),"\t","Dataset ==>", dataset, "\tSeed ==>", seed, "\tTemperature ==>", temperature, "\tAlpha ===>",
alpha, "\tTest Acc ==>", test_acc)
log_text = "Experiment Name : " + experiment_name + "\n"
if not os.path.exists(log_path + "/res20.txt"):
readme = open(log_path + "/res20.txt", "a+")
else:
readme = open(log_path + "/res20.txt", "a+")
log_text += "Test Acc ==> " + str(test_acc) + "\n"
log_text += ("#" * 40) + "\n\n"
readme.write(log_text)
readme.close()
|
from django.contrib import admin
from solo.admin import SingletonModelAdmin
from .models import Settings
@admin.register(Settings)
class SettingsAdmin(SingletonModelAdmin):
    """Admin page for the singleton ``Settings`` model (django-solo)."""
    pass
|
# Copyright 2007-2014 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Rajiv Mayani'
import logging

from decimal import Decimal
from flask import url_for
from flask.json import JSONEncoder
from pegaflow.db.schema import *
from pegaflow.service.base import (
    ErrorResponse, OrderedDict, OrderedSet, PagedResponse
)
from pegaflow.service.monitoring.resources import (
    HostResource, InvocationResource, JobInstanceResource,
    JobResource, JobstateResource, RCLFNResource, RCMetaResource,
    RCPFNResource, RootWorkflowResource, RootWorkflowstateResource,
    TaskMetaResource, TaskResource, WorkflowFilesResource,
    WorkflowMetaResource, WorkflowResource, WorkflowstateResource
)
from sqlalchemy.orm.attributes import instance_state
log = logging.getLogger(__name__)
class PegasusServiceJSONEncoder(JSONEncoder):
    """
    JSON Encoder for Pegasus Service API Resources
    """
    def default(self, obj):
        # Serialise Pegasus ORM/service objects that the stock JSONEncoder
        # cannot handle.  Each isinstance branch below maps one model type to
        # an OrderedDict of its resource fields, usually adding a '_links'
        # entry with HATEOAS-style URLs built via flask's url_for.
        def obj_to_dict(
            resource, fields=None, ignore_unloaded=False, data=None
        ):
            # Copy the attributes named by resource.fields (plus any extra
            # `fields`, which are optional and skipped on AttributeError)
            # from `data` into an OrderedDict.  With ignore_unloaded=True,
            # attributes SQLAlchemy has not loaded are skipped so no lazy
            # loads are triggered during serialisation.
            # NOTE(review): falsy `data` (not just None) falls back to the
            # outer obj — confirm that is intended.
            data = data if data else obj
            obj_dict = OrderedDict()
            if ignore_unloaded:
                unloaded = instance_state(data).unloaded
                log.debug('ignore_unloaded is True, ignoring %s' % unloaded)
            for attribute in resource.fields:
                if not ignore_unloaded or (
                    ignore_unloaded and attribute not in unloaded
                ):
                    obj_dict[attribute] = getattr(data, attribute)
            if fields:
                for attribute in fields:
                    try:
                        if not ignore_unloaded or (
                            ignore_unloaded and attribute not in unloaded
                        ):
                            obj_dict[attribute] = getattr(data, attribute)
                    except AttributeError:
                        # Optional fields may be absent on the instance.
                        pass
            return obj_dict
        # Paged API responses: records plus optional totals metadata.
        if isinstance(obj, PagedResponse):
            json_record = OrderedDict([('records', obj.records)])
            if obj.total_records or obj.total_filtered:
                meta = OrderedDict()
                if obj.total_records is not None:
                    meta['records_total'] = obj.total_records
                if obj.total_filtered is not None:
                    meta['records_filtered'] = obj.total_filtered
                json_record['_meta'] = meta
            return json_record
        # Error envelope: code, message, optional per-field errors.
        elif isinstance(obj, ErrorResponse):
            json_record = OrderedDict(
                [('code', obj.code), ('message', obj.message)]
            )
            if obj.errors:
                json_record['errors'] = [
                    {
                        'field': f,
                        'errors': e
                    } for f, e in obj.errors
                ]
            return json_record
        # Dashboard (root) workflow, with a link to its sub-workflows.
        elif isinstance(obj, DashboardWorkflow):
            json_record = obj_to_dict(RootWorkflowResource())
            json_record['workflow_state'] = obj.workflow_state
            json_record['_links'] = OrderedDict(
                [
                    (
                        'workflow',
                        url_for(
                            '.get_workflows', m_wf_id=obj.wf_id, _method='GET'
                        )
                    )
                ]
            )
            return json_record
        elif isinstance(obj, DashboardWorkflowstate):
            json_record = obj_to_dict(RootWorkflowstateResource())
            return json_record
        # Workflow: links out to meta, state, jobs, tasks, hosts, invocations.
        elif isinstance(obj, Workflow):
            json_record = obj_to_dict(WorkflowResource())
            json_record['_links'] = OrderedDict(
                [
                    (
                        'workflow_meta',
                        url_for(
                            '.get_workflow_meta',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    ), (
                        'workflow_state',
                        url_for(
                            '.get_workflow_state',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    ), (
                        'job',
                        url_for(
                            '.get_workflow_jobs',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    ), (
                        'task',
                        url_for(
                            '.get_workflow_tasks',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    ), (
                        'host',
                        url_for(
                            '.get_workflow_hosts',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    ), (
                        'invocation',
                        url_for(
                            '.get_workflow_invocations',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    )
                ]
            )
            return json_record
        elif isinstance(obj, WorkflowMeta):
            json_record = obj_to_dict(WorkflowMetaResource())
            json_record['_links'] = OrderedDict(
                [('workflow', url_for('.get_workflow', wf_id=obj.wf_id))]
            )
            return json_record
        elif isinstance(obj, WorkflowFiles):
            json_record = obj_to_dict(WorkflowFilesResource())
            return json_record
        elif isinstance(obj, Workflowstate):
            json_record = obj_to_dict(WorkflowstateResource())
            json_record['_links'] = OrderedDict(
                [('workflow', url_for('.get_workflow', wf_id=obj.wf_id))]
            )
            return json_record
        # Job: optionally embeds a pre-loaded job_instance.
        elif isinstance(obj, Job):
            json_record = obj_to_dict(JobResource())
            if hasattr(obj, 'job_instance'):
                json_record['job_instance'] = obj.job_instance
            json_record['_links'] = OrderedDict(
                [
                    ('workflow', url_for('.get_workflow', wf_id=obj.wf_id)), (
                        'task',
                        url_for(
                            '.get_workflow_tasks',
                            wf_id=obj.wf_id,
                            _method='GET'
                        )
                    ), (
                        'job_instance',
                        url_for(
                            '.get_job_instances',
                            wf_id=obj.wf_id,
                            job_id=obj.job_id,
                            _method='GET'
                        )
                    )
                ]
            )
            return json_record
        elif isinstance(obj, Host):
            json_record = obj_to_dict(HostResource())
            json_record['_links'] = OrderedDict(
                [
                    (
                        'workflow',
                        url_for(
                            '.get_workflows', m_wf_id=obj.wf_id, _method='GET'
                        )
                    )
                ]
            )
            return json_record
        elif isinstance(obj, Jobstate):
            json_record = obj_to_dict(JobstateResource())
            json_record['_links'] = OrderedDict(
                [
                    (
                        'job_instance',
                        url_for(
                            '.get_job_instance',
                            job_instance_id=obj.job_instance_id
                        )
                    )
                ]
            )
            return json_record
        elif isinstance(obj, Task):
            json_record = obj_to_dict(TaskResource())
            json_record['_links'] = OrderedDict(
                [
                    ('workflow', url_for('.get_workflow', wf_id=obj.wf_id)), (
                        'job',
                        url_for(
                            '.get_job', wf_id=obj.wf_id, job_id=obj.job_id
                        )
                    ), (
                        'task_meta',
                        url_for(
                            '.get_task_meta',
                            wf_id=obj.wf_id,
                            job_id=obj.job_id,
                            task_id=obj.task_id
                        )
                    )
                ]
            )
            return json_record
        elif isinstance(obj, TaskMeta):
            json_record = obj_to_dict(TaskMetaResource())
            json_record['_links'] = OrderedDict(
                [('task', url_for('.get_task', task_id=obj.task_id))]
            )
            return json_record
        # JobInstance: serialised with ignore_unloaded to avoid lazy loads;
        # the host link is nullable because host_id may be unset.
        elif isinstance(obj, JobInstance):
            json_record = obj_to_dict(
                JobInstanceResource(), ignore_unloaded=True
            )
            json_record['_links'] = OrderedDict(
                [
                    ('job', url_for('.get_job', job_id=obj.job_id)), (
                        'state',
                        url_for(
                            '.get_job_instance_states',
                            job_id=obj.job_id,
                            job_instance_id=obj.job_instance_id,
                            _method='GET'
                        )
                    )
                ]
            )
            json_record['_links']['host'] = url_for(
                '.get_host', host_id=obj.host_id
            ) if obj.host_id else None
            json_record['_links']['invocation'] = url_for(
                '.get_job_instance_invocations',
                job_id=obj.job_id,
                job_instance_id=obj.job_instance_id,
                _method='GET'
            )
            return json_record
        elif isinstance(obj, Invocation):
            json_record = obj_to_dict(InvocationResource())
            json_record['_links'] = OrderedDict(
                [
                    ('workflow', url_for('.get_workflow', wf_id=obj.wf_id)), (
                        'job_instance',
                        url_for(
                            '.get_job_instance',
                            job_instance_id=obj.job_instance_id
                        )
                    )
                ]
            )
            return json_record
        # Replica-catalog LFN: may carry workflow-files 'extras' whose fields
        # are merged in (the LFN's own fields win on key collisions).
        elif isinstance(obj, RCLFN):
            json_record = obj_to_dict(RCLFNResource(), fields=['pfns', 'meta'])
            if hasattr(obj, 'extras'):
                json_record_2 = obj_to_dict(
                    WorkflowFilesResource(), data=obj.extras
                )
                json_record_2.update(json_record)
                json_record = json_record_2
                json_record['_links'] = OrderedDict(
                    [
                        (
                            'workflow',
                            url_for('.get_workflow', wf_id=obj.extras.wf_id)
                        ), (
                            'task',
                            url_for(
                                '.get_task',
                                wf_id=obj.extras.wf_id,
                                task_id=obj.extras.task_id
                            )
                        )
                    ]
                )
            return json_record
        elif isinstance(obj, RCPFN):
            json_record = obj_to_dict(RCPFNResource())
            return json_record
        elif isinstance(obj, RCMeta):
            json_record = obj_to_dict(RCMetaResource())
            return json_record
        # OrderedSet is not JSON-serialisable directly; emit it as a list.
        elif isinstance(obj, OrderedSet):
            json_record = [item for item in obj]
            return json_record
        # SQL DECIMAL columns arrive as Decimal; JSON has only float.
        elif isinstance(obj, Decimal):
            return float(obj)
        # Anything else: defer to the base encoder (raises TypeError).
        return JSONEncoder.default(self, obj)
|
from taskio.model import TaskioTask
class FirstLevelTask(TaskioTask):
    """Minimal demo task that only prints a placeholder message."""

    def run(self, namespace):
        # Placeholder behaviour; the printed text is intentionally unchanged.
        print("buuuu")
class GenerateUuidTask(TaskioTask):
    """Generates an uuid4 string.

    The docstring above was originally a stray string statement placed
    between methods (a no-op); it is now the class docstring.
    """

    def add_arguments(self, parser):
        """Register the CLI arguments this task accepts.

        :param parser: argparse-style parser supplied by taskio
        :return: None
        """
        parser.add_argument("-a", "--addresses", required=True)

    def get_error_message(self, error):
        # error.help carries the argparse usage/help text for the failure.
        return "Error executing 'do something'.\n%s" % error.help

    def is_my_error(self, error):
        # Claim only errors caused by the required -a/--addresses argument.
        return "argument -a/--addresses" in error.help

    def run(self, namespace):
        # Import locally so the uuid module is only loaded when the task runs.
        from uuid import uuid4
        print(uuid4())
|
def nrvocale(text):
    """Count how many lowercase vowels (a, e, i, o, u) occur in *text*.

    (Original Romanian docstring: "Scrieti o functie care calculeaza cate
    vocale sunt intr-un string".)
    """
    return sum(1 for ch in text if ch in "aeiou")


if __name__ == "__main__":
    print(nrvocale("Marian"))
|
from django.urls import path
from . import views
# Route table for the test app:
#   ''               -> list all tests
#   'add/'           -> create a new test
#   '<pk>/'          -> detail view for one test
#   '<pk>/edit/'     -> edit an existing test
#   '<pk>/process/'  -> run/process a test (function-based view)
urlpatterns = [
    path('', views.TestListView.as_view(), name='list'),
    path('add/', views.TestCreateView.as_view(), name='create'),
    path('<int:pk>/', views.TestDetailView.as_view(), name='detail'),
    path('<int:pk>/edit/', views.TestUpdateView.as_view(), name='update'),
    path('<int:pk>/process/', views.test_process, name='process'),
]
|
from seaice import *
from firedrake import *
from pathlib import Path
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
# Output directory for figure-5 artefacts; created up-front.
path = "./output/mk/figure5"
Path(path).mkdir(parents=True, exist_ok=True)
"""
TEST 2 : EVP
File reproducing figure 5 from Mehlmann and Korn (2021)
Fig 5 a) Energy vs. t (0-24h)
compare evp and vp unstabilised and stabilised
Fig 5 b) Energy vs. t (0-24h)
evp unstabilised and stabilised applied to resolution (10km, 5km, 2.5km)
Fig 5 c) Energy (log) vs. resolution (10km, 5km, 2.5km)
"""
# One-second steps over a 24h window; dumpfreq is huge so no field dumps.
timestep = 1
dumpfreq = 10 ** 10
timescale = 24 * 60 * 60
dirname = path + "/u.pvd"
fig5c_title = "Figure 5 c)"
# NOTE(review): these diagnostic files are reused for every resolution in
# the loop below — each pass overwrites the previous one; confirm intended.
d_dirname1 = path + "/evp_energy2.nc"
d_dirname2 = path + "/evp_stab_energy2.nc"
fig5c_dirname = path + "/fig5c.png"
# Sweep mesh resolutions (triangles per side of the square domain).
for triangles in [50, 100, 200]:
    number_of_triangles = triangles
    length = 5 * 10 ** 5
    mesh = SquareMesh(number_of_triangles, number_of_triangles, length)
    x, y = SpatialCoordinate(mesh)
    # Prescribed rotational ocean current forcing.
    ocean_curr = as_vector(
        [0.1 * (2 * y - length) / length, -0.1 * (length - 2 * x) / length]
    )
    # Initial conditions: zero velocity, linear ice concentration, unit
    # thickness, zero stress tensor.
    ic = {"u": 0, "a": x / length, "h": 1, "s": as_matrix([[0, 0], [0, 0]])}
    conditions = Conditions(theta=0.5, ocean_curr=ocean_curr, ic=ic)
    # Stabilised variant of the same setup.
    stabilised = {"state": True, "alpha": 10}
    conditions_stab = Conditions(
        theta=0.5, ocean_curr=ocean_curr, stabilised=stabilised, ic=ic
    )
    timestepping = TimesteppingParameters(timescale=timescale, timestep=timestep)
    output = OutputParameters(dirname=dirname, dumpfreq=dumpfreq)
    solver = SolverParameters()
    params = SeaIceParameters()
    # Unstabilised EVP model.
    evp = ElasticViscousPlastic(
        mesh=mesh,
        conditions=conditions,
        timestepping=timestepping,
        output=output,
        params=params,
        solver_params=solver,
    )
    evp.assemble(evp.eqn, evp.w1, evp.bcs, solver.srt_params)
    evp.u1, evp.s1 = evp.w1.split()
    # Stabilised EVP model.
    evp_stab = ElasticViscousPlastic(
        mesh=mesh,
        conditions=conditions_stab,
        timestepping=timestepping,
        output=output,
        params=params,
        solver_params=solver,
    )
    evp_stab.assemble(evp_stab.eqn, evp_stab.w1, evp_stab.bcs, solver.srt_params)
    evp_stab.u1, evp_stab.s1 = evp_stab.w1.split()
    t = 0
    diag1 = OutputDiagnostics(description="EVP Energy", dirname=d_dirname1)
    diag2 = OutputDiagnostics(description="EVP Stabilised Energy", dirname=d_dirname2)
    # Time loop: advance both models in lockstep, recording energy each step.
    while t < timescale - 0.5 * timestep:
        evp.solve(evp.usolver)
        evp.update(evp.w0, evp.w1)
        evp.dump(evp.u1, evp.s1, t=t)
        evp_stab.solve(evp_stab.usolver)
        evp_stab.update(evp_stab.w0, evp_stab.w1)
        evp_stab.dump(evp_stab.u1, evp_stab.s1, t=t)
        t += timestep
        evp.progress(t)
        diag1.dump(evp.u1, t)
        diag2.dump(evp_stab.u1, t)
    # fig 5c
    # Re-read the energy time series just written and add this resolution's
    # curves to the log-scale plot.
    # NOTE(review): `triangles` is a scalar while yaxis1/yaxis2 are arrays —
    # confirm semilogy is being fed the intended x data.
    dataset1 = Dataset(d_dirname1, mode="r")
    yaxis1 = dataset1.variables["energy"][:]
    dataset1.close()
    dataset2 = Dataset(d_dirname2, mode="r")
    yaxis2 = dataset2.variables["energy"][:]
    dataset2.close()
    plt.semilogy(triangles, yaxis1, "o-", label="{} triangles".format(triangles))
    plt.semilogy(
        triangles, yaxis2, "o-", label="{} triangles stabilised".format(triangles)
    )
plt.ylabel(r"Energy of solution")
plt.xlabel(r"Mesh Size")
plt.title(fig5c_title)
plt.legend(loc="best")
plt.savefig(fig5c_dirname)
|
# MenuTitle: Add UGI anchors
# -*- coding: utf-8 -*-
from AppKit import NSEvent
from UGI import LayerPositions
__doc__ = """
Adds Anchors using the UGI.
"""
from UGI.UGI_importer import import_scripts
# Load the per-glyph UGI records for the current font.
unified_infos = import_scripts(Glyphs.font)
Glyphs.clearLog()
# Glyphs.showMacroWindow()
# --- From mekkablue ---
# Read the modifier keys held while the script is launched:
# caps lock -> wipe existing anchors first; shift -> keep existing anchors.
keysPressed = NSEvent.modifierFlags()
capslockKey = 65536
capslockKeyPressed = keysPressed & capslockKey == capslockKey
shiftKey = 131072
shiftKeyPressed = keysPressed & shiftKey == shiftKey
# ---
# Apply UGI anchor definitions to every selected glyph's layers.
for sl in Glyphs.font.selectedLayers:
    g = sl.parent
    ugi = unified_infos.get(g.name)
    # Fall back when the glyph has no UGI entry or its entry has no anchors.
    # The `ugi is None` guard is new: dict.get returns None for unknown
    # glyph names and the original unconditional `ugi.anchors` crashed there.
    if ugi is None or not ugi.anchors:
        if g.script == 'devanagari' and g.category == 'Letter':
            # NOTE(review): this lookup is immediately overwritten by the
            # base-name lookup below, so it currently has no effect —
            # confirm whether it should be a fallback chain instead.
            ugi = unified_infos.get('default-deva')
        test_g_name = g.name.partition('.')[0]
        ugi = unified_infos.get(test_g_name)
    for l in g.layers:
        if capslockKeyPressed:
            # Caps lock held: start from a clean slate of anchors.
            l.anchors = []
        extant_anchors = [a.name for a in l.anchors]
        if g.script not in ['devanagari']:
            l.addMissingAnchors()
        suppressable_anchors = []
        if ugi:
            lp = LayerPositions.layerPositions(l)
            for aname, parameters in ugi.anchors.items():
                if shiftKeyPressed:
                    # Shift held: never move anchors that already existed.
                    if aname in extant_anchors:
                        continue
                if l.anchors[aname] is None:
                    l.anchors[aname] = GSAnchor()
                if parameters.get('suppress_auto'):
                    # Anchor is marked for removal rather than placement.
                    suppressable_anchors.append(aname)
                    continue
                # Resolve the anchor position; missing parameters default to
                # the anchor's current coordinates.
                apos = lp.get_coords(parameters.get('position_x', l.anchors[aname].x), parameters.get('position_y', l.anchors[aname].y))
                if apos is None:
                    apos = (lp.xpos_outline_center, lp.ypos_xHeight)
                if apos[0] is not None:
                    l.anchors[aname].x = apos[0]
                if apos[1] is not None:
                    l.anchors[aname].y = apos[1]
            for an in suppressable_anchors:
                del(l.anchors[an])
|
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
import seaborn as sns
from flow import *
# Fit a masked-autoregressive normalizing flow to the saved latent vectors.
z = np.load('data/latent_space.npy', mmap_mode='c')
z_2 = np.load('data/latent_space_2.npy', mmap_mode='c')
z = np.concatenate((z, z_2), axis=0)

hidden_dim = [256, 256]
layers = 8

# Alternate MAF layers with feature-order reversals so every dimension gets
# conditioned on every other across the stack.
bijectors = []
for _ in range(layers):
    made = make_network(32, hidden_dim, 2)
    bijectors.append(MAF(made))
    bijectors.append(tfb.Permute(permutation=list(range(31, -1, -1))))
# Drop the trailing Permute, then chain in application order.
bijectors = tfb.Chain(bijectors=list(reversed(bijectors[:-1])))

# Push a standard normal base density through the flow.
distribution = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=bijectors,
    event_shape=[32]
)

# Keras wrapper whose "loss" is simply the negative log-likelihood.
x_ = tfkl.Input(shape=(32,), dtype=tf.float32)
log_prob_ = distribution.log_prob(x_)
model = tfk.Model(x_, log_prob_)
model.compile(optimizer=tf.optimizers.Adam(), loss=lambda _, log_prob: -log_prob)
model.summary()

# loading_path = 'nflow_weights/'
# latest = tf.train.latest_checkpoint(loading_path)
# model.load_weights(latest)

# Full-batch training: one step per epoch over the whole latent set.
_ = model.fit(
    x=z,
    y=np.zeros((z.shape[0], 0), dtype=np.float32),
    batch_size=z.shape[0],
    epochs=10000,
    steps_per_epoch=1,
    verbose=1,
    shuffle=False
)

saving_path = 'nflow_weights/'
model.save_weights(saving_path + 'test')
|
def menu_action(menu, name):
    """Return the first action in *menu* whose text equals *name*, else None."""
    matches = (act for act in menu.actions() if act.text() == name)
    return next(matches, None)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 09:44:59 2019
@author: Elkoumy
"""
import pandas as pd
#data1= pd.read_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\Road_Traffic_Fine_Management_Process.csv")
#
#output=pd.DataFrame()
#output['case']=data1['case:concept:name']
#output['event']=data1['concept:name']
#output['completeTime']=data1['time:timestamp']
#
#output.to_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\Road_Traffic_Fine_Management_Process_3_columns.csv", index=0)
#
#
#data1= pd.read_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\Hospital_log.csv")
#
#output=pd.DataFrame()
#output['case']=data1['case:concept:name']
#output['event']=data1['concept:name']
#output['completeTime']=data1['time:timestamp']
#
#output.to_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\Hospital_log_3_columns.csv",index=0)
#
#
#data1= pd.read_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\BPI_Challenge_2017.csv")
#
#output=pd.DataFrame()
#output['case']=data1['case:concept:name']
#output['event']=data1['concept:name']
#output['completeTime']=data1['time:timestamp']
#
#output.to_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\BPI_Challenge_2017_3_columns.csv",index=0)
#
#
#data1= pd.read_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\CreditRequirement.csv")
#
#output=pd.DataFrame()
#output['case']=data1['case:concept:name']
#output['event']=data1['concept:name']
#output['completeTime']=data1['time:timestamp']
#
#output.to_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\CreditRequirement_3_columns.csv",index=0)
#
#
#data1= pd.read_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\CCC19.csv")b
#
#output=pd.DataFrame()
#output['case']=data1['case:concept:name']
#output['event']=data1['ACTIVITY']
#output['completeTime']=data1['time:timestamp']
#
#output.to_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\CCC19_3_columns.csv",index=0)
# Reduce the Sepsis event log to the three columns (case, event, timestamp)
# used by the secure-MPC process-mining pipeline.
data1 = pd.read_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\Sepsis Cases - Event Log.csv")
output = pd.DataFrame()
for target, source in (("case", "case:concept:name"),
                       ("event", "concept:name"),
                       ("completeTime", "time:timestamp")):
    output[target] = data1[source]
output.to_csv(r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets\SEPSIS_3_columns.csv", index=0)
from .bidirected_graph import BidirectedGraph
from .edge import Edge
from .graph import Graph
from .tnode import TNode
|
import logging
import json
# create logger
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
def printThingAsJSON(thing):
    """Serialise *thing* as JSON and write it to stdout."""
    serialised = json.dumps(thing)
    print(serialised)


if __name__ == "__main__":
    output = {"Hi": "Mom"}
    printThingAsJSON(output)
    logging.debug("Completed.")
|
import pandas as pd
from os import walk
# Walk every hand-history directory under Final_Data, parse the per-player
# records in pdb/ plus the shared hdb hand file, and write one combined CSV.
filedir = './Final_Data/'
flx = next(walk(filedir))[1]
csvs = []
for f in flx:
    try:
        filenames = ['hdb', 'hroster']  # NOTE(review): unused — kept for reference
        inner_filenames = next(walk(filedir + f + '/pdb/'), (None, None, []))[2]
        # print(filenames,":",inner_filenames)
        datax = []
        for filer in inner_filenames:
            # print('at file',filer)
            with open(filedir + f + '/pdb/' + filer) as fx:
                lines = fx.readlines()
                for line in lines:
                    lx = line.split('\t')[0]
                    # Collapse runs of spaces into fields.
                    ndat = list(filter(lambda x: x != '', lx.split(' ')))
                    # A line of NUL bytes marks the end of usable records.
                    if ndat[0] == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00':
                        break
                    if ndat[-1] == '\n':
                        continue
                    ndat[-1] = ndat[-1].strip('\n')
                    # Reshape the raw fields into
                    # [name, hand_id, bankroll, net, cards, card_values, ''].
                    ndat[2:8] = []
                    ndat[3] = int(ndat[4]) - int(ndat[3])
                    ndat[4] = ndat[5] + ndat[6]
                    ndat[5] = ndat[5][0] + ndat[6][0]
                    ndat[6] = ''
                    datax.append(ndat)
        # Build hand_id -> community-cards map from the hdb file.
        dataxmap = {}
        with open(filedir + f + '/hdb') as fx:
            lines = fx.readlines()
            for line in lines:
                lx = line.split('\t')[0]
                ndat = list(filter(lambda x: x != '', lx.split(' ')))
                ndat[-1] = ndat[-1].strip('\n')
                ndat[1:8] = []
                ndat[1] = ''.join(ndat[1:])
                ndat[2:] = []
                dataxmap[ndat[0]] = ndat[1]
        dfx = pd.DataFrame(data=datax, columns=['player_name', 'hand_id', 'starting_bankroll', 'net_earning', 'player_cards', 'play_cards_value', 'community_cards'])
        dfx['community_cards'] = dfx.apply(lambda x: dataxmap[x['hand_id']], axis=1)
        csvs.append(dfx)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work;
        # any malformed directory is skipped with a marker, as before.
        print(f, ' FAILED')
        continue
    print(f, ' PASSED')
# Concatenate every successfully parsed directory into one CSV.
pd.concat(csvs).to_csv('./out/extractedBDF.csv', index=False)
import cv2
from matplotlib import pyplot as plt
import numpy as np
def norm(val, tmin, tmax, nmin, nmax):
    """Rescale *val* from the range [nmin, nmax] onto [tmin, tmax], clamping.

    The fraction of the way val sits through [nmin, nmax] is computed,
    clamped to [0, 1], and mapped linearly onto [tmin, tmax].
    """
    # float() keeps this a true division even when all arguments are ints
    # under Python 2 (the rest of this module still uses py2 prints).
    downrange = float(val - nmin) / (nmax - nmin)
    if downrange < 0:
        downrange = 0.0
    elif downrange > 1:
        downrange = 1.0
    uprange = downrange * (tmax - tmin)
    return uprange + tmin
#for mono, just take the average, and make a 3 slot array with copies.
# NOTE: this class (and the rest of this module) is Python 2 code — it uses
# `print` statements and py2 list-returning map().
class Grapher:
    # Renders stacked RGB astro-image data and detected centroids with
    # matplotlib.
    def __init__(self):
        #self.data = np.array([redfinalstack_np,greenfinalstack_np,bluefinalstack_np])
        pass
    def plot(self,nmin,nmax,data):
        # Normalise each colour plane of `data` from [nmin, nmax] into
        # [0, 1] via norm() and show the result with plt.imshow.
        # `data` is indexed data[channel][x][y] with 3 channels — see the
        # per-pixel loop below.
        #data = data.swapaxes(0,2)
        #data = np.array(map(lambda color: map(lambda row: map(lambda cell: 0 if cell<0 else cell,row),color),data))
        print "minimum:",data
        #gray_data = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY) # from (5x5x3) to (5x5) matrix, perfect for plotting!
        #np.ndarray(shape=(2,2), dtype=float, order='F')
        colordata = np.ndarray(shape = (len(data[0]),len(data[0][0]),3))
        #print len(data[0]),len(data[0][0])
        #print colordata
        for x in range(len(data[0])):
            for y in range(len(data[0][x])):
                ##map to 0-255 based on nmin and nmax
                color = np.array([norm(data[0][x][y],0.0,1.0,nmin,nmax),norm(data[1][x][y],0.0,1.0,nmin,nmax),norm(data[2][x][y],0.0,1.0,nmin,nmax)])
                #print color
                colordata[x][y][0] = color[0] #print colordata[x][y]
                colordata[x][y][1] = color[1]
                colordata[x][y][2] = color[2]
                # = 0
        plt.imshow(colordata)
    def centroids(self,centroids):
        # Overlay centroid markers; each centroid is [x, y, value].
        # NOTE(review): zes is computed but never plotted.
        xes = map(lambda x: x[0],centroids)
        yes = map(lambda x: x[1],centroids)
        zes = map(lambda x: x[2],centroids)
        plt.scatter(xes,yes,s=50, facecolors='none', edgecolors='r')
    def finish(self):
        # Strip axis ticks and display the accumulated figure.
        plt.xticks([])
        plt.yticks([])
        plt.show()
'''
class CentroidGrapher(grapher):
def __init__(self,redfinalstack_np,greenfinalstack_np,bluefinalstack_np,centroids):
super(self,redfinalstack_np,greenfinalstack_np,bluefinalstack_np)
class HRGrapher(grapher):
def __init__(self,redfinalstack_np,greenfinalstack_np,bluefinalstack_np,stars):
super(self,redfinalstack_np,greenfinalstack_np,bluefinalstack_np)'''
# NOTE: Python 2 code (`print` statements, py2 integer division).
class Analyzer:
    # Star-detection utilities over three stacked colour planes: thresholding,
    # flood fill, centroid search and flux summation.
    def __init__(self,redfinalstack_np,greenfinalstack_np,bluefinalstack_np):
        self.redfinalstack_np = redfinalstack_np
        self.greenfinalstack_np = greenfinalstack_np
        self.bluefinalstack_np = bluefinalstack_np
        #virtual floodfill
        # Shared visited-mask; reinitialised as an ndarray in centroids().
        self.locations_visited = []
        #centroid list.
    def averageStack(self,redfinalstack_np,greenfinalstack_np,bluefinalstack_np):
        # Per-pixel mean of the three colour planes.
        stack = np.average(np.array([redfinalstack_np,greenfinalstack_np,bluefinalstack_np]),axis = 0)
        return stack
    # NOTE(review): this 2-argument floodfill is dead code — `monostack` is
    # undefined here, and the definition is shadowed by the 6-argument
    # floodfill below (later def wins at class-creation time).
    def floodfill(self,x,y):
        return monostack
    def cutoff(self,minimum,redfinalstack_np,greenfinalstack_np,bluefinalstack_np):
        ##average up all stacks
        # Binarise the averaged stack: 0 below `minimum`, 1 otherwise.
        stack = np.average(np.array([redfinalstack_np,greenfinalstack_np,bluefinalstack_np]),axis = 0)
        print stack
        monostack = map(lambda row: map(lambda cell: 0 if (cell < minimum) else 1,row),stack)
        return monostack
    def floodfill(self,stack,x,y,lightlimit,sizelimit):
        # BFS flood fill from (x, y): collect 4-connected pixels brighter
        # than `lightlimit`.  Returns [] when the seed itself is too dim.
        # Marks pixels in both a local visited mask and the shared
        # self.locations_visited so centroids() can skip scanned regions.
        print(len(stack[0]),len(stack))
        locations_visited = np.full((len(stack),len(stack[0])),0)
        #print locations_visited
        if(stack[y][x]>lightlimit):
            locations_visited[y][x] = 1
        else:
            return []
        ## [(50,50)]
        places_stack = [[x,y,stack[y][x]]]
        locations_list = []
        #print places_stack
        while(len(places_stack)>0):
            place = places_stack[0]
            #print "PLACES_STACK:",places_stack
            x = place[0]
            y = place[1]
            #print y,x,stack[y][x]
            if(y+1<len(stack)):
                if((locations_visited[y+1][x] == 0) and stack[y+1][x] > lightlimit):
                    places_stack.append([x,y+1,stack[y+1][x]])
                    locations_visited[y+1][x] = 1
                    self.locations_visited[y+1][x] = 1
                    locations_list.append([x,y+1,stack[y+1][x]])
            if(y-1>=0):
                if((locations_visited[y-1][x] == 0) and stack[y-1][x] > lightlimit):
                    places_stack.append([x,y-1,stack[y-1][x]])
                    locations_visited[y-1][x] = 1
                    self.locations_visited[y-1][x] = 1
                    locations_list.append([x,y-1,stack[y-1][x]])
            if(x+1<len(stack[0])):
                if((locations_visited[y][x+1] == 0) and stack[y][x+1] > lightlimit):
                    places_stack.append([x+1,y,stack[y][x+1]])
                    locations_visited[y][x+1] = 1
                    self.locations_visited[y][x+1] = 1
                    locations_list.append([x+1,y,stack[y][x+1]])
            if(x-1>=0):
                if((locations_visited[y][x-1] == 0) and stack[y][x-1] > lightlimit):
                    places_stack.append([x-1,y,stack[y][x-1]])
                    locations_visited[y][x-1] = 1
                    self.locations_visited[y][x-1] = 1
                    locations_list.append([x-1,y,stack[y][x-1]])
            # Pop the processed head (list used as a FIFO queue).
            places_stack = places_stack[1:]
        #print locations_list
        return locations_list
        #current_locations = self.floodfillascent(stack,x,y,[],0)
        #print "length of current_locations:",len(current_locations)
        #print "current_locations:",current_locations
    def backgroundCounts(self,stack_color_np):
        # Mean of the dimmest fifth of all pixels — a sky-background
        # estimate.  NOTE: relies on py2 integer division in the slice index
        # and in the final division.
        flatcounts = stack_color_np.flatten(order="C")
        flatcounts = sorted(flatcounts)
        return sum(flatcounts[0:len(flatcounts)/5])/len(flatcounts)
    def starFluxColor(self,stack_color_np,x,y,r):
        # Sum pixel values inside a disc of radius r around (x, y).
        # NOTE(review): the range bounds (r-y, r+y) / (r-x, r+x) look
        # transposed relative to the (col-x)/(row-y) test — confirm intent.
        summation = 0.0
        for row in range(r-y,r+y):
            for col in range(r-x,r+x):
                if (col-x)**2 + (row-y)**2 < r:
                    try:
                        summation += stack_color_np[row][col]
                    except IndexError:
                        pass
        return summation
    def starFlux(self,stack_color,x,y,r):
        # Dispatch flux summation by band letter: R, G, B, or V (= R+G+B).
        if(stack_color == "R"):
            return self.starFluxColor(self.redfinalstack_np,x,y,r)
        if(stack_color == "G"):
            return self.starFluxColor(self.greenfinalstack_np,x,y,r)
        if(stack_color == "B"):
            return self.starFluxColor(self.bluefinalstack_np,x,y,r)
        if(stack_color == "V"):
            return self.starFluxColor(self.redfinalstack_np,x,y,r)+self.starFluxColor(self.greenfinalstack_np,x,y,r)+self.starFluxColor(self.bluefinalstack_np,x,y,r)
    def centroid(self,stack,x,y,sizelimit,CUTOFF_VALUE):
        # Flood fill from (x, y) and return the brightest pixel of the
        # region as [x, y, value]; [] when the region is too small or empty.
        #print "Starting my centroid"
        #return [x,y,1.0]
        floodfilllist = self.floodfill(stack,x,y,CUTOFF_VALUE,sizelimit)
        if floodfilllist == [] or len(floodfilllist)<sizelimit:
            return []
        print len(floodfilllist)
        ##now get the HM.
        maxflood = max(floodfilllist,key = lambda x:x[2])
        ##now get the FWHM
        ##get it for band B and band V
        ##add both values to tuple.
        ##OR just use a radius of ten circle and sum all the values in it.
        return maxflood
    def centroids(self,stack,sizelimit,CUTOFF_VALUE):
        # Scan the stack on a coarse 5-pixel grid for bright, unvisited
        # seeds, flood-fill each into a candidate star, and return the
        # centroids sorted brightest-first.
        #stack = self.cutoff(stack,stack,stack,450)
        ##average of all colors to center.
        self.locations_visited = np.ndarray(shape=(len(stack),len(stack[0])))
        centroid_list = []
        #stack = np.swapaxes(stack, 0, 1)
        for y in range(0,len(stack),5):
            for x in range(0,len(stack[y]),5):
                if self.locations_visited[y][x]==1:
                    print "already seen"
                    continue
                if stack[y][x] < CUTOFF_VALUE:
                    continue
                #print x,y
                centroid = self.centroid(stack,x,y,sizelimit,CUTOFF_VALUE)
                if centroid == []:
                    pass
                else:
                    print "centroid:",centroid
                    if(centroid not in centroid_list):
                        centroid_list.append(centroid)
        return sorted(centroid_list,key = lambda x: -x[2])
    #def save(self,zipdest,target,redfinalstack_np,greenfinalstack_np,bluefinalstack_np):
#def save(self,zipdest,target,redfinalstack_np,greenfinalstack_np,bluefinalstack_np):
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
__author__ = 'Patrice Carbonneau'
__contact__ = 'patrice.carbonneau@durham.ac.uk'
__copyright__ = '(c) Patrice Carbonneau'
__license__ = 'MIT'
''' This routine compiles sentinel data (preferably cropped) and
UAV high resolution class rasters and creates tensors suitable for DNN work.
saves a tensor and label vector in npy format
'''
###############################################################################
""" Libraries"""
import pandas as pd
import numpy as np
from skimage import io
import rasterio
#############################################################
"""Inputs"""
#############################################################
# Placeholder inputs — replace the 'EMPTY' strings with real paths before
# running this script.
SiteList = 'EMPTY'#this has the lists of sites with name, month and year
DatFolder = 'EMPTY' #location of above
#tile size
size=7
#Output location
Outfile = 'EMPTY' #no extensions needed, added later
###############################################################################
'''Functions'''
def map2pix(rasterfile, xmap, ymap):
    """Convert map coordinates (xmap, ymap) to the (row, col) pixel index
    of *rasterfile*."""
    with rasterio.open(rasterfile) as map_layer:
        return map_layer.index(xmap, ymap)
def pix2map(rasterfile, xpix, ypix):
    """Convert a (row, col) pixel index of *rasterfile* to map coordinates."""
    with rasterio.open(rasterfile) as map_layer:
        return map_layer.xy(xpix, ypix)
def GetCrispClass(CLS, UL, LR):
    """Summarise the UAV class pixels inside one Sentinel-2 footprint.

    CLS is the UAV class raster; UL and LR are (row, col) corners of the
    window to inspect.  Returns a (1, 1, 3) array holding, in order, the
    relative-majority (> 1/3), majority (>= 50%) and pure (>= 95%) class
    codes; slot 0 is set to -1 when the window is empty or contains any
    no-data (0) pixels.
    """
    ClassOut = np.zeros((1, 1, 3))
    Spot = CLS[UL[0]:LR[0], UL[1]:LR[1]]
    c, counts = np.unique(Spot, return_counts=True)
    # A non-empty window with no zero-valued (no-data) pixels is usable.
    if len(c) > 0 and np.min(c) > 0:
        total = np.sum(counts)
        top = np.max(counts)
        winner = c[np.argmax(counts)]
        if top >= 0.95 * total:  # pure class
            ClassOut[0, 0, 2] = winner
        if top >= 0.5 * total:  # majority class
            ClassOut[0, 0, 1] = winner
        if top > total / 3:  # relative majority, assumes a 3 class problem
            ClassOut[0, 0, 0] = winner
    else:
        ClassOut[0, 0, 0] = -1  # flags a spot with no data
    return ClassOut
def MakeCrispClass(S2Name, UAVClassName, CLS_UAV):
    # Build a per-pixel crisp class raster for the Sentinel-2 image S2Name:
    # for each S2 pixel, project its centre to map coordinates, take a
    # +/- 5 m box around it (UL/LR corner signs follow the map's north-up
    # axis orientation — keep the order as written), convert the corners to
    # UAV raster indices, and let GetCrispClass summarise the UAV class
    # pixels inside that footprint.  Returns a (w, h, 3) array.
    S2 = io.imread(S2Name)
    w = S2.shape[0]
    h = S2.shape[1]
    CrispClass = np.zeros((w,h,3))
    for W in range(w):
        for H in range(h):
            S2coords = pix2map(S2Name, W,H)
            UL = map2pix(UAVClassName, S2coords[0]-5, S2coords[1]+5)
            LR = map2pix(UAVClassName, S2coords[0]+5, S2coords[1]-5)
            CrispClass[W,H,:] = GetCrispClass(CLS_UAV, UL, LR)
    return CrispClass
def slide_rasters_to_tiles(im, CLS, size):
    """Slide a size x size window over the co-registered rasters im and CLS.

    im is (h, w, bands); CLS is either (h, w, channels) crisp classes or a
    2-D (h, w) desk-based polygon raster, in which case the label tensor
    gets a single channel.  Returns (TileTensor, LabelTensor) with one
    entry per window position, iterated row-major.
    """
    h = im.shape[0]
    w = im.shape[1]
    di = im.shape[2]
    # Explicit dimensionality test instead of the previous bare `except:`,
    # which silently swallowed every exception type.
    dc = CLS.shape[2] if CLS.ndim == 3 else 1
    LabelTensor = np.zeros(((h - size) * (w - size), size, size, dc))
    TileTensor = np.zeros(((h - size) * (w - size), size, size, di))
    B = 0
    for y in range(0, h - size):
        for x in range(0, w - size):
            if dc > 1:
                LabelTensor[B, :, :, :] = CLS[y:y + size, x:x + size, :]
            else:
                LabelTensor[B, :, :, 0] = CLS[y:y + size, x:x + size]
            TileTensor[B, :, :, :] = im[y:y + size, x:x + size, :]
            B += 1
    return TileTensor, LabelTensor
############################################################################################
'''Main processing'''
#load the site list
SiteDF = pd.read_csv(SiteList)
# Tile size: only odd sizes are valid so that a single centre pixel exists.
if size%2 != 0:
    middle=size//2
else:
    raise Exception('Tile size of '+str(size)+ ' is even and not valid. Please choose an odd tile size')
#initialise the main outputs with Relative majority, Majority and Pure class and Polygon class cases
# The seed row (Site='none') and the zero tensor slice are stripped out
# again before export.
MasterLabelDict = {'RelMajClass':0,'MajClass':0,'PureClass':0,'PolyClass':0,'Month':0,'Year':0,'Site':'none'}
MasterLabelDF = pd.DataFrame(data=MasterLabelDict, index=[0])
# 12 channels: assumes 12-band S2 imagery -- TODO confirm against the rasters.
MasterTensor = np.zeros((1,size,size,12))
''''Pass 1: UAV classes'''
## Pass 1: tiles whose labels come from the UAV classification rasters.
##run through the sites in the DF and extract the data
for s in range(len(SiteDF.Site)):
    print('Processing UAV classes '+SiteDF.Site[s]+' '+str(SiteDF.Month[s])+' '+str(SiteDF.Year[s]))
    # Getting the data; rasters are named <Abbrev>_<Month>_<Year>_<suffix>.tif
    S2Image = DatFolder+SiteDF.Abbrev[s]+'_'+str(SiteDF.Month[s])+'_'+str(SiteDF.Year[s])+'_S2.tif'
    Isubset=io.imread(S2Image)
    #get both UAV class and S2 class and produce the fuzzy classification on the S2 image dimensions
    ClassUAVName = DatFolder+SiteDF.Abbrev[s]+'_'+str(SiteDF.Month[s])+'_'+str(SiteDF.Year[s])+'_UAVCLS.tif'
    ClassUAV = io.imread(ClassUAVName)
    ClassUAV[ClassUAV<1] = 0 #catch no data <1 but not 0 cases
    ClassUAV[ClassUAV>3] = 0 #filter other classes and cases where 255 is the no data value
    Ccrisp1 = MakeCrispClass(S2Image, ClassUAVName, ClassUAV)
    Ti, Tl = slide_rasters_to_tiles(Isubset, Ccrisp1, size)
    labels = np.zeros((Tl.shape[0],7))
    LabelDF = pd.DataFrame(data=labels, columns=['RelMajClass','MajClass','PureClass','PolyClass','Month','Year','Site'])
    #add the labels and membership to a DF for export
    # NOTE(review): chained assignment (LabelDF.Col[t]=...) relies on pandas
    # returning a view; .loc[t, 'Col'] would be the safe form.
    for t in range(0, Tl.shape[0]):
        LabelDF.RelMajClass[t]=Tl[t,middle,middle,0].reshape(1,-1)
        LabelDF.MajClass[t]=Tl[t,middle,middle,1].reshape(1,-1)
        LabelDF.PureClass[t]=Tl[t,middle,middle,2].reshape(1,-1)
        LabelDF.PolyClass[t]=-1 #this flags the fact that this part does not compute polygon classes
        LabelDF.Month[t]=SiteDF.Month[s]
        LabelDF.Year[t]=SiteDF.Year[s]
        LabelDF.Site[t]=SiteDF.Abbrev[s]
    dataspots = LabelDF.RelMajClass != -1 #finds where valid data was extracted
    numel = np.sum(dataspots)
    AugLabel = np.zeros((4*numel,7))
    AugLabelDF = pd.DataFrame(data=AugLabel, columns=['RelMajClass','MajClass','PureClass','PolyClass','Month','Year','Site'])
    AugTensor = np.zeros((4*numel, size,size,Ti.shape[3]))
    #assemble valid data and a bit of data augmentation with 90 degree rotation and flips of the tensor
    # Each valid tile contributes 4 samples: the original plus three
    # successive 90-degree rotations (with small additive noise on the
    # last two; the first rotation's noise is commented out below --
    # NOTE(review): confirm that asymmetry is intentional).
    E=0
    for n in range(0, len(dataspots)):
        if dataspots[n]:
            AugTensor[E,:,:,:]=Ti[n,:,:,:]
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
            noise=0.001*np.random.random((1,size,size,12))
            Irotated = np.rot90(Ti[n,:,:,:])
            AugTensor[E,:,:,:]=Irotated #+ noise
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
            noise=0.001*np.random.random((1,size,size,12))
            Irotated = np.rot90(Irotated)
            AugTensor[E,:,:,:]=Irotated + noise
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
            noise=0.001*np.random.random((1,size,size,12))
            Irotated = np.rot90(Irotated)
            AugTensor[E,:,:,:]=Irotated + noise
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
    MasterLabelDF = pd.concat([MasterLabelDF, AugLabelDF])
    MasterTensor = np.concatenate((MasterTensor, AugTensor), axis=0)
''''Pass 2: Desk-Based Polygon classes'''
## Pass 2: tiles whose labels come from desk-based digitised polygons.
#run through the sites in the DF and extract the data
for s in range(len(SiteDF.Site)):
    print('Processing desk-based polygon classes '+SiteDF.Site[s]+' '+str(SiteDF.Month[s])+' '+str(SiteDF.Year[s]))
    # Getting the data
    S2Image = DatFolder+SiteDF.Abbrev[s]+'_'+str(SiteDF.Month[s])+'_'+str(SiteDF.Year[s])+'_S2.tif'
    Isubset=io.imread(S2Image)
    ClassPolyFile = DatFolder+SiteDF.Abbrev[s]+'_'+str(SiteDF.Month[s])+'_'+str(SiteDF.Year[s])+'_dbPoly.tif'
    #vectorise the Polygon classes
    ClassPoly = io.imread(ClassPolyFile)
    ClassPoly[ClassPoly>3] = 0 #filter other classes and cases where 255 is the no data value
    ClassPoly[ClassPoly<1] = 0
    # ClassPoly is 2-D here, so slide_rasters_to_tiles yields a
    # single-channel label tensor.
    Ti, Tl = slide_rasters_to_tiles(Isubset, ClassPoly, size)
    # if len(Tl.shape)==3:
    #     Tl=Tl.reshape(Tl.shape[0], Tl.shape[1], Tl.shape[2], 1)
    #
    labels = np.zeros((Ti.shape[0],7))
    LabelDF = pd.DataFrame(data=labels, columns=['RelMajClass','MajClass','PureClass','PolyClass','Month','Year','Site'])
    #add the labels and membership to a DF for export
    for t in range(0, Ti.shape[0]):
        LabelDF.RelMajClass[t]=-1 #this flags the fact that this part does not compute classes from the UAV data
        LabelDF.MajClass[t]=-1
        LabelDF.PureClass[t]=-1
        LabelDF.PolyClass[t]=Tl[t,middle,middle,0].reshape(1,-1)
        LabelDF.Month[t]=SiteDF.Month[s]
        LabelDF.Year[t]=SiteDF.Year[s]
        LabelDF.Site[t]=SiteDF.Abbrev[s]
    # Valid spots are those whose centre pixel carries a polygon class
    # (0 = outside any polygon / no data).
    dataspots = LabelDF.PolyClass != 0 #finds where valid data was extracted
    numel = np.sum(dataspots)
    AugLabel = np.zeros((4*numel,7))
    AugLabelDF = pd.DataFrame(data=AugLabel, columns=['RelMajClass','MajClass','PureClass','PolyClass','Month','Year','Site'])
    AugTensor = np.zeros((4*numel, size,size,Ti.shape[3]))
    #assemble valid data and a bit of data augmentation with three 90 degree rotations
    E=0
    for n in range(0, len(dataspots)):
        if dataspots[n]:
            AugTensor[E,:,:,:]=Ti[n,:,:,:]
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
            #noise=0.001*np.random.random((1,size,size,12))
            Irotated = np.rot90(Ti[n,:,:,:])
            AugTensor[E,:,:,:]=Irotated# + noise
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
            noise=0.001*np.random.random((1,size,size,12))
            Irotated = np.rot90(Irotated)
            AugTensor[E,:,:,:]=Irotated + noise
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
            noise=0.001*np.random.random((1,size,size,12))
            Irotated = np.rot90(Irotated)
            AugTensor[E,:,:,:]=Irotated + noise
            AugLabelDF.iloc[E] = LabelDF.iloc[n]
            E+=1
    MasterLabelDF = pd.concat([MasterLabelDF, AugLabelDF])
    MasterTensor = np.concatenate((MasterTensor, AugTensor), axis=0)
#Clean up the final DFs for export: drop the seed row and the zero slice
#used to initialise the accumulators.
MasterLabelDF = MasterLabelDF[MasterLabelDF.Site != 'none']
MasterTensor = MasterTensor[1:,:,:,:]
MasterLabelDF.index = range(0,len(MasterLabelDF.RelMajClass))
#export: CSV for the DF and numpy (.npy) for the tensor
OutTrain = Outfile +'_crisp_'+str(size)+'_T.npy'
OutLabel = Outfile+'_crisp_'+str(size)+'_L.csv'
np.save(OutTrain, MasterTensor)
MasterLabelDF.to_csv(OutLabel)
|
from django.shortcuts import render
from django.http import HttpResponse
from cryptography.fernet import Fernet
# Create your views here.
def homepage(request):
    """Render the static landing page."""
    return render(request, 'homepage.html')
def get_secret_link(request):
    """Encrypt the submitted text with a freshly generated Fernet key and
    render the result.

    Context passed to Output.html: the plain text, the generated key and
    the ciphertext (template keys unchanged).
    """
    # Default to '' so a request without the field doesn't crash with
    # AttributeError on None.encode().
    entered_text = request.POST.get('Entered_Text', '')
    # One-off symmetric key; shown to the user, never stored server-side.
    key = Fernet.generate_key()
    fernet = Fernet(key)
    encrypted_secret = fernet.encrypt(entered_text.encode())
    # SECURITY: the original printed the plaintext, key and ciphertext to
    # stdout, leaking secrets into server logs; those debug prints (and the
    # redundant decrypt round-trip) are removed.
    # NOTE(review): str() on bytes renders a b'...' repr; key.decode()
    # would read better, but is kept to preserve the template output.
    components = {
        "Plain_Text": entered_text,
        "Key": str(key),
        "Cipher_or_Encrypted_Text": str(encrypted_secret),
    }
    return render(request=request, template_name="Output.html", context=components)
#!/usr/bin/env python3
"""Split reports into separate files per appid"""
# pylint: disable=invalid-name
import json
import os
import sys
from datetime import datetime
def load_reports(path='reports_piiremoved.json'):
    """Load the combined report dump and group reports by appId.

    Generalized: *path* defaults to the original hard-coded filename.
    Reports missing an appId or with a non-numeric appId are skipped with
    a warning on stderr; the timestamp of the newest report is reported.

    Returns a dict mapping appId (stripped str) -> list of report dicts.
    """
    all_reports = {}
    last_timestamp = 0
    with open(path) as reports_file:
        reports = json.load(reports_file)
    for r in reports:
        timestamp = int(r['timestamp'])
        title = r['title']
        if timestamp > last_timestamp:
            last_timestamp = timestamp
        if 'appId' not in r:
            print(f'W: {title} @ {timestamp} is missing appId', file=sys.stderr)
            continue
        app_id = r['appId'].strip()
        try:
            int(app_id)
        except ValueError:
            print(f'W: {title} @ {timestamp} has non-numeric appId', file=sys.stderr)
            continue
        all_reports.setdefault(app_id, []).append(r)
    dt = datetime.utcfromtimestamp(last_timestamp).strftime('%Y-%m-%d %H:%M:%S')
    print(f'I: the newest report from {dt}', file=sys.stderr)
    return all_reports
def main():
    """Split the combined report dump into one JSON file per appId."""
    grouped = load_reports()
    os.makedirs('json', exist_ok=True)
    for app_id, entries in grouped.items():
        with open(f'json/{app_id}-reports.json', 'w') as out:
            json.dump(entries, out, indent=2)
if __name__ == "__main__":
    main()
|
import sys
from lib.utils import Size
class Screen(object):
    """A fixed-size text canvas on stdout addressed by (i, j) positions.

    On construction, `height` blank lines of `width` spaces are reserved
    and the cursor is parked at the top-left.  All movement is emitted as
    ANSI escape sequences and clamped to the screen bounds; `pos` tracks
    the cursor.
    """
    pos:list = None  # [i, j] cursor position, 0-based
    size:Size = None  # screen dimensions
    def __init__(self, width:int, height:int):
        self._set_size(width, height)
        self._init_screen()
    def _set_size(self, width, height):
        # Zero is rejected too: a screen must have positive dimensions
        # (the original messages said "Negative", which was misleading).
        assert width > 0, 'Non-positive screen width'
        assert height > 0, 'Non-positive screen height'
        self.size = Size(width=width, height=height)
    def _init_screen(self):
        # Reserve the drawing area with blank lines, then move the cursor
        # back up to its top-left corner.
        w, h = self.size.width, self.size.height
        sys.stdout.write(((' ' * w) + '\n')* h)
        self.pos = [h, 0]
        self._cursor_move_up(h)
    def write_char(self, char:str, at=None):
        "Writes single char at position `at`. If `at` is `None`, writes at current cursors' position."
        assert len(char) == 1, '`char` string must be len of 1'
        if at is not None:
            self._cursor_move_to(*at)
        sys.stdout.write(char)
        self._incr_j_pos()
    def write_ansi_markup(self, ansi, at=None):
        """Writes `ansi` escape codes (or sequence of codes).
        (!) Use it only for writing markup codes.
        Any cursor-moving ansi-es would cause inappropriate output.
        """
        if at is not None:
            self._cursor_move_to(*at)
        sys.stdout.write(ansi)
    def _incr_j_pos(self, step=1):
        self.pos[1] += step
    def _cursor_move_up(self, step):
        ci = self.pos[0]
        self.pos[0] = max(0, ci - step)
        delta = ci - self.pos[0]
        # BUG FIX: ECMA-48 treats a 0 parameter as 1, so emitting
        # '\u001b[0A' moved the cursor one row; write nothing when the
        # clamped movement is zero.  Same fix in the three moves below.
        if delta:
            sys.stdout.write('\u001b[{}A'.format(delta))
    def _cursor_move_down(self, step):
        ci = self.pos[0]
        self.pos[0] = min(self.size.height-1, ci + step)
        delta = self.pos[0] - ci
        if delta:
            sys.stdout.write('\u001b[{}B'.format(delta))
    def _cursor_move_left(self, step):
        cj = self.pos[1]
        self.pos[1] = max(0, cj - step)
        delta = cj - self.pos[1]
        if delta:
            sys.stdout.write('\u001b[{}D'.format(delta))
    def _cursor_move_right(self, step):
        cj = self.pos[1]
        self.pos[1] = min(self.size.width-1, cj + step)
        delta = self.pos[1] - cj
        if delta:
            sys.stdout.write('\u001b[{}C'.format(delta))
    def _cursor_move_to(self, i, j):
        # Decompose the jump into at most one vertical and one horizontal move.
        ci, cj = self.pos
        if i > ci:
            self._cursor_move_down(i - ci)
        elif i < ci:
            self._cursor_move_up(ci - i)
        if j > cj:
            self._cursor_move_right(j - cj)
        elif j < cj:
            self._cursor_move_left(cj - j)
    def cursor_move(self, i, j):
        "Moves cursor to `i`, `j` 0-based position. The moving is bounded by `size` of `Screen`."
        self._cursor_move_to(i, j)
    def cursor_move_outside(self):
        """Moves cursor just after the `Screen` (start of line with y = `Screen.size.height`.
        Useful for ending."""
        self._cursor_move_to(self.size.height, 0)
        sys.stdout.write('\u001b[{}B'.format(1))
if __name__ == '__main__':
    # Manual smoke test: draw a coloured 'a', plain 'b' and 'c', then park
    # the cursor below the reserved screen area.
    scr = Screen(width=10, height=20)
    scr.write_ansi_markup('\u001b[38;5;100m', (0, 0))
    scr.write_char('a', (0, 0))
    scr.write_ansi_markup('\u001b[0m', (0, 0))
    scr.write_char('b', (0, 1))
    scr.write_char('c', (2, 3))
    scr.cursor_move_outside()
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility import Language
from sensibility.language.java import java
from sensibility import Position
from location_factory import LocationFactory
test_file_good = r"""package ca.ualberta.cs.emplab.example;
cl\u0061ss Example // This will still compile:\u000A{
}
"""
test_file_bad = r"""package ca.ualberta.cs.emplab.example;
class Example {
static final int NO_MORE_DOCS = -1;
static {
for (int i = 0; i < scorers.length; i++) {
if (scorers[i].nextDoc() == NO_MORE_DOCS)
lastDoc = NO_MORE_DOCS;
return;
}
}
}
}
"""
# c269bfeb157c6dce747f1c786e6136de31cc6700eb73e38e81eef47e2dfc00a4
test_file_really_bad = r"""class RenameTest {
static void fo<caret>o1(Number n) {
System.out.println("1");
}
static void foo2(Long i) {
System.out.println("2");
}
public static void main(String[] args) {
long n = 0;
foo1(n);
}
}"""
def test_sanity_check() -> None:
    """The Java language front-end identifies itself as 'java'."""
    assert java.id == 'java'
def test_check_syntax():
    """check_syntax accepts valid sources (even with unicode escapes) and
    rejects malformed or token-invalid ones."""
    assert java.check_syntax(test_file_good)
    assert not java.check_syntax(test_file_bad)
    # Invalid token
    assert java.check_syntax('#') is False
    assert java.check_syntax(test_file_really_bad) is False
def test_summarize() -> None:
    """summarize() counts tokens and PHYSICAL lines of code."""
    summary = java.summarize(test_file_good)
    assert summary.n_tokens == 15
    # Return the PHYSICAL number of lines of code.
    # There are 4 logical lines in this example, caused by the \u000A escape.
    assert summary.sloc == 3
def test_vocabularize(c) -> None:
    """Tokens from vocabularize_with_locations carry correct source spans.

    `c` is presumably a pytest fixture mapping a token's text to its
    vocabulary entry -- TODO confirm against conftest.py.
    """
    loc = LocationFactory(Position(line=1, column=0))
    result = list(java.vocabularize_with_locations(test_file_bad))
    # Only the leading tokens of test_file_bad are checked.
    expected = [
        (loc.across(len("package")), c('package')),
        (loc.space().across(len('ca')), 'IDENTIFIER'),
        (loc.across(1), c('.')),
        (loc.across(len('ualberta')), 'IDENTIFIER'),
        (loc.across(1), c('.')),
        (loc.across(len('cs')), 'IDENTIFIER'),
        (loc.across(1), c('.')),
        (loc.across(len('emplab')), 'IDENTIFIER'),
        (loc.across(1), c('.')),
        (loc.across(len('example')), 'IDENTIFIER'),
        (loc.across(1), c(';')),
        (loc.next_line().next_line().across(5), c('class')),
    ]
    assert result[:len(expected)] == expected
def test_vocabulary() -> None:
    """
    Test whether every entry is unique and source-representable.
    """
    # ('indicies' is the library's own API spelling.)
    unique_entries = set(java.vocabulary.representable_indicies())
    entries_seen = 0
    for idx in java.vocabulary.representable_indicies():
        text = java.vocabulary.to_source_text(idx)
        # What happens when we reparse the single token?
        tokens = tuple(java.vocabularize(text))
        assert len(tokens) == 1
        actual_idx = java.vocabulary.to_index(tokens[0])
        assert idx == actual_idx
        entries_seen += 1
    # No duplicates: every representable index round-tripped exactly once.
    assert len(unique_entries) == entries_seen
def test_tokenize_invalid():
    """An unrecognised character yields a single ERROR token."""
    tokens = list(java.tokenize('#'))
    assert len(tokens) == 1
    assert tokens[0].name == 'ERROR'
def test_tokenize_evil():
    """Unicode escapes are decoded before tokenization, per JLS §3.3."""
    # I'm learning awful things about Java today
    # For a good time, read The Java SE Specification Section §3.3
    # https://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html#jls-3.3
    # Then follow that up with The Java SE Specification Section §3.5
    # https://docs.oracle.com/javase/specs/jls/se7/html/jls-3.html#jls-3.5
    # tokens = list(java.tokenize('p. \\u0042 \\uuu003B\u001a'))
    tokens = list(java.tokenize('p. \\u0042 \\uuu003B'))
    assert 4 == len(tokens)
    assert tokens[0].value == 'p'
    assert tokens[1].value == '.'
    assert tokens[2].value == 'B'
    assert tokens[3].value == ';'
|
import read
import pandas as pd
from sklearn.svm import LinearSVC, SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
print("Reading data ...")
x_all, y_all = read.read_multiclass(load_data=False)
x_train, x_test, y_train, y_test = train_test_split(x_all, y_all, test_size=0.3, random_state=42)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# model = LinearSVC(C=1000,max_iter=10000)
model = SVC(C=1000, cache_size=200, class_weight=None, coef0=0.0,
            decision_function_shape='ovr', degree=3, gamma=0.01, kernel='rbf',
            max_iter=-1, probability=False, random_state=None, shrinking=True,
            tol=0.001, verbose=False)
# [#SVC(),
# load dataset
data = pd.read_csv('./sha256_family.csv')
# rows and columns of the data
print(data.shape)
# visualise the dataset
# NOTE(review): data.head() without print() is a no-op in a script.
data.head()
target_names = data['family'].unique()
# for model in models:
print("Fitting SVM ...")
model.fit(x_train, y_train)
# import joblib
# # save the model to disk
# filename = './finalized_model_multi_SVC.joblib'
# joblib.dump(model, filename)
import joblib
filename = './finalized_model_multi_SVC.joblib'
# load the model from disk
# NOTE(review): this discards the model fitted just above and replaces it
# with the previously saved one; keep either the fit or the load, not both.
model = joblib.load(filename)
print("Evaluating ...")
print("x_test:", x_test[0])
predicted = model.predict([[1, 1, 1, 1, 1, 1, 1, 1]])
print("Predicteddddddddddddddd", predicted)
# NOTE(review): all metrics below use only the first 10 test samples.
y_pred = model.predict(x_test[:10])
print("Y predict", y_pred)
print("Accuracy is %f." % accuracy_score(y_test[:10], y_pred[:10]))
print(confusion_matrix(y_test[:10], y_pred))
print("Precision score is %f." % precision_score(y_test[:10], y_pred, average='micro'))
print("Recall score is %f." % recall_score(y_test[:10], y_pred, average='micro'))
print("F1 score is %f." % f1_score(y_test[:10], y_pred, average='micro'))
print("-----------------------------------")
print('Confusion Matrix')
print(confusion_matrix(y_test[:10], y_pred))
print('Classification Report')
# #target_names = ['Not Virus','Virus']
# print(classification_report(y_test, y_pred, target_names))
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2;
# migrate to ConfusionMatrixDisplay.from_estimator.
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
# Plot non-normalized confusion matrix
titles_options = [("Confusion matrix, without normalization", None),
                  ("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
    disp = plot_confusion_matrix(model, x_test[:10], y_test[:10],
                                 display_labels=target_names,
                                 cmap=plt.cm.Blues,
                                 normalize=normalize)
    disp.ax_.set_title(title)
    print(title)
    print(disp.confusion_matrix)
plt.show()
|
# Read five weights and report the heaviest and lightest.
n=1
v = 0       # running maximum weight
v2 = 20000  # running minimum weight (sentinel larger than any realistic input)
for c in range(1,6,1):
    peso = float(input('Peso da {}ª pessoa em kg: '.format(n)))
    n += 1
    # BUG FIX: the original used `elif`, so a weight that raised the
    # maximum could never also update the minimum (e.g. strictly
    # increasing inputs left the minimum stuck at the 20000 sentinel).
    # The two checks are independent.
    if v < peso:
        v = peso
    if v2 > peso:
        v2 = peso
print('O maior peso é {} kg'.format(v))
print('O menor peso é {} kg'.format(v2))
#vars.py
# Module-level mutable state shared across the program.
# NOTE(review): the meaning of X0..X3 is not visible from this file alone;
# presumably counters/registers mutated by the importing module -- confirm.
X0=0
X1=0
X2=0
X3=0
#M=1
import logging
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
    """Common handler giving access to a database client on the server
    currently selected via the "selected-server" cookie."""

    def client_for(self, database):
        """Return a DB client for *database* on the selected server."""
        return self.application.db.client(self.selected_server(), database)

    def selected_server(self):
        """Return the server name from the cookie, falling back to the
        first known server; the default is persisted only when no cookie
        was present at all."""
        # Read the cookie once (the original fetched it twice) and compare
        # with `is None` rather than `== None`.
        cookie = self.get_cookie("selected-server")
        server = cookie or self.application.db.server_names()[0]
        if cookie is None:
            self.set_cookie("selected-server", server)
        return server
|
import sys
# Read every input line; the first is ignored (header/count), the rest are
# numbers to test against the small primes below.
numbers = [raw.rstrip('\n') for raw in sys.stdin]
primes = [2, 3, 5, 7, 11]
found = set()
for value in numbers[1:]:
    for p in primes:
        if int(value) % p == 0:
            found.add(p)
# Print the distinct divisors in ascending order, space-separated.
print(" ".join(str(p) for p in sorted(found)))
from datetime import date
from django.db import models
from django.db.models import Q, F
from django.utils.translation import get_language
from reverse_unique import ReverseUnique
class Article(models.Model):
    """Article with per-language translations (see ArticleTranslation)."""
    pub_date = models.DateField()
    # The one translation matching the active UI language; get_language is
    # passed as a callable so it is evaluated at query time.
    active_translation = ReverseUnique(
        "ArticleTranslation", filters=Q(lang=get_language))
    class Meta:
        app_label = 'reverse_unique'
class Lang(models.Model):
    """A language, keyed by its two-letter code."""
    code = models.CharField(max_length=2, primary_key=True)
    class Meta:
        app_label = 'reverse_unique'
class ArticleTranslation(models.Model):
    """One translation of an Article; unique per (article, lang)."""
    # on_delete made explicit: CASCADE matches the pre-Django-2.0 implicit
    # default and the argument is mandatory from Django 2.0 onwards.
    article = models.ForeignKey(Article, on_delete=models.CASCADE)
    lang = models.ForeignKey(Lang, on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    abstract = models.CharField(max_length=100, null=True)
    body = models.TextField()
    class Meta:
        unique_together = ('article', 'lang')
        app_label = 'reverse_unique'
# The idea for DefaultTranslationArticle is that article's have default
# language. This allows testing of filter condition targeting both
# tables in the join.
class DefaultTranslationArticle(models.Model):
    """Article carrying its own default language, so filters can target
    both tables of the join (lang = F('article__default_lang'))."""
    pub_date = models.DateField()
    default_lang = models.CharField(max_length=2)
    # Translation for the currently active UI language.
    active_translation = ReverseUnique(
        "DefaultTranslationArticleTranslation", filters=Q(lang=get_language))
    # Translation for the article's own default language.
    default_translation = ReverseUnique(
        "DefaultTranslationArticleTranslation", filters=Q(lang=F('article__default_lang')))
    class Meta:
        app_label = 'reverse_unique'
class DefaultTranslationArticleTranslation(models.Model):
    """Translation of a DefaultTranslationArticle; unique per (article, lang)."""
    # on_delete made explicit: CASCADE matches the pre-Django-2.0 implicit
    # default and the argument is mandatory from Django 2.0 onwards.
    article = models.ForeignKey(DefaultTranslationArticle, on_delete=models.CASCADE)
    lang = models.CharField(max_length=2)
    title = models.CharField(max_length=100)
    abstract = models.CharField(max_length=100, null=True)
    body = models.TextField()
    class Meta:
        unique_together = ('article', 'lang')
        app_label = 'reverse_unique'
class Guest(models.Model):
    """A hotel guest."""
    name = models.CharField(max_length=100)
    class Meta:
        app_label = 'reverse_unique'
class Room(models.Model):
    """Hotel room whose current_reservation resolves today's active booking."""
    # A reservation is "current" when it started on or before today and
    # either has no end date (open-ended) or ends on or after today;
    # date.today is passed as a callable so it is evaluated per query.
    current_reservation = ReverseUnique(
        "Reservation", through='reservations',
        filters=(Q(from_date__lte=date.today) & (
            Q(until_date__gte=date.today) | Q(until_date__isnull=True))))
    class Meta:
        app_label = 'reverse_unique'
class Reservation(models.Model):
    """A guest's stay in a room; until_date NULL means an open-ended stay."""
    # on_delete made explicit: CASCADE matches the pre-Django-2.0 implicit
    # default and the argument is mandatory from Django 2.0 onwards.
    room = models.ForeignKey(Room, related_name='reservations', on_delete=models.CASCADE)
    guest = models.ForeignKey(Guest, on_delete=models.CASCADE)
    from_date = models.DateField()
    until_date = models.DateField(null=True) # NULL means reservation "forever".
    class Meta:
        app_label = 'reverse_unique'
class Parent(models.Model):
    """Base model used to exercise ReverseUnique through inheritance."""
    rel1 = ReverseUnique("Rel1", filters=Q(f1="foo"))
    # Nullable unique field targeted by Rel3's to_field foreign key.
    uniq_field = models.CharField(max_length=10, unique=True, null=True)
    class Meta:
        app_label = 'reverse_unique'
class Rel1(models.Model):
    """Related model behind Parent.rel1 (matched when f1 == 'foo')."""
    # on_delete made explicit: CASCADE matches the pre-Django-2.0 implicit
    # default and the argument is mandatory from Django 2.0 onwards.
    parent = models.ForeignKey(Parent, related_name="rel1list", on_delete=models.CASCADE)
    f1 = models.CharField(max_length=10)
    class Meta:
        app_label = 'reverse_unique'
class Child(Parent):
    """Multi-table-inheritance child adding its own ReverseUnique to Rel2."""
    rel2 = ReverseUnique("Rel2", filters=Q(f1="foo"))
    class Meta:
        app_label = 'reverse_unique'
class AnotherChild(Child):
    """Second-level child with a ReverseUnique to the grandparent's Rel1."""
    rel1_child = ReverseUnique("Rel1", filters=Q(f1__startswith="foo"))
    class Meta:
        app_label = 'reverse_unique'
class Rel2(models.Model):
    """Related model behind Child.rel2 (matched when f1 == 'foo')."""
    # on_delete made explicit: CASCADE matches the pre-Django-2.0 implicit
    # default and the argument is mandatory from Django 2.0 onwards.
    child = models.ForeignKey(Child, related_name="rel2list", on_delete=models.CASCADE)
    f1 = models.CharField(max_length=10)
    class Meta:
        app_label = 'reverse_unique'
class Rel3(models.Model):
    """Foreign key targeting Parent.uniq_field rather than the primary key."""
    # on_delete made explicit: CASCADE matches the pre-Django-2.0 implicit
    # default and the argument is mandatory from Django 2.0 onwards.
    a_model = models.ForeignKey(Parent, to_field='uniq_field', on_delete=models.CASCADE)
    class Meta:
        app_label = 'reverse_unique'
|
#
# PySNMP MIB module Dell-BRIDGE-SECURITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Dell-BRIDGE-SECURITY
# Produced by pysmi-0.3.4 at Mon Apr 29 18:40:37 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint")
rnd, = mibBuilder.importSymbols("Dell-MIB", "rnd")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
VlanId, = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanId")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, iso, Counter64, IpAddress, Counter32, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Unsigned32, ModuleIdentity, TimeTicks, ObjectIdentity, MibIdentifier, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "iso", "Counter64", "IpAddress", "Counter32", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Unsigned32", "ModuleIdentity", "TimeTicks", "ObjectIdentity", "MibIdentifier", "Bits")
RowStatus, TextualConvention, MacAddress, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "MacAddress", "TruthValue", "DisplayString")
rlBridgeSecurity = ModuleIdentity((1, 3, 6, 1, 4, 1, 89, 112))
if mibBuilder.loadTexts: rlBridgeSecurity.setLastUpdated('200604020000Z')
if mibBuilder.loadTexts: rlBridgeSecurity.setOrganization('')
rlIpDhcpSnoop = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 112, 1))
rlIpSourceGuard = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 112, 2))
rlIpArpInspect = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 112, 3))
rlProtocolFiltering = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 112, 4))
rlIpDhcpSnoopMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpDhcpSnoopMibVersion.setStatus('current')
rlIpDhcpSnoopEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopEnable.setStatus('current')
rlIpDhcpSnoopFileEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopFileEnable.setStatus('current')
rlIpDhcpSnoopClearAction = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noAction", 1), ("clearNow", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopClearAction.setStatus('current')
rlIpDhcpSnoopFileUpdateTime = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(600, 86400))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopFileUpdateTime.setStatus('current')
rlIpDhcpSnoopVerifyMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopVerifyMacAddress.setStatus('current')
rlIpDhcpSnoopCurrentEntiresNumber = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpDhcpSnoopCurrentEntiresNumber.setStatus('current')
rlIpDhcpOpt82InsertionEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpOpt82InsertionEnable.setStatus('current')
rlIpDhcpOpt82RxOnUntrustedEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpOpt82RxOnUntrustedEnable.setStatus('current')
rlIpDhcpSnoopStaticTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 1, 10), )
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticTable.setStatus('current')
rlIpDhcpSnoopStaticEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 1, 10, 1), ).setIndexNames((0, "Dell-BRIDGE-SECURITY", "rlIpDhcpSnoopStaticVLANTag"), (0, "Dell-BRIDGE-SECURITY", "rlIpDhcpSnoopStaticMACAddress"))
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticEntry.setStatus('current')
rlIpDhcpSnoopStaticVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 10, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticVLANTag.setStatus('current')
rlIpDhcpSnoopStaticMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 10, 1, 2), MacAddress())
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticMACAddress.setStatus('current')
rlIpDhcpSnoopStaticIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 10, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticIPAddress.setStatus('current')
rlIpDhcpSnoopStaticPortInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 10, 1, 4), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticPortInterface.setStatus('current')
rlIpDhcpSnoopStaticRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 10, 1, 5), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopStaticRowStatus.setStatus('current')
class RlIpDhcpSnoopType(TextualConvention, Integer32):
    """How a DHCP-snooping binding entry came to exist (generated from the
    Dell-BRIDGE-SECURITY MIB textual convention)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("learnedByProtocol", 1), ("deletedByTimeout", 2), ("static", 3))
rlIpDhcpSnoopTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 1, 11), )
if mibBuilder.loadTexts: rlIpDhcpSnoopTable.setStatus('current')
rlIpDhcpSnoopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1), ).setIndexNames((0, "Dell-BRIDGE-SECURITY", "rlIpDhcpSnoopVLANTag"), (0, "Dell-BRIDGE-SECURITY", "rlIpDhcpSnoopMACAddress"))
if mibBuilder.loadTexts: rlIpDhcpSnoopEntry.setStatus('current')
rlIpDhcpSnoopVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpDhcpSnoopVLANTag.setStatus('current')
rlIpDhcpSnoopMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 2), MacAddress())
if mibBuilder.loadTexts: rlIpDhcpSnoopMACAddress.setStatus('current')
rlIpDhcpSnoopType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 3), RlIpDhcpSnoopType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopType.setStatus('current')
rlIpDhcpSnoopLeaseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 4), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopLeaseTime.setStatus('current')
rlIpDhcpSnoopIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopIPAddress.setStatus('current')
rlIpDhcpSnoopPortInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 6), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopPortInterface.setStatus('current')
rlIpDhcpSnoopRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 11, 1, 7), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopRowStatus.setStatus('current')
rlIpDhcpSnoopEnableVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 1, 12), )
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanTable.setStatus('current')
rlIpDhcpSnoopEnableVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 1, 12, 1), ).setIndexNames((0, "Dell-BRIDGE-SECURITY", "rlIpDhcpSnoopEnableVlanTag"))
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanEntry.setStatus('current')
rlIpDhcpSnoopEnableVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 12, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanTag.setStatus('current')
rlIpDhcpSnoopEnableVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 12, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopEnableVlanRowStatus.setStatus('current')
rlIpDhcpSnoopTrustedPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 1, 13), )
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortTable.setStatus('current')
rlIpDhcpSnoopTrustedPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 1, 13, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortEntry.setStatus('current')
rlIpDhcpSnoopTrustedPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 1, 13, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpDhcpSnoopTrustedPortRowStatus.setStatus('current')
rlIpSourceGuardMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardMibVersion.setStatus('current')
rlIpSourceGuardEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardEnable.setStatus('current')
rlIpSourceGuardRetryToInsert = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("noAction", 0), ("retryToInsertNow", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardRetryToInsert.setStatus('current')
rlIpSourceGuardRetryTime = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardRetryTime.setStatus('current')
rlIpSourceGuardPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 2, 5), )
if mibBuilder.loadTexts: rlIpSourceGuardPortTable.setStatus('current')
rlIpSourceGuardPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 2, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIpSourceGuardPortEntry.setStatus('current')
rlIpSourceGuardPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 5, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpSourceGuardPortRowStatus.setStatus('current')
class RlIpSourceGuardType(TextualConvention, Integer32):
    """Textual convention: how an IP source guard binding was learned
    (dynamic=1 via DHCP snooping, static=2 configured by the operator)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("dynamic", 1), ("static", 2))
class RlIpSourceGuardStatus(TextualConvention, Integer32):
    """Textual convention: activation state of a source guard binding
    (active=1, inactive=2)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("active", 1), ("inactive", 2))
class RlIpSourceGuardFailReason(TextualConvention, Integer32):
    """Textual convention: why a source guard binding is not active
    (noProblem=1, noResource=2, noSnoopVlan=3, trustPort=4)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("noProblem", 1), ("noResource", 2), ("noSnoopVlan", 3), ("trustPort", 4))
# IP source guard binding table: one row per (ifIndex, IP address, VLAN tag),
# exposing the bound MAC, how the binding was learned, its status and, when
# inactive, the failure reason. All data columns are read-only.
rlIpSourceGuardTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 2, 6), )
if mibBuilder.loadTexts: rlIpSourceGuardTable.setStatus('current')
rlIpSourceGuardEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "Dell-BRIDGE-SECURITY", "rlIpSourceGuardIPAddress"), (0, "Dell-BRIDGE-SECURITY", "rlIpSourceGuardVLANTag"))
if mibBuilder.loadTexts: rlIpSourceGuardEntry.setStatus('current')
rlIpSourceGuardIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1, 1), IpAddress())
if mibBuilder.loadTexts: rlIpSourceGuardIPAddress.setStatus('current')
rlIpSourceGuardVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1, 2), VlanId())
if mibBuilder.loadTexts: rlIpSourceGuardVLANTag.setStatus('current')
rlIpSourceGuardMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardMACAddress.setStatus('current')
rlIpSourceGuardType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1, 4), RlIpSourceGuardType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardType.setStatus('current')
rlIpSourceGuardStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1, 5), RlIpSourceGuardStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardStatus.setStatus('current')
rlIpSourceGuardFailReason = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 6, 1, 6), RlIpSourceGuardFailReason()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardFailReason.setStatus('current')

# Per-VLAN counters of permitted source guard rules, split into statically
# configured rules and rules learned via DHCP.
rlIpSourceGuardPermittedRuleCounterTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 2, 7), )
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterTable.setStatus('current')
rlIpSourceGuardPermittedRuleCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 2, 7, 1), ).setIndexNames((0, "Dell-BRIDGE-SECURITY", "rlIpSourceGuardPermittedRuleCounterVLANTag"))
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterEntry.setStatus('current')
rlIpSourceGuardPermittedRuleCounterVLANTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 7, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterVLANTag.setStatus('current')
rlIpSourceGuardPermittedRuleCounterNumOfStaticRules = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 7, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterNumOfStaticRules.setStatus('current')
rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 2, 7, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules.setStatus('current')
class RlIpArpInspectListNameType(DisplayString):
    """Textual convention: name of an ARP inspection list (1-32 chars)."""
    status = 'current'
    subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 32)
# ARP inspection scalars: MIB version, global enable, log interval and
# additional-validation toggle.
rlIpArpInspectMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectMibVersion.setStatus('current')
rlIpArpInspectEnable = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectEnable.setStatus('current')
rlIpArpInspectLogInterval = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 3, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectLogInterval.setStatus('current')
rlIpArpInspectValidation = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectValidation.setStatus('current')

# ARP inspection lists: (list name, IP) -> MAC bindings, row-managed.
rlIpArpInspectListTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 3, 5), )
if mibBuilder.loadTexts: rlIpArpInspectListTable.setStatus('current')
rlIpArpInspectListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 3, 5, 1), ).setIndexNames((0, "Dell-BRIDGE-SECURITY", "rlIpArpInspectListName"), (0, "Dell-BRIDGE-SECURITY", "rlIpArpInspectListIPAddress"))
if mibBuilder.loadTexts: rlIpArpInspectListEntry.setStatus('current')
rlIpArpInspectListName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 5, 1, 1), RlIpArpInspectListNameType())
if mibBuilder.loadTexts: rlIpArpInspectListName.setStatus('current')
rlIpArpInspectListIPAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 5, 1, 2), IpAddress())
if mibBuilder.loadTexts: rlIpArpInspectListIPAddress.setStatus('current')
rlIpArpInspectListMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 5, 1, 3), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectListMACAddress.setStatus('current')
rlIpArpInspectListRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 5, 1, 4), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectListRowStatus.setStatus('current')

# Per-VLAN ARP inspection: which list a VLAN uses, plus forwarded/dropped/
# mismatched counters and a per-VLAN clear-counters action.
rlIpArpInspectEnableVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 3, 6), )
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanTable.setStatus('current')
rlIpArpInspectEnableVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1), ).setIndexNames((0, "Dell-BRIDGE-SECURITY", "rlIpArpInspectEnableVlanTag"))
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanEntry.setStatus('current')
rlIpArpInspectEnableVlanTag = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 1), VlanId())
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanTag.setStatus('current')
rlIpArpInspectAssignedListName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 2), RlIpArpInspectListNameType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectAssignedListName.setStatus('current')
rlIpArpInspectEnableVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 3), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectEnableVlanRowStatus.setStatus('current')
rlIpArpInspectVlanNumOfArpForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpForwarded.setStatus('current')
rlIpArpInspectVlanNumOfArpDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpDropped.setStatus('current')
rlIpArpInspectVlanNumOfArpMismatched = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlIpArpInspectVlanNumOfArpMismatched.setStatus('current')
rlIpArpInspectVlanClearCountersAction = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 6, 1, 7), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectVlanClearCountersAction.setStatus('current')

# ARP inspection trusted ports (by ifIndex) and a global clear-counters flag.
rlIpArpInspectTrustedPortTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 3, 7), )
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortTable.setStatus('current')
rlIpArpInspectTrustedPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 3, 7, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortEntry.setStatus('current')
rlIpArpInspectTrustedPortRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 3, 7, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectTrustedPortRowStatus.setStatus('current')
rlIpArpInspectClearCountersAction = MibScalar((1, 3, 6, 1, 4, 1, 89, 112, 3, 8), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlIpArpInspectClearCountersAction.setStatus('current')
class ProtocolFilteringMap(TextualConvention, Bits):
    """Textual convention: bit map of L2 protocols a port may filter
    (all, cdp, vtp, dtp, udld, pagp, sstp)."""
    status = 'current'
    namedValues = NamedValues(("all", 0), ("cdp", 1), ("vtp", 2), ("dtp", 3), ("udld", 4), ("pagp", 5), ("sstp", 6))
# Per-port protocol filtering table: the bit map of filtered protocols plus
# a RowStatus column, keyed by ifIndex.
rlProtocolFilteringTable = MibTable((1, 3, 6, 1, 4, 1, 89, 112, 4, 1), )
if mibBuilder.loadTexts: rlProtocolFilteringTable.setStatus('current')
rlProtocolFilteringEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 112, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: rlProtocolFilteringEntry.setStatus('current')
rlProtocolFilteringList = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 4, 1, 1, 1), ProtocolFilteringMap()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlProtocolFilteringList.setStatus('current')
rlProtocolFilteringRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 112, 4, 1, 1, 2), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlProtocolFilteringRowStatus.setStatus('current')
# Generated export of every MIB symbol defined by this module (do not edit).
mibBuilder.exportSymbols("Dell-BRIDGE-SECURITY", rlIpArpInspectEnableVlanTag=rlIpArpInspectEnableVlanTag, RlIpSourceGuardFailReason=RlIpSourceGuardFailReason, rlIpSourceGuardTable=rlIpSourceGuardTable, rlIpArpInspectListRowStatus=rlIpArpInspectListRowStatus, RlIpArpInspectListNameType=RlIpArpInspectListNameType, rlBridgeSecurity=rlBridgeSecurity, rlProtocolFilteringRowStatus=rlProtocolFilteringRowStatus, rlProtocolFilteringEntry=rlProtocolFilteringEntry, rlIpDhcpSnoopVerifyMacAddress=rlIpDhcpSnoopVerifyMacAddress, rlIpArpInspectTrustedPortTable=rlIpArpInspectTrustedPortTable, rlIpSourceGuardPermittedRuleCounterTable=rlIpSourceGuardPermittedRuleCounterTable, rlIpArpInspectListMACAddress=rlIpArpInspectListMACAddress, rlIpSourceGuardMACAddress=rlIpSourceGuardMACAddress, rlIpSourceGuardFailReason=rlIpSourceGuardFailReason, rlIpArpInspectListIPAddress=rlIpArpInspectListIPAddress, rlIpDhcpSnoop=rlIpDhcpSnoop, rlIpDhcpSnoopFileUpdateTime=rlIpDhcpSnoopFileUpdateTime, ProtocolFilteringMap=ProtocolFilteringMap, rlIpArpInspectTrustedPortEntry=rlIpArpInspectTrustedPortEntry, rlIpArpInspect=rlIpArpInspect, rlIpDhcpSnoopEnableVlanEntry=rlIpDhcpSnoopEnableVlanEntry, rlIpSourceGuardType=rlIpSourceGuardType, rlIpDhcpSnoopRowStatus=rlIpDhcpSnoopRowStatus, rlIpSourceGuardStatus=rlIpSourceGuardStatus, rlIpSourceGuardMibVersion=rlIpSourceGuardMibVersion, rlIpDhcpSnoopStaticVLANTag=rlIpDhcpSnoopStaticVLANTag, rlIpDhcpSnoopIPAddress=rlIpDhcpSnoopIPAddress, rlIpDhcpOpt82RxOnUntrustedEnable=rlIpDhcpOpt82RxOnUntrustedEnable, rlIpArpInspectAssignedListName=rlIpArpInspectAssignedListName, rlIpArpInspectTrustedPortRowStatus=rlIpArpInspectTrustedPortRowStatus, rlIpSourceGuardEnable=rlIpSourceGuardEnable, rlIpArpInspectListEntry=rlIpArpInspectListEntry, rlIpDhcpSnoopTable=rlIpDhcpSnoopTable, rlIpDhcpSnoopLeaseTime=rlIpDhcpSnoopLeaseTime, rlIpDhcpSnoopClearAction=rlIpDhcpSnoopClearAction, rlIpArpInspectListTable=rlIpArpInspectListTable, 
rlIpArpInspectVlanClearCountersAction=rlIpArpInspectVlanClearCountersAction, rlIpDhcpSnoopStaticMACAddress=rlIpDhcpSnoopStaticMACAddress, rlIpDhcpSnoopStaticEntry=rlIpDhcpSnoopStaticEntry, rlIpSourceGuardPermittedRuleCounterEntry=rlIpSourceGuardPermittedRuleCounterEntry, rlIpSourceGuardVLANTag=rlIpSourceGuardVLANTag, rlIpDhcpSnoopEnableVlanTag=rlIpDhcpSnoopEnableVlanTag, rlIpSourceGuardPermittedRuleCounterNumOfStaticRules=rlIpSourceGuardPermittedRuleCounterNumOfStaticRules, rlIpDhcpSnoopMibVersion=rlIpDhcpSnoopMibVersion, rlProtocolFilteringList=rlProtocolFilteringList, rlProtocolFilteringTable=rlProtocolFilteringTable, rlIpDhcpSnoopEnableVlanRowStatus=rlIpDhcpSnoopEnableVlanRowStatus, rlIpDhcpSnoopStaticTable=rlIpDhcpSnoopStaticTable, rlIpDhcpSnoopMACAddress=rlIpDhcpSnoopMACAddress, rlProtocolFiltering=rlProtocolFiltering, rlIpSourceGuardEntry=rlIpSourceGuardEntry, rlIpDhcpSnoopTrustedPortEntry=rlIpDhcpSnoopTrustedPortEntry, rlIpDhcpOpt82InsertionEnable=rlIpDhcpOpt82InsertionEnable, rlIpDhcpSnoopStaticRowStatus=rlIpDhcpSnoopStaticRowStatus, RlIpDhcpSnoopType=RlIpDhcpSnoopType, rlIpDhcpSnoopType=rlIpDhcpSnoopType, rlIpDhcpSnoopVLANTag=rlIpDhcpSnoopVLANTag, rlIpSourceGuardPermittedRuleCounterVLANTag=rlIpSourceGuardPermittedRuleCounterVLANTag, rlIpArpInspectEnableVlanRowStatus=rlIpArpInspectEnableVlanRowStatus, rlIpArpInspectVlanNumOfArpForwarded=rlIpArpInspectVlanNumOfArpForwarded, rlIpDhcpSnoopCurrentEntiresNumber=rlIpDhcpSnoopCurrentEntiresNumber, rlIpDhcpSnoopTrustedPortRowStatus=rlIpDhcpSnoopTrustedPortRowStatus, rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules=rlIpSourceGuardPermittedRuleCounterNumOfDhcpRules, rlIpDhcpSnoopEnable=rlIpDhcpSnoopEnable, rlIpArpInspectClearCountersAction=rlIpArpInspectClearCountersAction, rlIpDhcpSnoopTrustedPortTable=rlIpDhcpSnoopTrustedPortTable, rlIpSourceGuardPortTable=rlIpSourceGuardPortTable, RlIpSourceGuardType=RlIpSourceGuardType, rlIpDhcpSnoopEntry=rlIpDhcpSnoopEntry, 
rlIpArpInspectVlanNumOfArpMismatched=rlIpArpInspectVlanNumOfArpMismatched, rlIpArpInspectListName=rlIpArpInspectListName, rlIpSourceGuardIPAddress=rlIpSourceGuardIPAddress, rlIpArpInspectEnableVlanTable=rlIpArpInspectEnableVlanTable, rlIpDhcpSnoopStaticIPAddress=rlIpDhcpSnoopStaticIPAddress, rlIpArpInspectVlanNumOfArpDropped=rlIpArpInspectVlanNumOfArpDropped, rlIpSourceGuardRetryTime=rlIpSourceGuardRetryTime, PYSNMP_MODULE_ID=rlBridgeSecurity, rlIpArpInspectValidation=rlIpArpInspectValidation, rlIpArpInspectEnable=rlIpArpInspectEnable, rlIpArpInspectLogInterval=rlIpArpInspectLogInterval, rlIpArpInspectEnableVlanEntry=rlIpArpInspectEnableVlanEntry, RlIpSourceGuardStatus=RlIpSourceGuardStatus, rlIpDhcpSnoopStaticPortInterface=rlIpDhcpSnoopStaticPortInterface, rlIpDhcpSnoopFileEnable=rlIpDhcpSnoopFileEnable, rlIpSourceGuardPortRowStatus=rlIpSourceGuardPortRowStatus, rlIpSourceGuard=rlIpSourceGuard, rlIpDhcpSnoopPortInterface=rlIpDhcpSnoopPortInterface, rlIpDhcpSnoopEnableVlanTable=rlIpDhcpSnoopEnableVlanTable, rlIpSourceGuardPortEntry=rlIpSourceGuardPortEntry, rlIpArpInspectMibVersion=rlIpArpInspectMibVersion, rlIpSourceGuardRetryToInsert=rlIpSourceGuardRetryToInsert)
|
# Copyright (c) 2016 IBM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _LE, _LI
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import dns_db
from neutron.db import models_v2
from neutron.extensions import dns
from neutron import manager
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.services.externaldns import driver
# Module-level logger shared by the driver classes and callbacks below.
LOG = logging.getLogger(__name__)
class DNSExtensionDriver(api.ExtensionDriver):
    """Extension driver for the 'dns-integration' API extension.

    Stores dns_domain (networks) and dns_name (ports) in the neutron DB and
    tracks current/previous DNS data per port so an external DNS service can
    be kept in sync. The policy decision of whether a network's ports need
    external DNS at all is delegated to external_dns_not_needed(), which
    subclasses must implement.
    """
    _supported_extension_alias = 'dns-integration'

    @property
    def extension_alias(self):
        """Alias of the API extension this driver implements."""
        return self._supported_extension_alias

    def process_create_network(self, plugin_context, request_data, db_data):
        """Persist the network's requested dns_domain, when supplied."""
        dns_domain = request_data.get(dns.DNSDOMAIN)
        # Attribute absent from the request: nothing to do.
        if not attributes.is_attr_set(dns_domain):
            return
        # Only non-empty domains get a DB row; '' means "no domain".
        if dns_domain:
            plugin_context.session.add(dns_db.NetworkDNSDomain(
                network_id=db_data['id'], dns_domain=dns_domain))
        db_data[dns.DNSDOMAIN] = dns_domain

    def process_update_network(self, plugin_context, request_data, db_data):
        """Create, overwrite or delete the network's dns_domain row so the
        DB matches the value in the update request."""
        new_value = request_data.get(dns.DNSDOMAIN)
        if not attributes.is_attr_set(new_value):
            return
        current_dns_domain = db_data.get(dns.DNSDOMAIN)
        if current_dns_domain == new_value:
            return  # no change requested
        net_id = db_data['id']
        if current_dns_domain:
            # A row already exists: update it, or delete it when the new
            # value is empty.
            net_dns_domain = plugin_context.session.query(
                dns_db.NetworkDNSDomain).filter_by(network_id=net_id).one()
            if new_value:
                net_dns_domain['dns_domain'] = new_value
                db_data[dns.DNSDOMAIN] = new_value
            else:
                plugin_context.session.delete(net_dns_domain)
                db_data[dns.DNSDOMAIN] = ''
        elif new_value:
            # No row yet: create one for the newly assigned domain.
            plugin_context.session.add(dns_db.NetworkDNSDomain(
                network_id=net_id, dns_domain=new_value))
            db_data[dns.DNSDOMAIN] = new_value

    def process_create_port(self, plugin_context, request_data, db_data):
        """Record a new port's dns_name when its network has a dns_domain
        and the port should be published to the external DNS service."""
        if not request_data[dns.DNSNAME]:
            return
        network = self._get_network(plugin_context, db_data['network_id'])
        if not network[dns.DNSDOMAIN]:
            return
        if self.external_dns_not_needed(plugin_context, network):
            return
        plugin_context.session.add(dns_db.PortDNS(
            port_id=db_data['id'],
            current_dns_name=request_data[dns.DNSNAME],
            current_dns_domain=network[dns.DNSDOMAIN],
            previous_dns_name='', previous_dns_domain=''))

    def process_update_port(self, plugin_context, request_data, db_data):
        """Update the port's PortDNS row when dns_name changes, keeping the
        old name/domain in the previous_* columns so the external service
        can later be cleaned up."""
        dns_name = request_data.get(dns.DNSNAME)
        if dns_name is None:
            return  # dns_name not part of this update request
        network = self._get_network(plugin_context, db_data['network_id'])
        if not network[dns.DNSDOMAIN]:
            return
        if self.external_dns_not_needed(plugin_context, network):
            return
        dns_domain = network[dns.DNSDOMAIN]
        dns_data_db = plugin_context.session.query(dns_db.PortDNS).filter_by(
            port_id=db_data['id']).one_or_none()
        if dns_data_db:
            if dns_name:
                # Non-empty new name: rotate current -> previous, then set
                # the new current name/domain (only if the name changed).
                if dns_data_db['current_dns_name'] != dns_name:
                    dns_data_db['previous_dns_name'] = (dns_data_db[
                        'current_dns_name'])
                    dns_data_db['previous_dns_domain'] = (dns_data_db[
                        'current_dns_domain'])
                    dns_data_db['current_dns_name'] = dns_name
                    dns_data_db['current_dns_domain'] = dns_domain
                return
            if dns_data_db['current_dns_name']:
                # Empty new name: clear the current entry, preserving it in
                # the previous_* columns.
                dns_data_db['previous_dns_name'] = (dns_data_db[
                    'current_dns_name'])
                dns_data_db['previous_dns_domain'] = (dns_data_db[
                    'current_dns_domain'])
                dns_data_db['current_dns_name'] = ''
                dns_data_db['current_dns_domain'] = ''
            return
        if dns_name:
            # First time this port gets a dns_name: create its row.
            plugin_context.session.add(dns_db.PortDNS(
                port_id=db_data['id'],
                current_dns_name=dns_name,
                current_dns_domain=dns_domain,
                previous_dns_name='', previous_dns_domain=''))

    def external_dns_not_needed(self, context, network):
        """Decide if ports in network need to be sent to the DNS service.

        :param context: plugin request context
        :param network: network dictionary
        :return True or False
        """
        # Subclasses implement the policy; the base class is a no-op.
        pass

    def extend_network_dict(self, session, db_data, response_data):
        """Add the stored dns_domain ('' when unset) to the network dict."""
        response_data[dns.DNSDOMAIN] = ''
        if db_data.dns_domain:
            response_data[dns.DNSDOMAIN] = db_data.dns_domain[dns.DNSDOMAIN]
        return response_data

    def extend_port_dict(self, session, db_data, response_data):
        """Copy the port's dns_name into the API response dict."""
        response_data[dns.DNSNAME] = db_data[dns.DNSNAME]
        return response_data

    def _get_network(self, context, network_id):
        """Fetch the network dict through the core plugin."""
        plugin = manager.NeutronManager.get_plugin()
        return plugin.get_network(context, network_id)
class DNSExtensionDriverML2(DNSExtensionDriver):
    """ML2 flavor of the DNS extension driver.

    Uses the network's segment/provider attributes to decide whether its
    ports are tenant-only and therefore must not be published to the
    external DNS service.
    """

    def initialize(self):
        LOG.info(_LI("DNSExtensionDriverML2 initialization complete"))

    def _is_tunnel_tenant_network(self, provider_net):
        """Return True if the segmentation id lies inside any configured
        tenant tunnel-id range for the segment's tunnel type.

        :param provider_net: segment dict with 'network_type' and
            'segmentation_id' keys
        """
        if provider_net['network_type'] == 'geneve':
            tunnel_ranges = cfg.CONF.ml2_type_geneve.vni_ranges
        elif provider_net['network_type'] == 'vxlan':
            tunnel_ranges = cfg.CONF.ml2_type_vxlan.vni_ranges
        else:
            tunnel_ranges = cfg.CONF.ml2_type_gre.tunnel_id_ranges
        segmentation_id = int(provider_net['segmentation_id'])
        # Bug fix: the original returned from inside the loop on the first
        # configured range, so any additional ranges were never checked.
        # Test every "min:max" range and report False only when none match.
        for entry in tunnel_ranges:
            tun_min, tun_max = entry.strip().split(':')
            if int(tun_min.strip()) <= segmentation_id <= int(tun_max.strip()):
                return True
        return False

    def _is_vlan_tenant_network(self, provider_net):
        """Return True if the segment's VLAN id falls inside a tenant VLAN
        range configured for its physical network."""
        network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
            cfg.CONF.ml2_type_vlan.network_vlan_ranges)
        vlan_ranges = network_vlan_ranges[provider_net['physical_network']]
        if not vlan_ranges:
            return False
        segmentation_id = int(provider_net['segmentation_id'])
        for vlan_range in vlan_ranges:
            if vlan_range[0] <= segmentation_id <= vlan_range[1]:
                return True
        # Explicit False instead of the original implicit None fall-through.
        return False

    def external_dns_not_needed(self, context, network):
        """Return True when this network's ports must NOT be sent to the
        external DNS service: no driver configured, external network, or a
        tenant-only (local/tenant-range) segment."""
        if not DNS_DRIVER:
            return True
        if network['router:external']:
            return True
        segments = db.get_network_segments(context.session, network['id'])
        if len(segments) > 1:
            # Multi-segment (e.g. routed) networks are always published.
            return False
        provider_net = segments[0]
        if provider_net['network_type'] == 'local':
            return True
        if provider_net['network_type'] == 'flat':
            return False
        if provider_net['network_type'] == 'vlan':
            return self._is_vlan_tenant_network(provider_net)
        if provider_net['network_type'] in ['gre', 'vxlan', 'geneve']:
            return self._is_tunnel_tenant_network(provider_net)
        # Unknown segment types are treated as tenant-only.
        return True
# Lazily loaded singleton instance of the configured external DNS driver.
DNS_DRIVER = None

def _get_dns_driver():
    """Return the external DNS driver, loading and caching it on first use.

    Returns None when no driver is configured; raises
    ExternalDNSDriverNotFound when the configured driver cannot be imported.
    """
    global DNS_DRIVER
    if DNS_DRIVER:
        return DNS_DRIVER
    driver_name = cfg.CONF.external_dns_driver
    if not driver_name:
        return
    try:
        DNS_DRIVER = driver.ExternalDNSService.get_instance()
    except ImportError:
        LOG.exception(_LE("ImportError exception occurred while loading "
                          "the external DNS service driver"))
        raise dns.ExternalDNSDriverNotFound(driver=driver_name)
    LOG.debug("External DNS driver loaded: %s", driver_name)
    return DNS_DRIVER
def _create_port_in_external_dns_service(resource, event, trigger, **kwargs):
    """AFTER_CREATE callback: publish a new port's DNS records externally."""
    dns_driver = _get_dns_driver()
    if not dns_driver:
        return
    context = kwargs['context']
    port = kwargs['port']
    dns_data_db = context.session.query(dns_db.PortDNS).filter_by(
        port_id=port['id']).one_or_none()
    if dns_data_db is None:
        return
    ip_records = [fixed_ip['ip_address'] for fixed_ip in port['fixed_ips']]
    _send_data_to_external_dns_service(
        context, dns_driver, dns_data_db['current_dns_domain'],
        dns_data_db['current_dns_name'], ip_records)
def _send_data_to_external_dns_service(context, dns_driver, dns_domain,
                                       dns_name, records):
    """Create a record set in the external DNS service.

    Known service errors are logged and swallowed so a DNS failure never
    breaks the port operation that triggered it.
    """
    try:
        dns_driver.create_record_set(context, dns_domain, dns_name, records)
    except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
        # Fix: pass the args dict to the logger instead of eagerly applying
        # '%' to the message, so formatting is lazy and cannot itself raise
        # before the log call.
        LOG.exception(_LE("Error publishing port data in external DNS "
                          "service. Name: '%(name)s'. Domain: '%(domain)s'. "
                          "DNS service driver message '%(message)s'"),
                      {"name": dns_name,
                       "domain": dns_domain,
                       "message": e.msg})
def _remove_data_from_external_dns_service(context, dns_driver, dns_domain,
                                           dns_name, records):
    """Delete a record set from the external DNS service.

    Known service errors are logged and swallowed so a DNS failure never
    breaks the port operation that triggered it.
    """
    try:
        dns_driver.delete_record_set(context, dns_domain, dns_name, records)
    except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
        # Fix: pass the args dict to the logger instead of eagerly applying
        # '%' to the message, so formatting is lazy and cannot itself raise
        # before the log call.
        LOG.exception(_LE("Error deleting port data from external DNS "
                          "service. Name: '%(name)s'. Domain: '%(domain)s'. "
                          "IP addresses '%(ips)s'. DNS service driver message "
                          "'%(message)s'"),
                      {"name": dns_name,
                       "domain": dns_domain,
                       "message": e.msg,
                       "ips": ', '.join(records)})
def _update_port_in_external_dns_service(resource, event, trigger, **kwargs):
    """AFTER_UPDATE callback: re-sync external DNS when dns_name changed."""
    dns_driver = _get_dns_driver()
    if not dns_driver:
        return
    context = kwargs['context']
    updated_port = kwargs['port']
    original_port = kwargs.get('original_port')
    # Nothing to do without the original port or when the name is unchanged.
    if not original_port:
        return
    if updated_port[dns.DNSNAME] == original_port[dns.DNSNAME]:
        return
    dns_data_db = context.session.query(dns_db.PortDNS).filter_by(
        port_id=updated_port['id']).one_or_none()
    if not dns_data_db:
        return
    previous_name = dns_data_db['previous_dns_name']
    if previous_name:
        # Remove the records that were published under the old name.
        old_records = [ip['ip_address'] for ip in original_port['fixed_ips']]
        _remove_data_from_external_dns_service(
            context, dns_driver, dns_data_db['previous_dns_domain'],
            previous_name, old_records)
    current_name = dns_data_db['current_dns_name']
    if current_name:
        # Publish records under the new name.
        new_records = [ip['ip_address'] for ip in updated_port['fixed_ips']]
        _send_data_to_external_dns_service(
            context, dns_driver, dns_data_db['current_dns_domain'],
            current_name, new_records)
def _delete_port_in_external_dns_service(resource, event, trigger, **kwargs):
    """BEFORE_DELETE callback: remove a port's records from external DNS."""
    dns_driver = _get_dns_driver()
    if not dns_driver:
        return
    context = kwargs['context']
    port_id = kwargs['port_id']
    dns_data_db = context.session.query(dns_db.PortDNS).filter_by(
        port_id=port_id).one_or_none()
    if dns_data_db is None:
        return
    if not dns_data_db['current_dns_name']:
        return
    # The port dict is not available here, so collect its addresses from
    # the IP allocation table instead.
    ip_allocations = context.session.query(
        models_v2.IPAllocation).filter_by(port_id=port_id).all()
    address_records = [alloc['ip_address'] for alloc in ip_allocations]
    _remove_data_from_external_dns_service(
        context, dns_driver, dns_data_db['current_dns_domain'],
        dns_data_db['current_dns_name'], address_records)
def subscribe():
    """Register the external-DNS port lifecycle callbacks with the registry."""
    port_callbacks = (
        (_create_port_in_external_dns_service, events.AFTER_CREATE),
        (_update_port_in_external_dns_service, events.AFTER_UPDATE),
        (_delete_port_in_external_dns_service, events.BEFORE_DELETE),
    )
    for callback, event in port_callbacks:
        registry.subscribe(callback, resources.PORT, event)

# Register the callbacks at import time, as this module always has.
subscribe()
|
#!/usr/bin/python3
import sys,os
import cozir
import time
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.dates as mdates
import matplotlib.units as munits
import numpy as np
import datetime
import timeit
# graphics: https://github.com/vfilimonov/co2meter

# Prepare matplotlib: register a concise date converter so datetime values
# on the x-axis render with compact, readable tick labels.
converter = mdates.ConciseDateConverter()
munits.registry[np.datetime64] = converter
munits.registry[datetime.date] = converter
munits.registry[datetime.datetime] = converter

# Start matplotlib interactive mode and create the (initially empty) figure.
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot([0,1], [0,0], 'r-')
plt.show()

# Settings for the measurement loop.
sampling_time = 5 # [s] nominal interval between samples
N = (int)(60/sampling_time) # length of moving average (samples per minute)

# Open the sensor handle.
# NOTE(review): assumes the CozIR sensor is attached at /dev/ttyUSB0 — confirm.
c = cozir.Cozir('/dev/ttyUSB0')
co2_list = []
t_list = []
start_timestamp = (float)(datetime.datetime.now().timestamp())
is_colorbar_set = False
i = 0

# Disabled benchmark: flip the constant to True to measure the sensor's
# maximum sampling frequency (100 reads timed with timeit).
if False:
    start = timeit.default_timer()
    for i in range(100):
        value = c.read_CO2()
    stop = timeit.default_timer()
    print("duration per measurement: {}ms".format((stop-start)*10))
# Main measurement loop: read one CO2 sample per cycle, redraw the plot
# (raw points early on, colour-coded moving average once enough samples
# exist), save a snapshot roughly once per hour, then sleep.
while True:
    i += 1
    # Capture the current CO2 concentration [ppm]. On a read error, skip
    # this cycle instead of silently reusing a stale value — the original
    # bare "except: pass" raised NameError when the very first read failed
    # and otherwise re-appended the previous sample.
    try:
        value = c.read_CO2()
    except Exception as exc:
        print("sensor read failed: {}".format(exc))
        time.sleep(sampling_time)
        continue
    # Store the sample with its wall-clock timestamp.
    co2_list.append(value)
    t_list.append(datetime.datetime.now())
    print('{} CO2: {} ppm'.format(datetime.datetime.now(), value))
    # Air quality bands (CEN 2019 / Active house Alliance 2020): ~400 ppm
    # outdoors; 1150 ppm adequate, 1400 ppm good, 1600 ppm poor indoors.
    ax.clear()
    if i < 120:
        # During the first ~2 minutes show the raw samples as grey dots.
        ax.plot(t_list, co2_list, color="grey", marker=".", linestyle="None")
    # Plot the moving average once a full window of samples is available.
    if len(co2_list) > N:
        co2_list_moving_average = np.convolve(co2_list, np.ones(N)/N, mode='valid')
        inxval = mdates.date2num(t_list[N-1:])
        points = np.array([inxval, co2_list_moving_average]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        # Colour the line by air-quality band relative to a 700 ppm baseline.
        cmap = ListedColormap(['darkgreen', 'g', 'y', 'r'])
        outdoor_concentration = 700
        norm = BoundaryNorm([outdoor_concentration+0, outdoor_concentration+400,
                             outdoor_concentration+600, outdoor_concentration+1000,
                             10000], cmap.N)
        lc = LineCollection(segments, cmap=cmap, norm=norm)
        lc.set_array(co2_list_moving_average)
        lc.set_linewidth(2)
        line = ax.add_collection(lc)
        if not is_colorbar_set:
            # One-time decoration: colorbar and the (German) quality labels.
            fig.colorbar(line, ax=ax)
            plt.figtext(0.2, 0.92, "$CO_2$ Konzentration", ha="left")
            plt.figtext(0.95, 0.92, "Raumluftqualität gemäß DIN EN 13779", weight="bold", ha="right")
            plt.figtext(0.83, 0.2, "hoch", color="darkgreen", weight="bold")
            plt.figtext(0.83, 0.4, "mittel", color="g", weight="bold")
            plt.figtext(0.83, 0.6, "mäßig", color="y", weight="bold")
            plt.figtext(0.83, 0.8, "niedrig", color="r", weight="bold")
            is_colorbar_set = True
    ax.set_xlim(t_list[0], t_list[-1])
    ax.xaxis_date()
    ax.autoscale_view()
    ax.set_ylabel("$CO_2$ [ppm], 10.000 ppm = 1 %")
    ax.grid(which="both")
    # Save a snapshot once per 720 cycles (one hour at the nominal 5 s rate).
    if i % (12*60) == 0:
        filename = "saved/{}.png".format(datetime.datetime.strftime(datetime.datetime.now(),"%Y-%m-%d_%H-%M-%S"))
        print("save to {}".format(filename))
        plt.savefig(filename)
    fig.canvas.draw()
    fig.canvas.flush_events()
    # Sample quickly while the plot fills up, then settle to the nominal rate.
    if i < 20:
        time.sleep(.2)
    elif i < 120:
        time.sleep(1)
    else:
        time.sleep(sampling_time)
|
import argparse
import binascii
import iroha
import os
import test_kv
# Iroha connection/identity parameters, overridable via environment variables.
IROHA_HOST_ADDR = os.getenv('IROHA_HOST_ADDR', '127.0.0.1')
IROHA_PORT = os.getenv('IROHA_PORT', '50051')
ADMIN_ACCOUNT_ID = os.getenv('ADMIN_ACCOUNT_ID', 'admin@test')
# NOTE(review): this default is the well-known example key for admin@test;
# it must never be used outside local testing.
ADMIN_PRIVATE_KEY = os.getenv(
    'ADMIN_PRIVATE_KEY',
    'f101537e319568c765b2cc89698325604991dca57b9716b58016b253506cab70')

# Shared client and gRPC channel used by all helpers below.
iroha_client = iroha.Iroha(ADMIN_ACCOUNT_ID)
net = iroha.IrohaGrpc('{}:{}'.format(IROHA_HOST_ADDR, IROHA_PORT))
def make_base_kv_command():
    """Return a fresh test_kv Command with the data-model id pre-filled."""
    command = test_kv.kv_schema_pb2.Command()
    command.dm_id.name = 'test_kv'
    command.dm_id.version = '0.1.0'
    return command
def make_kv_set_command(key, value):
    """Return a test_kv Set command that stores *value* under *key*."""
    command = make_base_kv_command()
    set_payload = command.payload.set
    set_payload.CopyFrom(test_kv.kv_schema_pb2.Set())
    set_payload.key = key
    set_payload.value = value
    return command
def make_kv_nuke_command():
    """Return a test_kv Nuke command (clears the whole key-value store)."""
    command = make_base_kv_command()
    command.payload.nuke.CopyFrom(test_kv.kv_schema_pb2.Nuke())
    return command
def send_transaction_and_print_status(transaction):
    """Send *transaction* over gRPC and print each status update.

    :param transaction: a signed Iroha transaction protobuf
    """
    # Fix: hexlify() returns bytes, so the hash previously printed as a
    # b'...' literal; decode it to plain text first.
    hex_hash = binascii.hexlify(iroha.IrohaCrypto.hash(transaction)).decode('ascii')
    print('Transaction hash = {}, creator = {}'.format(
        hex_hash, transaction.payload.reduced_payload.creator_account_id))
    net.send_tx(transaction)
    # Stream and print every status transition until the stream ends.
    for status in net.tx_status_stream(transaction):
        print(status)
def send_kv_command_and_print_status(kv_command):
    """Wrap a test_kv command in an Iroha CallModel transaction, sign it
    with the admin key and send it."""
    serialized = kv_command.SerializePartialToString()
    command = iroha.commands_pb2.Command()
    command.call_model.CopyFrom(
        iroha.commands_pb2.CallModel.FromString(serialized))
    unsigned_tx = iroha_client.transaction([command])
    signed_tx = iroha.IrohaCrypto.sign_transaction(unsigned_tx,
                                                   ADMIN_PRIVATE_KEY)
    send_transaction_and_print_status(signed_tx)
if __name__ == '__main__':
    # CLI entry point: send either a 'set' or a 'nuke' command.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--set',
        help='send a set command, param: key=val',
    )
    parser.add_argument(
        '--nuke',
        action='store_true',
        help='send a nuke command',
    )
    args = parser.parse_args()
    if args.nuke:
        send_kv_command_and_print_status(make_kv_nuke_command())
    elif args.set:
        # Fix: split on the FIRST '=' only, so values containing '='
        # (e.g. base64 data) are preserved intact.
        key, value = args.set.split('=', 1)
        send_kv_command_and_print_status(make_kv_set_command(key, value))
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.io.wavfile as wav
# Load the 440 Hz sine-wave sample and report its duration.
wav_fname = 'textbookcode/Ch_03/sw440hz.wav'
fs, data = wav.read(wav_fname)
length = data.shape[0] / fs
print(f"length = {length}s")
# Normalize: divide by the max value of a signed 16-bit integer.
data = data/np.iinfo(np.int16).max
# Time axis for the waveform, in seconds.
time = np.linspace(0, length, data.shape[0])
# Synthesize a sine wave at twice the original frequency (880 Hz).
# Fix: the original computed np.sin(2*pi*44100*880*time), erroneously
# multiplying by the sample rate and producing a ~38.8 MHz tone.
data2 = np.sin(2*np.pi*880*time)
# AM-modulate the original waveform by the new waveform.
data3 = data2*data
# Set up three stacked plots: original, modulator, modulated.
fig, axes = plt.subplots(nrows=3, ncols=1)
# Plot the original waveform.
axes[0].plot(time, data, label="Original Audio Signal")
axes[0].set_xlabel("Time [s]")
axes[0].set_ylabel("Amplitude")
axes[0].legend(loc= 7)
axes[0].set_xlim([0,0.05])
# Plot the synthesized sine waveform.
axes[1].plot(time, data2, label="Modulation Signal")
axes[1].set_xlabel("Time [s]")
axes[1].set_ylabel("Amplitude")
axes[1].legend(loc= 7)
axes[1].set_xlim([0,0.05])
# Plot the modulated result.
axes[2].plot(time, data3, label="Modulated Audio Signal")
axes[2].set_xlabel("Time [s]")
axes[2].set_ylabel("Amplitude")
axes[2].legend(loc= 7)
axes[2].set_xlim([0,0.05])
# Scale the output back up to full 16-bit range.
amplitude = np.iinfo(np.int16).max
data3 = data3*amplitude
# Truncate fractional values: the wav writer needs integer samples.
data3 = np.asarray(data3, dtype=np.int16)
# Fix: write with the file's actual sample rate instead of hard-coded 44100.
wav.write("textbookcode/Ch_03/sw440hz_modulated.wav", fs, data3)
plt.show()
|
#!/usr/bin/env python
from os.path import dirname, join
from setuptools import find_packages, setup
import MangAdventure
# Dev-time convenience: teach isort to treat Django imports as their own
# section. Silently skipped when isort is not installed (non-dev installs).
try:
    from isort.settings import default
    default['known_django'] = 'django'
except ImportError:
    pass
def read(fname):
    """Return the full text of *fname*, resolved relative to this setup.py.

    The file is decoded as UTF-8 explicitly: the previous implementation used
    the platform's locale encoding, which breaks on non-ASCII content (e.g.
    in README.md) under Windows or C-locale CI environments.
    """
    with open(join(dirname(__file__), fname), encoding='utf-8') as f:
        return f.read()
setup(
    # Package identity is sourced from the MangAdventure package itself so
    # the name/version/license/author live in a single place.
    name=MangAdventure.__name__,
    version=MangAdventure.__version__,
    license=MangAdventure.__license__,
    author=MangAdventure.__author__,
    maintainer=MangAdventure.__author__,
    description=MangAdventure.__doc__,
    long_description=read('README.md'),
    url='https://mangadventure.rtfd.io',
    download_url='https://github.com/mangadventure/MangAdventure',
    keywords=['manga', 'scanlation', 'reader'],
    packages=find_packages(),
    # NOTE(review): declares 2.7+ while the classifiers also list 3.5-3.7;
    # confirm which interpreter range is actually supported/tested.
    python_requires='>=2.7',
    # Runtime dependencies are maintained in requirements.txt, one per line.
    install_requires=read('requirements.txt').splitlines(),
    extras_require={
        'dev': [
            'django-debug-toolbar',
            'flake8',
            'isort',
        ],
        'docs': [
            'sphinx',
            'sphinx-rtd-theme',
        ],
        'csp': 'django-csp',
        'uwsgi': 'uwsgi',
        'gunicorn': 'gunicorn'
    },
    # Installs a `mangadventure` console command delegating to manage:main.
    entry_points={
        'console_scripts': ['mangadventure = manage:main']
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django :: 1.11',
        'Framework :: Django :: 2.1',
        'Framework :: Django :: 2.2',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet :: WWW/HTTP :: '
        'Dynamic Content :: Content Management System',
    ]
)
|
from pybulletgym.envs.roboschool.robots.robot_bases import MJCFBasedRobot
import numpy as np
class Reacher(MJCFBasedRobot):
    """Two-joint planar reacher robot loaded from reacher.xml (2 torque
    actions, 9-dimensional observation)."""

    # Target position is sampled uniformly from [-TARG_LIMIT, TARG_LIMIT]
    # on each axis at reset time.
    TARG_LIMIT = 0.27

    def __init__(self):
        MJCFBasedRobot.__init__(self, 'reacher.xml', 'body0', action_dim=2, obs_dim=9)

    def robot_specific_reset(self, bullet_client):
        # Randomize the target location first, then the arm joints; the
        # draw order from np_random is kept identical to preserve seeding.
        for target_axis in ("target_x", "target_y"):
            self.jdict[target_axis].reset_current_position(
                self.np_random.uniform(low=-self.TARG_LIMIT, high=self.TARG_LIMIT), 0)
        self.fingertip = self.parts["fingertip"]
        self.target = self.parts["target"]
        self.central_joint = self.jdict["joint0"]
        self.elbow_joint = self.jdict["joint1"]
        for joint in (self.central_joint, self.elbow_joint):
            joint.reset_current_position(self.np_random.uniform(low=-3.14, high=3.14), 0)

    def apply_action(self, a):
        """Apply clipped torques (scaled by 0.05) to the two joints."""
        assert np.isfinite(a).all()
        torques = 0.05 * np.clip(a, -1, +1)
        self.central_joint.set_motor_torque(float(torques[0]))
        self.elbow_joint.set_motor_torque(float(torques[1]))

    def calc_state(self):
        """Build the 9-dim observation; also caches joint/target state on self."""
        # Angles and angular velocities are stored on self so that
        # calc_potential (and reward code) can reuse them afterwards.
        theta, self.theta_dot = self.central_joint.current_relative_position()
        self.gamma, self.gamma_dot = self.elbow_joint.current_relative_position()
        target_x, _ = self.jdict["target_x"].current_position()
        target_y, _ = self.jdict["target_y"].current_position()
        self.to_target_vec = np.array(self.fingertip.pose().xyz()) - np.array(self.target.pose().xyz())
        observation = [
            target_x,
            target_y,
            self.to_target_vec[0],
            self.to_target_vec[1],
            np.cos(theta),
            np.sin(theta),
            self.theta_dot,
            self.gamma,
            self.gamma_dot,
        ]
        return np.array(observation)

    def calc_potential(self):
        # Potential is the negative scaled distance to the target, using the
        # vector cached by the most recent calc_state() call.
        return -100 * np.linalg.norm(self.to_target_vec)
|
# comment.py
"""Demonstrate a triple-quoted string used as a multi-line comment.

Author: Kyeongyul Kim
Date: 2018-03-14
"""
print("This example shows how to make multi-line comment")
|
'''
Some relevant metrics
'''
import numpy as np
import pandas as pd
def partial_log_likelihood_ph(log_partial_hazards, durations, events, mean=True):
    """Partial log-likelihood for PH models.
    Arguments:
        log_partial_hazards {np.array} -- Log partial hazards (e.g. x^T beta).
        durations {np.array} -- Durations.
        events {np.array} -- Events.
    Keyword Arguments:
        mean {bool} -- Return the mean. (default: {True})
    Returns:
        pd.Series or float -- partial log-likelihood or mean.
    """
    frame = pd.DataFrame({'duration': durations, 'event': events, 'lph': log_partial_hazards})
    # Sorting by descending duration makes the running cumsum of exp(lph)
    # equal to the risk-set sum at each duration.
    ordered = frame.sort_values('duration', ascending=False)
    risk_set = np.exp(ordered['lph']).cumsum()
    # Tied durations share one risk set: take the max cumsum within each tie.
    risk_set = risk_set.groupby(ordered['duration']).transform('max')
    # Only observed events contribute terms to the partial likelihood.
    at_event = ordered['event'] == 1
    pll = (ordered.loc[at_event, 'lph'] - np.log(risk_set[at_event])).rename('pll')
    return pll.mean() if mean else pll
|
# Copyright 2020 David Matthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
class Work(object):
    """A schedulable unit of work (copied from ParallelPy so ParallelPy does
    not have to be installed to run this code).

    Subclasses implement compute_work/write_letter/open_letter; the
    dispatcher drives the lifecycle via complete_work.
    """

    def cpus_requested(self):
        """Number of CPUs this unit asks the dispatcher for (always 1 here)."""
        return 1

    def complete_work(self, serial=False):
        """Run the computation and package the result for the dispatcher.

        :return: the Letter produced by write_letter after compute_work ran.
        """
        self.compute_work(serial=serial)
        return self.write_letter()

    def compute_work(self, serial=False):
        """Perform the actual computation. Subclasses must override."""
        raise NotImplementedError

    def write_letter(self):
        """Build a small result packet (a Letter) for the dispatcher.

        :return: a Letter the dispatcher uses to update its copy of the work.
        :rtype: Letter
        """
        raise NotImplementedError

    def open_letter(self, letter):
        """Absorb a Letter's result data back into this work object.

        :param letter: the letter to open
        :return: None
        """
        raise NotImplementedError
class RobotInterface(Work):
    """Abstract contract for robots evolved by the dispatcher.

    NOTE(review): ``__metaclass__`` is the Python 2 idiom and has no effect
    on Python 3, so @abstractmethod is not actually enforced here. Confirm
    the intended interpreter before changing it — enforcing ABCMeta would
    break instantiation of partially-implemented subclasses.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def set_id(self, new_id):
        """Assign a unique identifier to this robot."""
        raise NotImplementedError

    @abstractmethod
    def get_id(self):
        """Return a unique identifier for this robot.

        Must be comparable to other robot ids; used as a tiebreaker in
        multi-objective optimization.
        """
        raise NotImplementedError

    @abstractmethod
    def iterate_generation(self):
        """Called on every robot in the population once per generation.

        AFPO-style implementations use this to advance the age.
        """
        raise NotImplementedError

    @abstractmethod
    def needs_evaluation(self):
        """Return True when this robot still needs to be evaluated."""
        raise NotImplementedError

    @abstractmethod
    def mutate(self):
        """Apply mutations; the subclass decides which mutations to make."""
        raise NotImplementedError

    @abstractmethod
    def dominates(self, other):
        """Return True when this robot dominates ``other``."""
        raise NotImplementedError

    @abstractmethod
    def dominates_final_selection(self, other):
        """Domination test used in the final selection round."""
        raise NotImplementedError

    @abstractmethod
    def get_fitness(self):
        """Return this robot's fitness value."""
        raise NotImplementedError
class MOORobotInterface(RobotInterface):
    """Robot interface for multi-objective optimization (Pareto dominance)."""
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_maximize_vals(self):
        """Return the list of trait values to maximize."""
        raise NotImplementedError

    @abstractmethod
    def get_minimize_vals(self):
        """Return the list of trait values to minimize."""
        raise NotImplementedError

    @abstractmethod
    def get_seq_num(self):
        """Sequence number used to break ties between trait-identical robots."""
        return self.get_id()

    def dominates(self, other):
        """Return True when self Pareto-dominates ``other``.

        :param other: the other robot to compare self to.
        :return: True if self dominates other, False otherwise. When every
            trait ties, the robot with the lower sequence number wins.
        """
        self_min = self.get_minimize_vals()
        self_max = self.get_maximize_vals()
        other_min = other.get_minimize_vals()
        other_max = other.get_maximize_vals()
        # Disqualified outright if any trait is strictly worse than other's.
        if any(mine > theirs for mine, theirs in zip(self_min, other_min)):
            return False
        if any(mine < theirs for mine, theirs in zip(self_max, other_max)):
            return False
        # Dominates if at least one trait is strictly better.
        if any(mine < theirs for mine, theirs in zip(self_min, other_min)):
            return True
        if any(mine > theirs for mine, theirs in zip(self_max, other_max)):
            return True
        # Complete tie on every trait: fall back to the sequence number.
        return self.get_seq_num() < other.get_seq_num()
class AFPORobotInterface(MOORobotInterface):
    """MOO robot with an age objective (Age-Fitness Pareto Optimization)."""
    __metaclass__ = ABCMeta

    def __init__(self, optimize_mode="fitness"):
        # "fitness": maximize fitness, minimize age.
        # "error":   minimize both age and the (error-valued) fitness.
        self.age = 0
        self.optimize_mode = optimize_mode

    def iterate_generation(self):
        """Grow one generation older."""
        self.age += 1

    def get_maximize_vals(self):
        # NOTE(review): an unrecognized mode falls through and returns None,
        # matching the original behavior.
        if self.optimize_mode == "error":
            return []
        elif self.optimize_mode == "fitness":
            return [self.get_fitness()]

    def get_minimize_vals(self):
        if self.optimize_mode == "fitness":
            return [self.age]
        elif self.optimize_mode == "error":
            return [self.age, self.get_fitness()]

    def get_age(self):
        """Current age in generations."""
        return self.age
|
'''
Write a Python program to remove the ANSI escape sequences from a string.
'''
import re

text = "PHP Exercises\tMySQL Exercises\nAlgo Exercises\r"
print("Original Text: ", text)
# Collapse runs of tab/newline/carriage-return into ". ".
# BUG FIX: the original pattern was r'[\n|\t|\r]+' — inside a character
# class '|' is a literal, so any '|' in the text was wrongly removed too.
new_text1 = re.sub(r'[\n\t\r]+', r'. ', text)
print("New Text: ", new_text1)
|
import asyncio
import itertools
import functools
import math
import re
from attr import __description__
import discord
import os
from discord.ext import tasks, commands
from discord.ext.commands.errors import MissingPermissions
from discord.user import Profile
from discord.utils import get
from dns.message import Message
import aiomysql
import random
import pyowm
import threading
import time
import datetime
import dbl
class UserProfile(commands.Cog, name = "Profile Commands"):
    """Profile-related commands: profile embeds, credit gifting, inventory
    listing and badge visibility toggling.

    Every command first verifies the bot has a live SQL connection and
    raises BOTrasedError("201") when it does not.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name = "user",
                    description = "Gathers information about a profile (yours by default) and displays the information in an embed.",
                    brief = "Displays your user profile.",
                    aliases = ["profile"])
    async def User(self, ctx, user: discord.User = None):
        """Display a user's profile embed (the author's own when nobody is mentioned)."""
        if self.bot.SQLConnection is None:
            raise self.bot.BOTrasedError("201")
        # Default the target to the message author when nobody was mentioned.
        if user is None:
            user = ctx.message.author
        # The target must have a database row; error text depends on whether
        # the author asked about themselves.
        if await self.bot.checkUserExists(user.id) == False:
            if user.id == ctx.message.author.id:
                raise self.bot.BOTrasedError("200")
            else:
                raise self.bot.BOTrasedError("404 This user does not have a profile.")
        # Profile row layout (as used below): [?, experience, level, credits,
        # profile message, ?, daily timestamp] — TODO confirm against schema.
        userProfile = await self.bot.fetchUserProfile(user.id)
        # The SQL library returns NULL entries as the string "None".
        if userProfile[4] == "None":
            userProfile[4] = "Welcome to my profile!"
        # Embed colour: special colour for the bot owner, else by level bracket.
        if user.id == self.bot.ownerID:
            colour = discord.Color.purple()
        elif userProfile[2] < 100:
            colour = discord.Colour.light_grey()
        elif userProfile[2] < 200:
            colour = discord.Colour.from_rgb(166, 121, 18)
        elif userProfile[2] < 400:
            colour = discord.Colour.from_rgb(104, 105, 104)
        else:
            colour = discord.Colour.from_rgb(221, 224, 0)
        embed = discord.Embed(title = user.display_name, description = str(userProfile[4]), colour = colour)
        embed.set_thumbnail(url = str(user.avatar_url))
        embed.add_field(name = "Experience:", value = str(userProfile[1]), inline = False)
        embed.add_field(name = "Level:", value = "Level " + str(userProfile[2]), inline = False)
        embed.add_field(name = "Credits:", value = str(userProfile[3]) + " credits", inline = False)
        try:
            # Daily reset window is 24h; the deliberate ValueError (and any
            # data error, e.g. a NULL timestamp) falls to the except branch.
            if (userProfile[6] + 86400) < int(time.time()):
                raise ValueError()
            embed.add_field(name = "Time until daily reset:", value = await self.bot.grabDailyTimer(userProfile), inline = False)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            embed.add_field(name = "Time until daily reset:", value = "Daily is available!", inline = False)
        # BUG FIX: fetch the *target's* badges, not the command author's —
        # they differ when another user is mentioned.
        badges = await self.bot.fetchAllVisibleBadges(user.id)
        if len(badges) != 0:
            footerText = ""
            for badge_row in badges:
                badge = await self.bot.fetchSingleItem(badge_row["itemID"])
                footerText += badge["emojiString"]
            # NOTE(review): discord rejects embed fields with an empty value;
            # confirm whether a zero-width space should be passed instead.
            embed.add_field(name = footerText, value = "")
        await ctx.send(embed = embed)

    @commands.command(name = "gift",
                    description = "Gift credits to another user by @mentioning them. Takes member then integer as arguments.",
                    brief = "Gift credits to another user.")
    async def Gift(self, ctx, target: discord.User = None, amount = None):
        """Transfer `amount` credits from the author to `target`."""
        if self.bot.SQLConnection is None:
            raise self.bot.BOTrasedError("201")
        if target is None:
            raise self.bot.BOTrasedError("403 You haven't specified a user. @mention a user and try again.")
        if target.id == ctx.author.id:
            raise self.bot.BOTrasedError("402 You cannot gift credits to yourself.")
        # BUG FIX: `amount` arrives as a string (or None), so it must be
        # converted to int *before* the sign check; the old order evaluated
        # `str <= 0`, which raises TypeError on every call.
        try:
            amount = int(amount)
        except (TypeError, ValueError):
            raise self.bot.BOTrasedError("400 Please enter a positive integer and try again.")
        if amount <= 0:
            raise self.bot.BOTrasedError("400 Amount cannot be a negative number.")
        if await self.bot.checkUserExists(target.id) == False:
            raise self.bot.BOTrasedError("404 The target user is not in the database.")
        elif await self.bot.checkUserExists(ctx.message.author.id) == False:
            raise self.bot.BOTrasedError("200")
        sendingUser = await self.bot.fetchUserProfile(ctx.message.author.id)
        receivingUser = await self.bot.fetchUserProfile(target.id)
        if sendingUser[3] < amount:
            raise self.bot.BOTrasedError("406 You do not have sufficient credits to gift this amount.")
        # Index 3 is the credit balance; move the credits and persist both rows.
        sendingUser[3] -= amount
        receivingUser[3] += amount
        await self.bot.updateUserProfile(sendingUser)
        await self.bot.updateUserProfile(receivingUser)
        await ctx.send(target.mention+", you've been gifted "+str(amount)+" credits by "+ ctx.message.author.mention+"!")

    @commands.command(name = "inventory",
                    description = "Displays all of the items in your inventory, by order of itemID.",
                    brief = "Displays your inventory.")
    async def inventory(self, ctx, target: discord.User = None):
        """Show a user's inventory, paginated ten items per embed page."""
        if self.bot.SQLConnection is None:
            raise self.bot.BOTrasedError("201")
        if target is None:
            target = ctx.message.author
        userInventory = await self.bot.userInventoryLookup(target.id)
        if len(userInventory) == 0:
            await ctx.send(target.name + "'s inventory is empty.")
            return
        # BUG FIX: the slice width and the range step must match; the old
        # code sliced 10 items but stepped by 5, duplicating every item
        # across two consecutive pages.
        pageSize = 10
        processedData = [userInventory[i:i + pageSize] for i in range(0, len(userInventory), pageSize)]
        pages = []
        for chunk in processedData:
            embed = discord.Embed(title = target.name + "'s Inventory")
            for item in chunk:
                # An :eye: emoji marks badges currently shown on the profile.
                statusEmoji = ""
                if item["type"] == "badge" and item["showOnProfile"] == 1:
                    statusEmoji = ":eye:"
                embed.add_field(name = "Item #" + str(item["itemID"]) + "- " + item["name"] + " " + statusEmoji,
                                value = item["description"])
            pages.append(embed)
        await self.bot.sendEmbedPages(ctx, pages)

    @commands.command(name = "showbadge",
                    description = "Toggles whether a badge appears on your profile or not when !profile is used.",
                    brief = "Toggles whether a badge is shown on your profile.")
    async def showBadge(self, ctx, badgeID: int):
        """Toggle the on-profile visibility flag of one of the author's badges."""
        if self.bot.SQLConnection is None:
            raise self.bot.BOTrasedError("201")
        if await self.bot.checkUserHasItem(ctx.message.author.id, badgeID) == False:
            raise self.bot.BOTrasedError("405 You do not have that badge.")
        if await self.bot.checkItemType(badgeID, "badge") == False:
            raise self.bot.BOTrasedError("402 That is not a badge.")
        # Flip the stored flag: zero becomes shown (1), anything else hidden (0).
        inventoryEntry = await self.bot.fetchSingleInventoryEntry(ctx.message.author.id, badgeID)
        showOnProfile = 1 if inventoryEntry["showOnProfile"] == 0 else 0
        await self.bot.updateUserInventory(True, ctx.message.author.id, badgeID, showOnProfile)
def setup(bot):
    """Extension entry point: register the UserProfile cog with the bot."""
    cog = UserProfile(bot)
    bot.add_cog(cog)
import asyncio
from dataclasses import dataclass, field
import json
import time
import torch
import numpy as np
import ray
import ray.util
from ray import serve
import os
from datasets import load_dataset, load_metric
import requests
from tqdm import tqdm
from torch.utils.data import DataLoader, Subset
from transformers import HfArgumentParser
from hfutils.measure import get_energy_by_group, get_remote_gpu_energy, get_host_ip
home_dir = "/data"
dataset_path = os.path.join(home_dir, "ImageNet")
model_paths = f"{home_dir}/HuggingFace/WinKawaks/vit-tiny-patch16-224"
metric = load_metric("accuracy")
def metric_accuracy(logits, labels):
    """Top-1 accuracy of `logits` against `labels` via the module-level
    HuggingFace accuracy metric."""
    predicted = np.argmax(logits, axis=1).flatten()
    references = labels.flatten()
    scores = metric.compute(predictions=predicted, references=references)
    return scores["accuracy"]
@dataclass
class Arguments:
    # Ray namespace the serve deployments live in; also names the output JSON.
    namespace: str = field(metadata={"help": "test type"})
    # Number of samples sent per request batch.
    batch_size: int = field(
        metadata={"help": "batch_size"},
    )
# Parse the single Arguments dataclass from the command line.
parser = HfArgumentParser(Arguments)
args = parser.parse_args_into_dataclasses()[0]
batch_size = args.batch_size
# ------------- Dataset Prepare --------------
from torchvision.datasets import ImageNet
from hfutils.preprocess import (
    split_train_test,
    vit_collate_fn,
    ViTFeatureExtractorTransforms,
)
from hfutils.measure import get_energy_by_group
print("======ImageNet==========")
# ImageNet validation split, preprocessed with the ViT feature extractor
# of the model at `model_paths`.
dataset = ImageNet(
    dataset_path,
    split="val",
    transform=ViTFeatureExtractorTransforms(model_paths, split="val"),
)
num_labels = len(dataset)
m = torch.nn.Softmax(dim=1)
inputs_list = []
label_list = []
# Fixed seed so the shuffled evaluation order is reproducible across runs.
rnd_seed = 106033
np.random.seed(rnd_seed)
index = np.array([x for x in range(len(dataset))])
np.random.shuffle(index)
dataset = Subset(dataset, index)
eval_dataloader = DataLoader(
    dataset,
    num_workers=4,
    collate_fn=vit_collate_fn,
    batch_size=batch_size,
)
# Connect to the Ray cluster on this host under the requested namespace.
host_ip = get_host_ip()
ray.init(address=f"ray://{host_ip}:10001", namespace=args.namespace)
inputs_list = []
labels_list = []
# Stage roughly the first 1000 samples in memory so the timing loop below
# measures serving latency rather than data loading.
for step, batch in enumerate(tqdm(eval_dataloader)):
    if step * batch_size > 1000:
        break
    pixel_values = batch["pixel_values"].numpy()
    inputs_list.append((pixel_values,))
    labels_list.append(batch["labels"].numpy())
labels_list = np.concatenate(labels_list)
# Worker hosts running hybrid-scheduler deployments (others currently disabled).
hosts = [
    "172.31.35.95",
    # "172.31.39.160",
    # "172.31.47.240",
    # "172.31.32.224",
    # "172.31.44.101",
    # "172.31.36.213",
    # "172.31.43.33",
    # "172.31.39.35",
    # "172.31.43.93",
    # "172.31.34.158",
    # "172.31.40.86",
    # "172.31.47.59",
]
# One synchronous handle per (host, replica) deployment.
handles = [
    serve.get_deployment(f"hybrid-scheduler_{host}_{r}").get_handle(sync=True)
    for host in hosts
    for r in range(1)
]
# GPU energy counters before the run (one reading per host).
start_energy = np.array([get_remote_gpu_energy(host, 0) for host in hosts])
exec_start_time = time.perf_counter()
async_requests = []
time_list = []
energy_list = []
async_requests = []
# Round-robin the staged batches across deployment handles; only the request
# *submission* time is recorded per batch (responses are gathered later).
for step, input in enumerate(tqdm(inputs_list)):
    handle = handles[step % len(handles)]
    start_time = time.perf_counter()
    # response = handle.ensemble_inference.remote(input)
    response = handle.handle_batch.remote(input)
    # logits = ray.get(response)
    # async_requests.append(logits)
    async_requests.append(response)
    end_time = time.perf_counter()
    time_list.append(end_time - start_time)
# Block until all responses arrive, then flatten into one logits array.
async_requests = ray.get(async_requests)
async_requests = np.concatenate(async_requests)
# print(metric_accuracy(async_requests, labels_list))
exec_end_time = time.perf_counter()
end_energy = np.array([get_remote_gpu_energy(host, 0) for host in hosts])
print(end_energy - start_energy)
print(np.sum(time_list))
print(exec_end_time - exec_start_time)
# Persist per-request latency, wall time and per-host energy deltas.
with open(os.path.join(os.path.dirname(__file__), f"{args.namespace}.json"), "w") as fp:
    json.dump(
        {
            "latency": time_list,
            "exec_time": exec_end_time - exec_start_time,
            "energy": (end_energy - start_energy).tolist(),
        },
        fp,
    )
|
import cv2
import numpy as np
from data_toolbox.data.data_mixer import Buffer
from data_toolbox.data.data_operator import DataOperator
class DilateOperator(DataOperator):
    """Morphological dilation step for the data pipeline.

    Dilates the input with a square all-ones kernel. The kernel size and
    iteration count — previously hard-coded — are overridable class
    attributes, so subclasses can tune them without changing `apply`.
    """

    # Side length of the square structuring element.
    KERNEL_SIZE = 5
    # Number of dilation passes applied.
    ITERATIONS = 1

    def apply(self, source: Buffer) -> Buffer:
        """Return `source` dilated with a KERNEL_SIZE x KERNEL_SIZE kernel."""
        kernel = np.ones((self.KERNEL_SIZE, self.KERNEL_SIZE), np.uint8)
        return cv2.dilate(source, kernel, iterations=self.ITERATIONS)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake service account key file."""
# Minimal fake service-account key blob (bytes), shaped like a real Google
# JSON keyfile, for exercising credential-loading code in tests.
FAKE_KEYFILE = b"""
{
"type": "service_account",
"client_id": "id123",
"client_email": "foo@bar.com",
"private_key_id": "pkid456",
"private_key": "s3kr3tz"
}
"""
# OAuth scope(s) the fake credential is expected to request.
FAKE_REQUIRED_SCOPES = frozenset([
'https://www.googleapis.com/auth/admin.directory.group.readonly'
])
|
import argparse
import gzip
import json
import os
import sys
from typing import Any, Dict, Iterator, List, Set

import tqdm
# Aspect ids already emitted, so each document is written at most once per run.
seen: Set[str] = set()
def aspect_link_examples(json_file: str) -> Iterator[Dict[str, Any]]:
    """
    Reads the JSON-L file in gzip format.
    Generates an AspectLinkExample in a lazy way (using yield).

    BUG FIX: the return annotation previously claimed ``Dict[str, Any]``,
    but this is a generator — it yields one dict per line.

    :param json_file: JSON-L file in gzip format.
    :yield: One parsed JSON object per line of the archive.
    """
    with gzip.open(json_file, 'rt', encoding='UTF-8') as zipfile:
        for line in zipfile:
            yield json.loads(line)
def to_doc(candidate_aspects) -> List[str]:
    """Serialize not-yet-seen candidate aspects as JSON lines.

    Deduplicates across calls via the module-level `seen` set; each emitted
    JSON document is followed by a separate "\n" element.
    """
    lines: List[str] = []
    for candidate in candidate_aspects:
        aspect_id: str = candidate['aspect_id']
        if aspect_id in seen:
            continue
        seen.add(aspect_id)
        content: str = candidate['aspect_content']['content']
        lines.append(json.dumps({
            'doc_id': aspect_id,
            'doc': content
        }))
        lines.append("\n")
    return lines
def create_data(data_dir: str, data_file: str, out_file: str) -> None:
    """Stream examples from the gzip archive and append their docs to `out_file`.

    The tqdm total (7893275) is the known corpus size — presumably fixed for
    this dataset release.
    """
    source = os.path.join(data_dir, data_file)
    for example in tqdm.tqdm(aspect_link_examples(source), total=7893275):
        docs: List[str] = to_doc(example['candidate_aspects'])
        write_to_file(docs, out_file)
def write_to_file(data, output_file):
    """Append the given lines to `output_file`.

    BUG FIX: the file is now opened via a context manager (the original
    leaked the handle if writelines raised) and with an explicit UTF-8
    encoding, matching the UTF-8 reader in aspect_link_examples.

    :param data: list of strings to append (writelines adds no newlines).
    :param output_file: path to the output file, opened in append mode.
    """
    with open(output_file, "a", encoding="utf-8") as out:
        out.writelines(data)
def main():
    """CLI entry point: parse arguments and build the OpenMatch document file."""
    parser = argparse.ArgumentParser("Create document file in OpenMatch format.")
    parser.add_argument("--data-dir", help="Directory where data is stored.", required=True)
    parser.add_argument("--data-file", help="(Zipped) data file.", required=True)
    parser.add_argument("--out", help="Output file.", required=True)
    # With no CLI arguments at all, show the help text instead of erroring.
    cli_args = sys.argv[1:]
    args = parser.parse_args(args=None if cli_args else ['--help'])
    create_data(args.data_dir, args.data_file, args.out)


if __name__ == '__main__':
    main()
|
# Generated by Django 3.2.4 on 2021-06-15 00:13
from django.db import migrations
class Migration(migrations.Migration):
def create_default_paper_template(apps, schema_editor):
Template = apps.get_model('voucher', 'PrintTemplate')
Template.objects.create(
name="Default paper template",
type="Paper",
template = """<div class="voucher_block">
<h1>#PORTALNAME#</h1>
<table>
<tr>
<th>Network:</th>
</tr><tr>
<td>#SSID#</td>
</tr><tr>
<th>Password:</th>
</tr><tr>
<td>#PSK#</td>
</tr><tr>
<th>Voucher Code:</th>
</tr><tr>
<td>#CODE#</td>
</tr><tr>
<th>Valid for:</th>
</tr><tr>
<td>#TIMELIMIT#</td>
</tr>
</table>
<div class="rightside">
<div class="qr" id="qr-#CODE#" rowspan="4"></div>
<script type="text/javascript">
new QRCode(document.getElementById("qr-#CODE#"), {text: "WIFI:T:WPA;S:#SSID#;P:#PSK;;", width: 120, height: 120});
</script>
<p class="info">If not prompted for voucher code, open web browser to www.neverssl.com</p>
</div>
</div>
"""
)
def create_default_dymo_template(apps, schema_editor):
Template = apps.get_model('voucher', 'PrintTemplate')
Template.objects.create(
name="Default dymo template",
type="Dymo",
template = """<?xml version="1.0" encoding="utf-8"?>
<DesktopLabel Version="1">
<DYMOLabel Version="3">
<Description>DYMO Label</Description>
<Orientation>Landscape</Orientation>
<LabelName>Address30251</LabelName>
<InitialLength>0</InitialLength>
<BorderStyle>SolidLine</BorderStyle>
<DYMORect>
<DYMOPoint>
<X>0.23</X>
<Y>0.06</Y>
</DYMOPoint>
<Size>
<Width>3.21</Width>
<Height>0.9966667</Height>
</Size>
</DYMORect>
<BorderColor>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderColor>
<BorderThickness>1</BorderThickness>
<Show_Border>False</Show_Border>
<DynamicLayoutManager>
<RotationBehavior>ClearObjects</RotationBehavior>
<LabelObjects>
<TextObject>
<Name>PortalName</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>PortalName</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>9.4</FontSize>
<IsBold>True</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>0.23</X>
<Y>0.06000001</Y>
</DYMOPoint>
<Size>
<Width>2.376983</Width>
<Height>0.1750735</Height>
</Size>
</ObjectLayout>
</TextObject>
<QRCodeObject>
<Name>QRCode</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="1" R="1" G="1" B="1"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<BarcodeFormat>QRCode</BarcodeFormat>
<Data>
<DataString>WIFI:T:WPA;S:StLeonardsGuest;P:6ddBQbmf;;</DataString>
</Data>
<HorizontalAlignment>Center</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<Size>AutoFit</Size>
<EQRCodeType>QRCodeText</EQRCodeType>
<TextDataHolder>
<Value>WIFI:T:WPA;S:StLeonardsGuest;P:6ddBQbmf;;</Value>
</TextDataHolder>
<ObjectLayout>
<DYMOPoint>
<X>2.606984</X>
<Y>0.06000001</Y>
</DYMOPoint>
<Size>
<Width>0.8330169</Width>
<Height>0.7408347</Height>
</Size>
</ObjectLayout>
</QRCodeObject>
<TextObject>
<Name>ITextObject03</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Center</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Center</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>If you aren't prompted for your voucher, open a web browser to www.neverssl.com</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>6.2</FontSize>
<IsBold>False</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>0.23</X>
<Y>0.9297366</Y>
</DYMOPoint>
<Size>
<Width>3.132135</Width>
<Height>0.125</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>ITextObject5</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>Network:</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.6</FontSize>
<IsBold>True</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>0.23</X>
<Y>0.2308425</Y>
</DYMOPoint>
<Size>
<Width>0.7154332</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>ITextObject3</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>Password:</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.6</FontSize>
<IsBold>True</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>0.23</X>
<Y>0.4304173</Y>
</DYMOPoint>
<Size>
<Width>0.7154332</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>ITextObject4</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>Voucher Code:</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.2</FontSize>
<IsBold>True</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>0.23</X>
<Y>0.6057339</Y>
</DYMOPoint>
<Size>
<Width>0.7154332</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>ITextObject56</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Right</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>Valid for:</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.2</FontSize>
<IsBold>True</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>0.23</X>
<Y>0.7817099</Y>
</DYMOPoint>
<Size>
<Width>0.7154332</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>SSID</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>SSID</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.6</FontSize>
<IsBold>False</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>1.054057</X>
<Y>0.2308425</Y>
</DYMOPoint>
<Size>
<Width>1.484022</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>PSK</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>PSK</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.6</FontSize>
<IsBold>False</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>1.054057</X>
<Y>0.4304174</Y>
</DYMOPoint>
<Size>
<Width>1.458459</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>VoucherCode</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>VoucherCode</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.6</FontSize>
<IsBold>False</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>1.054057</X>
<Y>0.6057339</Y>
</DYMOPoint>
<Size>
<Width>1.50576</Width>
<Height>0.1499567</Height>
</Size>
</ObjectLayout>
</TextObject>
<TextObject>
<Name>TimeLimit</Name>
<Brushes>
<BackgroundBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BackgroundBrush>
<BorderBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</BorderBrush>
<StrokeBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</StrokeBrush>
<FillBrush>
<SolidColorBrush>
<Color A="0" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FillBrush>
</Brushes>
<Rotation>Rotation0</Rotation>
<OutlineThickness>1</OutlineThickness>
<IsOutlined>False</IsOutlined>
<BorderStyle>SolidLine</BorderStyle>
<Margin>
<DYMOThickness Left="0" Top="0" Right="0" Bottom="0" />
</Margin>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<FitMode>AlwaysFit</FitMode>
<IsVertical>False</IsVertical>
<FormattedText>
<FitMode>AlwaysFit</FitMode>
<HorizontalAlignment>Left</HorizontalAlignment>
<VerticalAlignment>Middle</VerticalAlignment>
<IsVertical>False</IsVertical>
<LineTextSpan>
<TextSpan>
<Text>TimeLimit</Text>
<FontInfo>
<FontName>Calibri</FontName>
<FontSize>8.6</FontSize>
<IsBold>False</IsBold>
<IsItalic>False</IsItalic>
<IsUnderline>False</IsUnderline>
<FontBrush>
<SolidColorBrush>
<Color A="1" R="0" G="0" B="0"> </Color>
</SolidColorBrush>
</FontBrush>
</FontInfo>
</TextSpan>
</LineTextSpan>
</FormattedText>
<ObjectLayout>
<DYMOPoint>
<X>1.06557</X>
<Y>0.77978</Y>
</DYMOPoint>
<Size>
<Width>1.494245</Width>
<Height>0.1499566</Height>
</Size>
</ObjectLayout>
</TextObject>
</LabelObjects>
</DynamicLayoutManager>
</DYMOLabel>
<LabelApplication>Blank</LabelApplication>
<DataTable>
<Columns></Columns>
<Rows></Rows>
</DataTable>
</DesktopLabel>
"""
)
dependencies = [
('voucher', '0001_initial'),
]
operations = [
migrations.RunPython(create_default_dymo_template),
migrations.RunPython(create_default_paper_template)
]
|
import os
import pytest
from unittest import TestCase
from unittest.mock import patch, mock_open
from hm_pyhelper.exceptions import UnknownVariantException, \
UnknownVariantAttributeException
from hm_pyhelper.hardware_definitions import is_rockpi, variant_definitions, \
get_variant_attribute, is_raspberry_pi
from hm_pyhelper.sbc import BALENA_ENV_RASPBERRY_PI_MODELS, \
BALENA_ENV_ROCKPI_MODELS
BUILTINS_OPEN_LITERAL = "builtins.open"
class TestHardwareDefinitions(TestCase):
    """Tests for hm_pyhelper.hardware_definitions variant lookup helpers."""

    def test_variant_definitions(self):
        """Every shipped variant must define the full set of expected keys."""
        # Not currently expecting APPNAME, GPIO_PIN_LED, or PIO_PIN_BUTTON
        expected_fields = {
            'FRIENDLY',
            'SPIBUS',
            'RESET',
            'MAC',
            'STATUS',
            'BUTTON',
            'ECCOB',
            'TYPE',
            'CELLULAR',
            'FCC_IDS',
            'CONTAINS_FCC_IDS',
            'IC_IDS',
            'CONTAINS_IC_IDS'
        }
        for variant_name, variant_dict in variant_definitions.items():
            variant_keys = variant_dict.keys()
            # difference() yields the expected keys absent from this variant;
            # an empty result means the variant is fully populated.
            missing_keys = expected_fields.difference(variant_keys)
            self.assertSetEqual(missing_keys, set())

    # Minimal fully-populated fake variant table.  Defined as a class
    # attribute so the @patch decorators below can reference it while the
    # class body is still being evaluated (decorator arguments are resolved
    # in class scope at definition time).
    mock_variant_definitions = {
        'NEBHNT-XYZ': {
            'FRIENDLY': 'XYZ Hotspot Gen 1',
            'APPNAME': 'APPNAMEXYZ',
            'SPIBUS': 'spidevX.Y',
            'KEY_STORAGE_BUS': '/dev/i2c-X',
            'RESET': 00,
            'MAC': 'ethXYZ',
            'STATUS': 00,
            'BUTTON': 00,
            'ECCOB': True,
            'TYPE': 'TYPEXYZ',
            'CELLULAR': False,
            'FCC_IDS': ['1'],
            'CONTAINS_FCC_IDS': ['2', '3'],
            'IC_IDS': ['4'],
            'CONTAINS_IC_IDS': []
        }
    }

    @patch('hm_pyhelper.hardware_definitions.variant_definitions',
           mock_variant_definitions)
    def test_get_variant_attribute(self):
        """get_variant_attribute() must return each stored value verbatim."""
        mock_variant = self.mock_variant_definitions['NEBHNT-XYZ']
        mock_variant_items = mock_variant.items()
        for attribute_name, attribute_val in mock_variant_items:
            returned_val = get_variant_attribute('NEBHNT-XYZ', attribute_name)
            self.assertEqual(returned_val, attribute_val)

    @patch('hm_pyhelper.hardware_definitions.variant_definitions',
           mock_variant_definitions)
    def test_get_variant_attribute_unknown_variant(self):
        """An unknown variant name must raise UnknownVariantException."""
        with pytest.raises(UnknownVariantException):
            get_variant_attribute('Nonexistant', 'FRIENDLY')

    @patch('hm_pyhelper.hardware_definitions.variant_definitions',
           mock_variant_definitions)
    def test_get_variant_attribute_unknown_attribute(self):
        """A known variant with an unknown key must raise UnknownVariantAttributeException."""
        with pytest.raises(UnknownVariantAttributeException):
            get_variant_attribute('NEBHNT-XYZ', 'Nonexistant')

    # raspberry pi model names picked from pi kernel sources
    # https://github.com/raspberrypi/linux
    # grep -ir "raspberry" linux/arch/arm* | grep "model =" | cut -d "=" -f2
    mock_known_dts_pi_models = [
        "Raspberry Pi Model B+",
        "Raspberry Pi Model B",
        "Raspberry Pi Compute Module",
        "Raspberry Pi Zero",
        "Raspberry Pi 2 Model B rev 1.2",
        "Raspberry Pi Compute Module 3",
        "Raspberry Pi Zero 2 W",
        "Raspberry Pi 4 Model B",
        "Raspberry Pi 400",
        "Raspberry Pi Compute Module 4",
        "Raspberry Pi Compute Module 4S",
        "Raspberry Pi Model A+",
        "Raspberry Pi Model A",
        "Raspberry Pi Model B rev2",
        "Raspberry Pi Compute Module IO board rev1",
        "Raspberry Pi Zero W",
        "Raspberry Pi 2 Model B",
        "Raspberry Pi 3 Model A+",
        "Raspberry Pi 3 Model B+",
        "Raspberry Pi 3 Model B",
        "Raspberry Pi Compute Module 3 IO board V3.0"
    ]

    def test_is_raspberry_pi(self):
        """is_raspberry_pi() detects Pi via device-tree model or Balena env."""
        # Mock open() so the device-tree model read returns each known name.
        for model in self.mock_known_dts_pi_models:
            with patch(BUILTINS_OPEN_LITERAL, new_callable=mock_open, read_data=model):
                self.assertTrue(is_raspberry_pi())
        with patch(BUILTINS_OPEN_LITERAL, new_callable=mock_open, read_data="Rock something"):
            self.assertFalse(is_raspberry_pi())
        # test balena env based detection
        for model in BALENA_ENV_RASPBERRY_PI_MODELS:
            with patch.dict(os.environ, {'BALENA_DEVICE_TYPE': model}):
                self.assertTrue(is_raspberry_pi())
        # in absence of the env, it should look for /proc/device-tree/model
        # which will not exist on test environment.
        with self.assertRaises(FileNotFoundError):
            self.assertFalse(is_raspberry_pi())

    mock_known_rock_dts_models = ["ROCK PI 4B"]

    def test_is_rock_pi(self):
        """is_rockpi() detects Rock Pi via device-tree model or Balena env."""
        for model in self.mock_known_rock_dts_models:
            with patch(BUILTINS_OPEN_LITERAL, new_callable=mock_open, read_data=model):
                self.assertTrue(is_rockpi())
        with patch(BUILTINS_OPEN_LITERAL, new_callable=mock_open,
                   read_data="raspberry something"):
            self.assertFalse(is_rockpi())
        # test balena env based detection
        for model in BALENA_ENV_ROCKPI_MODELS:
            with patch.dict(os.environ, {'BALENA_DEVICE_TYPE': model}):
                self.assertTrue(is_rockpi())
        # in absence of the env, it should look for /proc/device-tree/model
        # which will not exist on test environment.
        with self.assertRaises(FileNotFoundError):
            self.assertFalse(is_rockpi())
|
from django.conf.urls import url
from surfcar.questions import views
# URL routes for the questions app.  Listing order matters: the numeric
# catch-all detail route is kept last so the literal prefixes match first.
# NOTE(review): django.conf.urls.url was removed in Django 4.0; if the
# project upgrades, these should become django.urls.re_path -- confirm the
# project's Django version before changing.
urlpatterns = [
    url(r'^$', views.questions, name='questions'),
    url(r'^answered/$', views.answered, name='answered'),
    url(r'^unanswered/$', views.unanswered, name='unanswered'),
    url(r'^all/$', views.all, name='all'),
    url(r'^ask/$', views.AskQuestion.as_view(), name='ask'),
    url(r'^favorite/$', views.favorite, name='favorite'),
    url(r'^answer/$', views.answer, name='answer'),
    url(r'^answer/accept/$', views.accept, name='accept'),
    url(r'^answer/vote/$', views.vote, name='vote'),
    url(r'^question/vote/$', views.question_vote, name='question_vote'),
    # Positional capture: the question id, e.g. /42/
    url(r'^(\d+)/$', views.question, name='question'),
]
|
#!/usr/bin/python
#! -*- encoding: utf-8 -*-
# Copyright (c) 2015 Pierre MOULON.
# Modified by Ivan Eichhardt in 2019
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
#
# this script is to evaluate the Global SfM pipeline to a known camera trajectory
# Notes:
# - OpenMVG 0.9 is required
#
# Usage:
# $ python EvaluationLauncher.py OPENMVG_BIN_DIR HIGHERORDER_BIN ./Benchmarking_Camera_Calibration_2008 ./Benchmarking_Camera_Calibration_2008_out
#
#
# database can be found here: https://github.com/openMVG/SfM_quality_evaluation
import sys
import os
import subprocess
if sys.version_info[0] < 3:
import commands
def ensure_dir(f):
    """Create the parent directory of *f* if it does not already exist.

    Note: this creates ``os.path.dirname(f)``, not ``f`` itself, so callers
    pass a path *inside* (or ending at) the directory they need.

    :param f: file or directory path whose parent must exist.
    """
    d = os.path.dirname(f)
    # exist_ok=True replaces the original exists()/makedirs() pair, which
    # was racy; the ``if d`` guard avoids makedirs('') raising when *f*
    # has no directory component.
    if d:
        os.makedirs(d, exist_ok=True)
# ---- Command-line validation ------------------------------------------------
if len(sys.argv) < 5:
    print(r"/!\ Invalid input")
    print("Usage %s OPENMVG_BIN_DIR HIGHERORDER_BIN ./GT_DATASET ./GT_DATASET_out" % sys.argv[0])
    sys.exit(1)

OPENMVG_SFM_BIN = sys.argv[1]
if not os.path.exists(OPENMVG_SFM_BIN):
    print(r"/!\ Please use a valid OPENMVG_SFM_BIN directory.")
    sys.exit(1)

HIGHERORDER_BIN = sys.argv[2]
if not os.path.exists(HIGHERORDER_BIN):
    print(r"/!\ Please use a valid HIGHERORDER_BIN directory.")
    sys.exit(1)

input_eval_dir = sys.argv[3]
output_eval_dir = os.path.join(sys.argv[4], "evaluation_output")

# Run for each dataset of the input eval dir perform
#  . intrinsic setup
#  . compute features
#  . compute matches
#  . compute camera motion
#  . (optionally) quality evaluation regarding ground truth camera trajectory
for directory in os.listdir(input_eval_dir):
    print(directory)
    matches_dir = os.path.join(output_eval_dir, directory, "matching")
    ensure_dir(matches_dir)

    print(". intrinsic setup")
    command = OPENMVG_SFM_BIN + "/openMVG_main_SfMInit_ImageListing"
    command = command + " -i " + input_eval_dir + "/" + directory + "/images/"
    command = command + " -o " + matches_dir
    command = command + " -k \"2759.48;0;1520.69;0;2764.16;1006.81;0;0;1\""
    command = command + " -c 1"  # force pinhole camera
    command = command + " -g 1"  # shared intrinsic
    proc = subprocess.Popen(command, shell=True)
    proc.wait()

    print(". compute features")
    command = OPENMVG_SFM_BIN + "/openMVG_main_ComputeFeatures"
    command = command + " -i " + matches_dir + "/sfm_data.json"
    command = command + " -o " + matches_dir
    command = command + " -m " + "TBMR_LIOP"
    proc = subprocess.Popen(command, shell=True)
    proc.wait()

    print(". compute matches")
    command = OPENMVG_SFM_BIN + "/openMVG_main_ComputeMatches"
    command = command + " -i " + matches_dir + "/sfm_data.json"
    command = command + " -o " + matches_dir + " -r .8 -g e -n ANNL2 -f 1"
    proc = subprocess.Popen(command, shell=True)
    proc.wait()

    print(". compute camera motion")
    outGlobal_dir = os.path.join(output_eval_dir, directory, "SfM_Global")
    command = OPENMVG_SFM_BIN + "/openMVG_main_GlobalSfM"
    command = command + " -i " + matches_dir + "/sfm_data.json"
    command = command + " -m " + matches_dir
    command = command + " -o " + outGlobal_dir
    command = command + " -f NONE"  # Do not refine intrinsics
    proc = subprocess.Popen(command, shell=True)
    proc.wait()

    # (disabled) quality evaluation against the ground-truth trajectory
    #print (". perform quality evaluation")
    #gt_camera_dir = os.path.join(input_eval_dir, directory, "gt_dense_cameras")
    #outStatistics_dir = os.path.join(outGlobal_dir, "stats")
    #command = OPENMVG_SFM_BIN + "/openMVG_main_evalQuality"
    #command = command + " -i " + gt_camera_dir
    #command = command + " -c " + outGlobal_dir + "/sfm_data.bin"
    #command = command + " -o " + outStatistics_dir
    #proc = subprocess.Popen((str(command)), shell=True)
    #proc.wait()

    # optional, compute final valid structure from the known camera poses.
    # BUGFIX: these list-form Popen calls previously passed shell=True; on
    # POSIX that hands only the first list element to the shell and silently
    # drops every other argument, so the tools ran with no -i/-o at all.
    # Argument lists must run with the default shell=False.
    print(". Structure from Known Poses (robust triangulation)")
    proc = subprocess.Popen([
        os.path.join(OPENMVG_SFM_BIN, "openMVG_main_ComputeStructureFromKnownPoses"),
        "-i", outGlobal_dir + "/sfm_data.bin",
        "-m", matches_dir,
        "-f", os.path.join(matches_dir, "matches.e.bin"),
        "-o", os.path.join(outGlobal_dir, "robust.bin"),
    ])
    proc.wait()

    # draw LAFs BEFORE refinement
    proc = subprocess.Popen([
        os.path.join(HIGHERORDER_BIN, "Sample_OpenMVG_LAFsToSVG"),
        "-i", outGlobal_dir + "/robust.bin",
        "-o", outGlobal_dir + "/SVG_original",
    ])
    proc.wait()

    # estimate surface normals (BEFORE refinement)
    proc = subprocess.Popen([
        os.path.join(HIGHERORDER_BIN, "Sample_OpenMVG_EstimateNormals"),
        "-i", outGlobal_dir + "/robust.bin",
        "-o", outGlobal_dir,
    ])
    proc.wait()

    # PERFORM REFINEMENT
    proc = subprocess.Popen([
        os.path.join(HIGHERORDER_BIN, "Sample_OpenMVG_RefineLAFs"),
        "-i", outGlobal_dir + "/robust.bin",
        "-o", outGlobal_dir,
    ])
    proc.wait()

    # draw LAFs AFTER refinement
    proc = subprocess.Popen([
        os.path.join(HIGHERORDER_BIN, "Sample_OpenMVG_LAFsToSVG"),
        "-i", outGlobal_dir + "/refined_robust.bin",
        "-o", outGlobal_dir + "/SVG_refined",
    ])
    proc.wait()

    # estimate surface normals (AFTER refinement)
    proc = subprocess.Popen([
        os.path.join(HIGHERORDER_BIN, "Sample_OpenMVG_EstimateNormals"),
        "-i", outGlobal_dir + "/refined_robust.bin",
        "-o", outGlobal_dir,
    ])
    proc.wait()

# BUGFIX: the script previously ended with sys.exit(1), reporting failure
# to the caller even after a fully successful run.
sys.exit(0)
|
import sys
from io import BytesIO
from PIL import Image
from django.contrib.auth.base_user import BaseUserManager, AbstractBaseUser
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import models
class MyUserManager(BaseUserManager):
    """Manager that creates accounts keyed by e-mail address."""

    def create_user(self, email, password=None):
        """Create, persist, and return a regular user.

        Raises ValueError when *email* is empty.
        """
        if not email:
            raise ValueError('Users must have an email address')
        new_user = self.model(email=self.normalize_email(email))
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password=None):
        """Create a regular user, then promote it to admin and return it."""
        superuser = self.create_user(email, password=password)
        superuser.is_admin = True
        superuser.save(using=self._db)
        return superuser
class MyUser(AbstractBaseUser):
    """Custom user model keyed by e-mail address.

    On save, a non-empty avatar is re-encoded to a 200x200 JPEG so every
    stored avatar shares one size and format.
    """

    email = models.EmailField("Email", blank=True, unique=True)
    first_name = models.CharField("Имя", blank=True, max_length=30)
    last_name = models.CharField("Фамилия", blank=True, max_length=30)
    phone = models.CharField("Телефон", blank=True, null=True, max_length=16)
    avatar = models.ImageField("Аватар", blank=True)
    status_email = models.BooleanField("Подтверждён Email", default=False)
    date_reg = models.DateTimeField("Дата регистрации", auto_now_add=True)
    is_admin = models.BooleanField("Админ", default=False)
    banned = models.BooleanField("Забанен", default=False)

    # BUGFIX: Django's default-manager convention is ``objects``; the model
    # originally exposed only ``object``, so ``MyUser.objects`` did not
    # exist.  ``object`` is kept as a deprecated alias for existing callers.
    objects = MyUserManager()
    object = MyUserManager()  # deprecated alias; prefer ``objects``

    def __str__(self):
        return f"{self.email}"

    def save(self, *args, **kwargs):
        """Normalize the avatar to a 200x200 JPEG before persisting."""
        if self.avatar:
            img = Image.open(self.avatar)
            # LANCZOS replaces ANTIALIAS, which was removed in Pillow 10.
            resized_new_img = img.convert('RGB').resize((200, 200), Image.LANCZOS)
            filestream = BytesIO()
            resized_new_img.save(filestream, 'JPEG', quality=90)
            filestream.seek(0)
            # Force a .jpg suffix: the payload is always JPEG now.  rsplit
            # is safe for names with zero or several dots, unlike the old
            # '{}.{}'.format(*name.split('.')) which crashed on dot-less
            # names and silently kept the original (wrong) extension.
            name = self.avatar.name.rsplit('.', 1)[0] + '.jpg'
            self.avatar = InMemoryUploadedFile(
                filestream,
                'ImageField',
                name,
                'image/jpeg',  # was 'jpeg/image' (reversed MIME type)
                # Actual payload size; sys.getsizeof(BytesIO) measured the
                # wrapper object, not the image bytes.
                filestream.getbuffer().nbytes,
                None,
            )
        # Convenience attribute (not a DB field) used by templates/callers.
        self.full_name = f'{self.first_name} {self.last_name}'
        super().save(*args, **kwargs)

    class Meta:
        verbose_name = "Аккаунт"
        verbose_name_plural = "Аккаунты"

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    @property
    def is_staff(self):
        # The Django admin site checks is_staff; admins get access.
        return self.is_admin
|
from unittest import TestCase
import os
from tempfile import mktemp
import gzip
from blaze.utils import filetext, filetexts, tmpfile
from blaze.data.utils import tuplify
from blaze.data import *
from blaze.data.usability import *
from blaze.compatibility import xfail
class TestResource(TestCase):
    """resource() must dispatch to the right backend from the URI/extension."""

    def setUp(self):
        # NOTE(review): tempfile.mktemp() is deprecated (check-then-use
        # race); kept because the helpers expect a path that does not exist
        # yet -- confirm before replacing with mkstemp().
        self.filename = mktemp()

    def tearDown(self):
        if os.path.exists(self.filename):
            os.remove(self.filename)

    def test_resource_csv(self):
        with filetext('1,1\n2,2', extension='.csv') as fn:
            dd = resource(fn, schema='2 * int')
            # assertIsInstance survives ``python -O`` and reports the actual
            # type on failure, unlike the original bare assert.
            self.assertIsInstance(dd, CSV)
            self.assertEqual(tuplify(list(dd)), ((1, 1), (2, 2)))

    def test_resource_json(self):
        with filetext('[[1,1], [2,2]]', extension='.json') as fn:
            dd = resource(fn, schema='2 * int')
            self.assertIsInstance(dd, JSON)
            self.assertEqual(tuplify(list(dd)), ((1, 1), (2, 2)))

    @xfail
    def test_resource_gz(self):
        with filetext('1,1\n2,2', extension='.csv.gz', open=gzip.open) as fn:
            dd = resource(fn, schema='2 * int')
            self.assertIsInstance(dd, CSV)
            self.assertEqual(dd.open, gzip.open)
            self.assertEqual(tuplify(list(dd)), ((1, 1), (2, 2)))

    def test_filesystem(self):
        # A glob pattern should resolve to a collection of matching files.
        prefix = 'test_filesystem'
        d = {prefix + 'a.csv': '1,1\n2,2',
             prefix + 'b.csv': '1,1\n2,2'}
        with filetexts(d) as filenames:
            dd = resource(prefix + '*.csv', schema='2 * int')
            self.assertEqual(tuplify(tuple(dd)),
                             (((1, 1), (2, 2)), ((1, 1), (2, 2))))

    def test_sql(self):
        self.assertIsInstance(resource('sqlite:///:memory:::tablename',
                                       schema='{x: int, y: int}'),
                              SQL)

    def test_hdf5(self):
        with tmpfile('.hdf5') as filename:
            self.assertIsInstance(resource(filename + '::/path/to/data/',
                                           schema='2 * int'),
                                  HDF5)
class TestCopy(TestCase):
    """copy() must transfer all rows from one data descriptor to another."""

    def test_copy(self):
        with filetext('1,1\n2,2', extension='.csv') as a:
            with tmpfile(extension='.csv') as b:
                A = resource(a, schema='2 * int')
                B = resource(b, schema='2 * int', mode='a')
                copy(A, B)
                # assertEqual is not stripped under ``python -O`` and shows
                # a diff on failure, unlike the original bare assert.
                self.assertEqual(tuplify(list(B)), ((1, 1), (2, 2)))
class TestInto(TestCase):
    """into() must load rows from descriptors and from plain iterables."""

    def test_into(self):
        with filetext('1,1\n2,2', extension='.csv') as a:
            with tmpfile(extension='.csv') as b:
                A = resource(a, schema='2 * int')
                B = resource(b, schema='2 * int', mode='a')
                B = into(B, A)
                # assertEqual is not stripped under ``python -O`` and shows
                # a diff on failure, unlike the original bare assert.
                self.assertEqual(tuplify(list(B)), ((1, 1), (2, 2)))

    def test_into_iterable(self):
        with tmpfile(extension='.csv') as fn:
            A = CSV(fn, 'a', schema='2 * int')
            data = [(1, 2), (3, 4)]
            A = into(A, data)
            self.assertEqual(tuplify(list(A)), tuplify(data))
|
import logging
import os
import subprocess
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
logger = logging.getLogger(__name__)
class ItemEnterEventListener(EventListener):
    """Handles item activation: copies the selected pass-otp code to the clipboard."""

    def on_event(self, event, extension):
        """Run ``pass otp -c <key>`` for the selected entry, then hide the window.

        The event data carries the pass entry name (``key``) and the
        password-store directory (``path``) selected in the extension UI.
        Raises subprocess.CalledProcessError if ``pass`` exits non-zero.
        """
        data = event.get_data()
        # Lazy %-style args avoid formatting when the log level is disabled.
        logger.info("pass entry: %s", data["key"])
        logger.info("password store: %s", data["path"])
        # Pass the store location via the child's environment instead of
        # mutating os.environ, so the override cannot leak into other code
        # running in this process.
        child_env = dict(os.environ, PASSWORD_STORE_DIR=data["path"])
        subprocess.run(["pass", "otp", "-c", data["key"]], check=True, env=child_env)
        return HideWindowAction()
|
"""
Tests related specifically to integration with Morango.
"""
import os
import unittest
import uuid
from django.test import TestCase
from django.utils import timezone
from morango.models import InstanceIDModel
from morango.models import Store
from morango.models import syncable_models
from morango.sync.controller import MorangoProfileController
from ..constants.morango_sync import PROFILE_FACILITY_DATA
from ..models import Classroom
from ..models import Facility
from ..models import FacilityDataset
from ..models import FacilityUser
from ..models import LearnerGroup
from ..models import Membership
from ..models import Role
from .helpers import DUMMY_PASSWORD
from .sync_utils import multiple_kolibri_servers
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
class FacilityDatasetCertificateTestCase(TestCase):
    """Checks the dataset/certificate bookkeeping that Morango sync relies on."""

    def test_creating_facility_creates_dataset(self):
        # A dataset must be provisioned implicitly alongside the facility.
        new_facility = Facility.objects.create(name="hallo")
        self.assertIsNotNone(new_facility.dataset)

    def test_creating_facilitydataset_creates_certificate(self):
        # A dataset must mint its own root certificate on creation.
        new_dataset = FacilityDataset.objects.create()
        self.assertIsNotNone(new_dataset.get_root_certificate())

    def test_partition_and_id_values(self):
        # The dataset id must anchor the certificate id, the Morango source
        # id, and every partition in the certificate's scope.
        new_facility = Facility.objects.create(name="hallo")
        dataset = new_facility.dataset
        dataset_id = dataset.id
        self.assertEqual(dataset_id, dataset.get_root_certificate().id)
        self.assertEqual(dataset_id, dataset._morango_source_id)
        self.assertTrue(dataset._morango_partition.startswith(dataset_id))
        scope = dataset.get_root_certificate().get_scope()
        for partition in scope.read_filter + scope.write_filter:
            self.assertTrue(partition.startswith(dataset_id))
class DateTimeTZFieldTestCase(TestCase):
    """Regression test: tz-aware datetime fields must deserialize from the store."""

    def setUp(self):
        self.controller = MorangoProfileController(PROFILE_FACILITY_DATA)
        InstanceIDModel.get_or_create_current_instance()

    def test_deserializing_field(self):
        """Round-trip serialize/deserialize must not raise AttributeError."""
        facility = Facility.objects.create(name="hallo")
        FacilityUser.objects.create(username="jamie", facility=facility)
        self.controller.serialize_into_store()
        # Mark everything dirty so deserialize_from_store processes it.
        Store.objects.update(dirty_bit=True)
        try:
            self.controller.deserialize_from_store()
        except AttributeError as e:
            # BUGFIX: Python 3 exceptions have no ``.message`` attribute, so
            # the original ``self.fail(e.message)`` raised AttributeError
            # itself and masked the real failure.
            self.fail(str(e))
@unittest.skipIf(
not os.environ.get("INTEGRATION_TEST"),
"This test will only be run during integration testing.",
)
class EcosystemTestCase(TestCase):
"""
Where possible this test case uses the using kwarg with the db alias in order
to save models to the write DB. Unfortunately, because of an internal issue with
MPTT, this will sometimes fail for MPTT models.
TODO: defer this to the server class as an implementation detail.
"""
def _create_objects(self, server):
fac = Facility.objects.using(server.db_alias).first()
admin = FacilityUser(
username=uuid.uuid4().hex[:30], password=DUMMY_PASSWORD, facility=fac
)
admin.save(using=server.db_alias)
learner = FacilityUser(
username=uuid.uuid4().hex[:30], password=DUMMY_PASSWORD, facility=fac
)
learner.save(using=server.db_alias)
name = uuid.uuid4().hex
server.create_model(Classroom, parent_id=fac.id, name=name)
class_id = Classroom.objects.using(server.db_alias).get(name=name).id
name = uuid.uuid4().hex
server.create_model(LearnerGroup, parent_id=class_id, name=name)
lg_id = LearnerGroup.objects.using(server.db_alias).get(name=name).id
server.create_model(Membership, user_id=learner.id, collection_id=class_id)
server.create_model(Membership, user_id=learner.id, collection_id=lg_id)
server.create_model(Role, collection_id=fac.id, user_id=admin.id, kind="admin")
def assertServerQuerysetEqual(self, s1, s2, dataset_id):
models = syncable_models.get_models(PROFILE_FACILITY_DATA)
models.pop(
0
) # remove FacilityDataset because __str__() does not point to correct db alias
for model in models:
self.assertQuerysetEqual(
model.objects.using(s1.db_alias).filter(dataset_id=dataset_id),
[
repr(u)
for u in model.objects.using(s2.db_alias).filter(
dataset_id=dataset_id
)
],
ordered=False,
)
# morango models
self.assertQuerysetEqual(
Store.objects.using(s1.db_alias).filter(partition__startswith=dataset_id),
[
repr(u)
for u in Store.objects.using(s2.db_alias).filter(
partition__startswith=dataset_id
)
],
ordered=False,
)
@multiple_kolibri_servers(3)
def test_scenarios(self, servers):
servers_len = len(servers)
self.maxDiff = None
s0_alias = servers[0].db_alias
s0_url = servers[0].baseurl
s1_alias = servers[1].db_alias
s1_url = servers[1].baseurl
s2_alias = servers[2].db_alias
s2_url = servers[2].baseurl
servers[0].manage("loaddata", "content_test")
servers[0].manage(
"generateuserdata", "--no-onboarding", "--num-content-items", "1"
)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
servers[2].manage(
"sync",
"--baseurl",
s1_url,
"--username",
"superuser",
"--password",
"password",
)
# assert that all kolibri instances start off equal
for i in range(servers_len):
self.assertServerQuerysetEqual(
servers[i],
servers[(i + 1) % servers_len],
FacilityDataset.objects.using(servers[0].db_alias).first().id,
)
# assert created user is synced
FacilityUser(
username="user",
password=DUMMY_PASSWORD,
facility=Facility.objects.using(s0_alias).first(),
).save(using=s0_alias)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertTrue(
FacilityUser.objects.using(s1_alias).filter(username="user").exists()
)
# missing foreign key lookup should be handled gracefully (https://github.com/learningequality/kolibri/pull/5734)
user = FacilityUser.objects.using(s1_alias).get(username="user")
fac = Facility.objects.using(s1_alias).get()
servers[1].create_model(
Role, collection_id=fac.id, user_id=user.id, kind="admin"
)
servers[0].delete_model(FacilityUser, id=user.id)
# role object that is synced will try to do FK lookup on deleted user
servers[0].manage(
"sync",
"--baseurl",
s1_url,
"--username",
"superuser",
"--password",
"password",
)
# create user with same username on two servers and check they both exist
FacilityUser(
username="copycat",
password=DUMMY_PASSWORD,
facility=Facility.objects.using(s0_alias).first(),
).save(using=s0_alias)
FacilityUser(
username="copycat",
password=DUMMY_PASSWORD,
facility=Facility.objects.using(s1_alias).first(),
).save(using=s1_alias)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertEqual(
FacilityUser.objects.using(s0_alias).filter(username="copycat").count(), 2
)
self.assertEqual(
FacilityUser.objects.using(s1_alias).filter(username="copycat").count(), 2
)
# Add a classroom
servers[0].create_model(
Classroom,
name="classroom",
parent_id=Facility.objects.using(s0_alias).first().id,
)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertTrue(
Classroom.objects.using(s1_alias).filter(name="classroom").exists()
)
# Add a learnergroup
servers[0].create_model(
LearnerGroup,
name="learnergroup",
parent_id=Classroom.objects.using(s0_alias).first().id,
)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertTrue(
LearnerGroup.objects.using(s1_alias).filter(name="learnergroup").exists()
)
# assert conflicting serialized data is appended after same role is created on different device
fac = Facility.objects.using(s1_alias).get()
alk_user = FacilityUser.objects.using(s0_alias).get(username="Antemblowind")
servers[1].create_model(
Role, collection_id=fac.id, user_id=alk_user.id, kind="admin"
)
servers[0].create_model(
Role, collection_id=fac.id, user_id=alk_user.id, kind="admin"
)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
role = Role.objects.using(s1_alias).get(user=alk_user)
admin_role = Store.objects.using(s1_alias).get(id=role.id)
self.assertTrue(admin_role.conflicting_serialized_data)
# assert deleted object is propagated
servers[0].delete_model(FacilityUser, id=alk_user.id)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertFalse(
FacilityUser.objects.using(s1_alias)
.filter(username="Antemblowind")
.exists()
)
self.assertTrue(Store.objects.using(s1_alias).get(id=alk_user.id).deleted)
# # role deletion and re-creation
# Change roles for users
alto_user = FacilityUser.objects.using(s1_alias).get(username="Altobjews1977")
servers[1].create_model(
Role, collection_id=fac.id, user_id=alto_user.id, kind="admin"
)
role_id = (
Role.objects.using(s1_alias)
.get(collection_id=fac.id, user_id=alto_user.id)
.id
)
servers[1].manage(
"sync",
"--baseurl",
s2_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertEqual(
FacilityUser.objects.using(s2_alias)
.get(username="Altobjews1977")
.roles.all()
.first()
.kind,
"admin",
)
# delete admin role and sync
servers[2].delete_model(Role, id=role_id)
servers[1].manage(
"sync",
"--baseurl",
s2_url,
"--username",
"superuser",
"--password",
"password",
)
# create admin role and sync
servers[1].create_model(
Role, collection_id=fac.id, user_id=alto_user.id, kind="admin"
)
role_id = (
Role.objects.using(s1_alias).get(collection=fac.id, user=alto_user.id).id
)
servers[1].manage(
"sync",
"--baseurl",
s2_url,
"--username",
"superuser",
"--password",
"password",
)
self.assertFalse(Store.objects.using(s2_alias).get(id=role_id).deleted)
# Change password for a user, check is changed on other device
server1_fu = FacilityUser.objects.using(s1_alias).get(id=alto_user.id)
server1_fu.set_password("syncing")
server1_fu.save(using=s1_alias)
servers[1].manage(
"sync",
"--baseurl",
s0_url,
"--username",
"superuser",
"--password",
"password",
)
server0_fu = FacilityUser.objects.using(s0_alias).get(id=alto_user.id)
self.assertTrue(server0_fu.check_password("syncing"))
# sync in a circle node twice to ensure full consistency
for i in range(2):
for j in range(servers_len):
servers[j].manage(
"sync",
"--baseurl",
servers[(j + 1) % servers_len].baseurl,
"--username",
"superuser",
"--password",
"password",
)
# assert that the data of specific models match up
for i in range(servers_len):
self.assertServerQuerysetEqual(
servers[i],
servers[(i + 1) % servers_len],
FacilityDataset.objects.using(servers[0].db_alias).first().id,
)
@multiple_kolibri_servers(5)
def test_chaos_sync(self, servers):
    """Randomly generate data on two servers, then verify the whole ring converges.

    Seeds server 0, fans its data out to every other server, alternates
    object creation between servers 2 and 4 while syncing them against each
    other, then performs two full ring passes and checks that neighbouring
    servers hold identical querysets.
    """
    server_count = len(servers)
    # Superuser credentials reused by every sync invocation below.
    admin_creds = ("--username", "superuser", "--password", "password")
    # Bring every server to a consistent starting state from server 0.
    servers[0].manage("generateuserdata", "--no-onboarding")
    for follower in servers[1:]:
        follower.manage("sync", "--baseurl", servers[0].baseurl, *admin_creds)
    # Alternate creating objects on servers 2 and 4, syncing 2 <- 4 each round.
    for round_num in range(10):
        source = servers[2] if round_num % 2 == 0 else servers[4]
        self._create_objects(source)
        servers[2].manage("sync", "--baseurl", servers[4].baseurl, *admin_creds)
    # Sync around the ring twice to guarantee full consistency.
    for _ in range(2):
        for idx, server in enumerate(servers):
            server.manage(
                "sync",
                "--baseurl",
                servers[(idx + 1) % server_count].baseurl,
                *admin_creds,
            )
    # Every server's data should now match its ring neighbour's.
    dataset_id = FacilityDataset.objects.using(servers[0].db_alias).first().id
    for idx in range(server_count):
        self.assertServerQuerysetEqual(
            servers[idx],
            servers[(idx + 1) % server_count],
            dataset_id,
        )
@multiple_kolibri_servers(3)
def test_single_user_sync(self, servers):
    """Single-user sync transfers only the targeted learner between devices.

    Verifies that a satellite device can single-user sync using either
    admin credentials (server 1 / learner1) or the learner's own
    credentials (server 2 / learner2), that only the requested user lands
    on each satellite, and that interaction logs created on the
    satellites sync back to the full-facility server 0.
    """
    self.maxDiff = None
    s0_alias = servers[0].db_alias
    s0_url = servers[0].baseurl
    s1_alias = servers[1].db_alias
    s2_alias = servers[2].db_alias
    servers[0].manage("loaddata", "content_test")
    servers[0].manage(
        "generateuserdata", "--no-onboarding", "--num-content-items", "1"
    )
    facility_id = Facility.objects.using(s0_alias).get().id
    learners = FacilityUser.objects.using(s0_alias).filter(roles__isnull=True)
    learner1 = learners[0]
    learner2 = learners[1]
    # Give learner2 a known password so it can authenticate its own sync.
    learner2.set_password("syncing")
    learner2.save(using=s0_alias)
    # Single-user sync onto server 1 using admin credentials.
    servers[1].manage(
        "sync",
        "--baseurl",
        s0_url,
        "--username",
        "superuser",
        "--password",
        "password",
        "--facility",
        facility_id,
        "--user",
        learner1.id,
    )
    # Single-user sync onto server 2 using the learner's own credentials.
    servers[2].manage(
        "sync",
        "--baseurl",
        s0_url,
        "--username",
        learner2.username,
        "--password",
        "syncing",
        "--facility",
        facility_id,
        "--user",
        learner2.id,
    )
    # Each satellite received exactly its own learner and not the other.
    self.assertTrue(
        FacilityUser.objects.using(s1_alias).filter(id=learner1.id).exists()
    )
    self.assertFalse(
        FacilityUser.objects.using(s1_alias).filter(id=learner2.id).exists()
    )
    self.assertTrue(
        FacilityUser.objects.using(s2_alias).filter(id=learner2.id).exists()
    )
    self.assertFalse(
        FacilityUser.objects.using(s2_alias).filter(id=learner1.id).exists()
    )
    channel_id = "725257a0570044acbd59f8cf6a68b2be"
    content_id = "9f9438fe6b0d42dd8e913d7d04cfb2b2"
    # Each satellite creates a session log and a summary log for its learner.
    for server, learner in ((servers[1], learner1), (servers[2], learner2)):
        for log_model in (ContentSessionLog, ContentSummaryLog):
            server.create_model(
                log_model,
                channel_id=channel_id,
                content_id=content_id,
                user_id=learner.id,
                start_timestamp=timezone.now(),
                kind="audio",
            )
    # Re-sync without credentials: these are resumed single-user syncs
    # (presumably authenticated via previously exchanged certificates —
    # NOTE(review): confirm against the sync command's certificate handling).
    servers[1].manage(
        "sync",
        "--baseurl",
        s0_url,
        "--facility",
        facility_id,
        "--user",
        learner1.id,
    )
    servers[2].manage(
        "sync",
        "--baseurl",
        s0_url,
        "--facility",
        facility_id,
        "--user",
        learner2.id,
    )
    # Server 0 should now hold one log of each kind per learner (2 each).
    for log_model in (ContentSessionLog, ContentSummaryLog):
        self.assertEqual(
            log_model.objects.using(s0_alias)
            .filter(channel_id=channel_id, content_id=content_id)
            .count(),
            2,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.