content stringlengths 5 1.05M |
|---|
import torch.nn as nn
# 5th Edition: VggNet-16 (up to 90.189%)
def Conv3x3BNReLU(in_channels, out_channels):
    """3x3 same-padding convolution -> batch-norm -> in-place ReLU6, as one Sequential."""
    conv = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
    bn = nn.BatchNorm2d(out_channels)
    act = nn.ReLU6(True)
    return nn.Sequential(conv, bn, act)
class VGGNet(nn.Module):
    """VGG-16-style CNN for binary classification on (N, 3, 224, 224) inputs.

    Five conv stages (each ending in a 2x2 max-pool) feed a 3-layer
    fully-connected classifier producing 2 logits.
    """

    def __init__(self):  # (trainSize, 3, 224, 224)
        super(VGGNet, self).__init__()
        block_nums = [2, 2, 3, 3, 3]  # vgg16
        # block_nums = [2, 2, 4, 4, 4]  # vgg19
        self.stage1 = self._make_layers(3, 64, block_nums[0])
        self.stage2 = self._make_layers(64, 128, block_nums[1])
        self.stage3 = self._make_layers(128, 256, block_nums[2])
        self.stage4 = self._make_layers(256, 512, block_nums[3])
        self.stage5 = self._make_layers(512, 512, block_nums[4])
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.Dropout(0.2),
            nn.Linear(4096, 4096),
            nn.Dropout(0.2),
            nn.Linear(4096, 2)
        )
        self._init_params()

    @staticmethod
    def _make_layers(in_channels, out_channels, block_num):
        """Build one stage: `block_num` conv blocks followed by a 2x2 max-pool."""
        convs = [Conv3x3BNReLU(in_channels if i == 0 else out_channels, out_channels)
                 for i in range(block_num)]
        return nn.Sequential(*convs, nn.MaxPool2d(2, 2, ceil_mode=False))

    def _init_params(self):
        """Kaiming init for conv weights; unit gain / zero shift for batch-norms."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Run the five conv stages, then flatten for the classifier.
        for stage in (self.stage1, self.stage2, self.stage3, self.stage4, self.stage5):
            x = stage(x)
        return self.classifier(x.view(x.size(0), -1))
|
import sys
sys.path.append('../')
import os
from pathlib import Path
import time
import numpy as np
import scipy.optimize
import pickle
from py_diff_pd.common.common import ndarray, create_folder, rpy_to_rotation, rpy_to_rotation_gradient
from py_diff_pd.common.common import print_info, print_ok, print_error, PrettyTabular
from py_diff_pd.common.grad_check import check_gradients
from py_diff_pd.core.py_diff_pd_core import StdRealVector
from py_diff_pd.env.rolling_jelly_env_3d import RollingJellyEnv3d
def test_rolling_jelly(verbose):
    """Benchmark differentiable-simulation backends on a rolling jelly ball.

    Simulates `frame_num` frames with several solvers (Newton-PCG,
    Newton-Cholesky, PD with and without acceleration) across thread counts
    and relative tolerances, printing a timing/loss/gradient table per
    tolerance and pickling the results to `<folder>/table.bin`.

    Args:
        verbose: if True, additionally render one .gif per method and open it
            with `eog` before running the benchmark sweep.
    """
    seed = 42
    folder = Path('rolling_jelly_3d')
    refinement = 10
    youngs_modulus = 2e6
    poissons_ratio = 0.4
    env = RollingJellyEnv3d(seed, folder, { 'refinement': refinement,
        'youngs_modulus': youngs_modulus,
        'poissons_ratio': poissons_ratio })
    deformable = env.deformable()
    # Setting thread number.
    thread_cts = [2, 4, 8]
    # 'pd_no_acc' reuses the 'pd_eigen' backend with acceleration disabled (use_acc=0).
    methods = ('newton_pcg', 'newton_cholesky', 'pd_eigen', 'pd_no_acc')
    opts = ({ 'max_newton_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4 },
        { 'max_newton_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4 },
        { 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4,
            'use_bfgs': 1, 'bfgs_history_size': 10 },
        { 'max_pd_iter': 5000, 'max_ls_iter': 10, 'abs_tol': 1e-9, 'rel_tol': 1e-4, 'verbose': 0, 'thread_ct': 4,
            'use_bfgs': 1, 'bfgs_history_size': 10, 'use_acc': 0 })
    dt = 5e-3
    frame_num = 100
    # Initial state: default positions plus small Gaussian noise.
    dofs = deformable.dofs()
    act_dofs = deformable.act_dofs()
    q0 = env.default_init_position() + np.random.normal(scale=0.001, size=dofs)
    radius = env.radius()
    pivot = ndarray([radius, radius, 0])
    # Initial velocities correspond to a rigid rotation with angular velocity
    # `omega` (v = -omega x q, expressed via the skew-symmetric matrix below).
    omega = ndarray([0, 10.0, 0])
    omega_x, omega_y, omega_z = omega
    omega_skewed = ndarray([
        [0, -omega_z, omega_y],
        [omega_z, 0, -omega_x],
        [-omega_y, omega_x, 0]
    ])
    v0 = (q0.reshape((-1, 3)) @ -omega_skewed).ravel()
    a0 = np.zeros(act_dofs)
    f0 = np.zeros(dofs)
    # Visualization.
    if verbose:
        for method, opt in zip(methods, opts):
            _, _, info = env.simulate(dt, frame_num, 'pd_eigen' if method == 'pd_no_acc' else method,
                opt, q0, v0, [a0 for _ in range(frame_num)],
                [f0 for _ in range(frame_num)], require_grad=True, vis_folder=method)
            print('{}: forward: {:3.3f}s; backward: {:3.3f}s'.format(method, info['forward_time'], info['backward_time']))
            os.system('eog {}.gif'.format(folder / method))
    # Benchmark time.
    print('Reporting time cost. DoFs: {:d}, Contact DoFs: {:d}, frames: {:d}, dt: {:3.3e}'.format(dofs,
        env.contact_dofs(), frame_num, dt))
    rel_tols = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
    # One result list per (method, thread count) combination.
    forward_backward_times = {}
    forward_times = {}
    backward_times = {}
    losses = {}
    grads = {}
    for method in methods:
        for thread_ct in thread_cts:
            meth_thread_num = '{}_{}threads'.format(method, thread_ct)
            forward_backward_times[meth_thread_num] = []
            forward_times[meth_thread_num] = []
            backward_times[meth_thread_num] = []
            losses[meth_thread_num] = []
            grads[meth_thread_num] = []
    for rel_tol in rel_tols:
        print_info('rel_tol: {:3.3e}'.format(rel_tol))
        tabular = PrettyTabular({
            'method': '{:^30s}',
            'forward and backward (s)': '{:3.3f}',
            'forward only (s)': '{:3.3f}',
            'loss': '{:3.3f}',
            '|grad|': '{:3.3f}'
        })
        print_info(tabular.head_string())
        for method, opt in zip(methods, opts):
            opt['rel_tol'] = rel_tol
            for thread_ct in thread_cts:
                opt['thread_ct'] = thread_ct
                meth_thread_num = '{}_{}threads'.format(method, thread_ct)
                loss, grad, info = env.simulate(dt, frame_num, 'pd_eigen' if method == 'pd_no_acc' else method,
                    opt, q0, v0, [a0 for _ in range(frame_num)],
                    [f0 for _ in range(frame_num)], require_grad=True, vis_folder=None)
                # Flatten the per-frame gradients into a single vector:
                # [dL/dq0, dL/dv0, sum_t dL/da_t, sum_t dL/df_t].
                grad_q, grad_v, grad_a, grad_f = grad
                grad = np.zeros(q0.size + v0.size + a0.size + f0.size)
                grad[:dofs] = grad_q
                grad[dofs:2 * dofs] = grad_v
                grad[2 * dofs:2 * dofs + act_dofs] = np.sum(ndarray(grad_a), axis=0)
                grad[2 * dofs + act_dofs:] = np.sum(ndarray(grad_f), axis=0)
                l, g, forward_time, backward_time = loss, grad, info['forward_time'], info['backward_time']
                print(tabular.row_string({
                    'method': meth_thread_num,
                    'forward and backward (s)': forward_time + backward_time,
                    'forward only (s)': forward_time,
                    'loss': l,
                    '|grad|': np.linalg.norm(g) }))
                forward_backward_times[meth_thread_num].append(forward_time + backward_time)
                forward_times[meth_thread_num].append(forward_time)
                backward_times[meth_thread_num].append(backward_time)
                losses[meth_thread_num].append(l)
                grads[meth_thread_num].append(g)
    # NOTE(review): forward_backward_times is collected but not pickled below —
    # confirm whether dropping it from the dump is intentional.
    pickle.dump((rel_tols, forward_times, backward_times, losses, grads), open(folder / 'table.bin', 'wb'))
# Script entry point: run the full benchmark with visualization enabled.
if __name__ == '__main__':
    verbose = True
    test_rolling_jelly(verbose)
from radix_sort import radix_sort
def test_empty_list():
    """An empty list comes back unchanged."""
    assert radix_sort([]) == []
def test_single_item_list():
    """A one-item list comes back unchanged."""
    assert radix_sort([100]) == [100]
def test_sorted_list():
    """An already-sorted list comes back unchanged."""
    ordered = [1, 22, 23, 35, 586, 1476]
    assert radix_sort(ordered) == [1, 22, 23, 35, 586, 1476]
def test_randomly_unsorted_list():
    """An arbitrarily shuffled list comes back sorted."""
    shuffled = [23, 22, 476, 35, 1, 86]
    assert radix_sort(shuffled) == sorted(shuffled)
def test_backward_list():
    """A reverse-sorted list comes back in ascending order."""
    backwards = [1476, 586, 35, 23, 22, 1]
    assert radix_sort(backwards) == list(reversed(backwards))
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, t
from scipy.optimize import minimize
def pseudo_obs(data):
    """Return pseudo-observations U_ij = rank_ij / (n + 1) for each column.

    Each column of `data` is replaced by its normalized ranks, the standard
    pseudo-observations used for copula estimation.

    Bug fix: the original did `pseudo_obs = data`, which merely aliased the
    argument and mutated the caller's DataFrame in place. We now work on a
    copy so the input is left untouched.

    Args:
        data: pandas DataFrame of real observations X.

    Returns:
        A new DataFrame of the same shape with values in (0, 1).
    """
    obs = data.copy()
    n_plus_1 = len(obs) + 1
    for i in range(len(obs.columns)):
        # argsort of argsort yields the 0-based rank of each value.
        ranks = obs.iloc[:, i].argsort().argsort()
        obs.iloc[:, i] = [(r + 1) / n_plus_1 for r in ranks]
    return obs
# Load the MSCI price series and index the frame by parsed dates.
df = pd.read_csv("data/msci.csv")
df.index = pd.to_datetime(df["Date"], format="%m/%d/%Y")
df = df.drop(["Date"], axis=1)
# Convert each price column to daily log-returns, dropping the first NaN row.
for col in df.columns.values:
    df[col] = np.log(df[col]) - np.log(df[col].shift(1))
df = df.dropna()
import pycop.multivariate.copula as cop
from pycop.bivariate.copula import archimedean
# Plot the bivariate Clayton copula CDF (pycop's bivariate implementation)...
copl = archimedean(family="clayton")
copl.plot_cdf(theta=1.5, Nsplit=50)
from pycop.bivariate import estimation
#param, cmle = estimation.fit_cmle(copl, df[["US","UK"]])
#print(param)
# ...and the multivariate implementation with d=2 for comparison.
cp = cop.archimedean(family="clayton", d=2)
cp.plot_cdf(theta=1.5, Nsplit=50)
cp.plot_pdf(theta=1.5, Nsplit=50)
"""
psd_obs = pseudo_obs(df[["US","UK"]])
bounded_opti_methods = ['L-BFGS-B', 'TNC', 'SLSQP', 'trust-constr']
def log_likelihood(theta):
    return -sum([ np.log(cp.pdf(theta, [psd_obs.iloc[i,0],psd_obs.iloc[i,1]] ) ) for i in range(0,len(psd_obs))])
results = minimize(log_likelihood, cp.theta_start, method='L-BFGS-B', bounds=cp.bounds_param) #options={'maxiter': 300})#.x[0]
#print("method = ", opti_method, " - success = ", results.success, " - message: ", results.message)
if results.success == True:
    print(results.x, -results.fun)
else:
    print("optimization failed")
"""
|
import os
# List every file available under the Kaggle input directory.
for root_dir, _, names in os.walk('/kaggle/input'):
    for name in names:
        print(os.path.join(root_dir, name))
def load_images_from_folder(folder):
    """Read up to 4 images from `folder` with OpenCV into a local list.

    NOTE(review): `cv2` is not imported in this chunk and the list is never
    returned — this looks like notebook-cell code; confirm how `images`
    reaches the plotting code below.
    """
    images = []
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder,filename))
        if img is not None:
            images.append(img)
        if len(images)>3:
            break
# Show the loaded images in a 2x2 grid, each titled with its array shape.
# NOTE(review): `plt` (matplotlib) and a populated `images` must exist in this
# scope before these lines run — presumably earlier notebook cells; verify.
fig=plt.figure(figsize=(10,12))
xrange=range(1,5)
for img,x in zip(images,xrange):
    ax=fig.add_subplot(2,2,x)
    ax.imshow(img)
    ax.set_title(img.shape)
import cv2
import numpy as np
# Demonstrate fixed vs. adaptive thresholding on a sample image.
img = cv2.imread('download.jpeg')
# NOTE(review): this color-image threshold is immediately overwritten by the
# grayscale threshold two lines below — dead assignment; confirm intent.
ret, threshold = cv2.threshold(img, 12, 255, cv2.THRESH_BINARY)
im2gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(im2gray, 10, 255, cv2.THRESH_BINARY)
# Gaussian-weighted adaptive threshold: 115-pixel neighborhood, constant 1.
adaptive_threshold = cv2.adaptiveThreshold(im2gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
    cv2.THRESH_BINARY, 115, 1)
cv2.imshow('original', img)
cv2.imshow('threshold', threshold)
cv2.imshow('adaptive', adaptive_threshold)
cv2.waitKey(0)  # block until any key press, then tear down the windows
cv2.destroyAllWindows()
|
from typing import Dict, List
from MicroTokenizer import get_dict_file
from MicroTokenizer.data_structures.dictionary import DictionaryData
from MicroTokenizer.data_structures.non_recursive_algorithm import NonRecursiveAlgorithm
from MicroTokenizer.data_structures.train_dictionary import TrainDictionary
from MicroTokenizer.data_structures.trie_algorithm import TrieAlgorithm
from MicroTokenizer.tokenizers.base_tokenizer import BaseTokenizer
class DAGTokenizer(BaseTokenizer):
    """Dictionary-driven DAG tokenizer.

    Builds a token graph over the input with a trie-backed dictionary and
    segments along the shortest path through that graph.
    """

    def __init__(self, token_dict: Dict[str, int] = None):
        # Inference-side structures, only built when a dictionary is supplied.
        if token_dict:
            self.trie_tree = TrieAlgorithm(raw_dict_data=token_dict)
            self.graph_builder = NonRecursiveAlgorithm(self.trie_tree)
        else:
            self.trie_tree = None
            self.graph_builder = None
        # Training-side dictionary accumulator.
        self.token_dict = TrainDictionary()

    @classmethod
    def load(cls, model_dir: str):
        """Build a tokenizer from the dictionary file stored in `model_dir`."""
        dict_file = get_dict_file(model_dir)
        return cls(DictionaryData.read_dict(dict_file))

    def segment(self, message: str) -> List[str]:
        """Tokenize `message`, dropping the artificial start/end tokens."""
        builder = self.graph_builder
        builder.init_graph()
        builder.build_graph(message)
        builder.compute_shortest_path()
        # remove start and end token
        return builder.get_tokens()[1:-1]

    def train(self, corpus):
        """Accumulate `corpus` lines into the dictionary, train it, and
        rebuild the inference structures from the trained dictionary."""
        for line in corpus:
            self.token_dict.train_one_line(line)
        self.token_dict.do_train()
        # load the new model
        self.trie_tree = TrieAlgorithm(raw_dict_data=self.token_dict.dictionary)
        self.graph_builder = NonRecursiveAlgorithm(self.trie_tree)

    def save(self, output_dir: str):
        """Persist the trained dictionary to `output_dir`."""
        self.token_dict.persist_to_dir(output_dir)
|
import openpyxl
from openpyxl import load_workbook

# Insert a blank row 2 into Sheet1 and fill it with a copy of the row that
# was previously there (now row 3), then save under a new name.
wb = load_workbook("testFiles/basicTest.xlsx")
name_list = wb.sheetnames  # fix: get_sheet_names() is deprecated in openpyxl
print(name_list)
ws = wb['Sheet1']
ws.insert_rows(2)
# Fix: the original `range(1, ws.max_column)` skipped the last column;
# openpyxl columns are 1-based and max_column is inclusive.
for i in range(1, ws.max_column + 1):
    ws.cell(row=2, column=i).value = ws.cell(row=3, column=i).value
wb.save("testFiles/basicTest_2.xlsx")
|
################################################################################
"""
DJ JOE Website Playlist File Generator
--------------------------------------
(c) 2021 - Stanley Solutions - Joe Stanley
This application serves an interface to allow the recording of Apple Music or
Spotify playlists.
"""
################################################################################
# Requirements
from urllib.parse import urlparse
from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
# Locals
import spotify_client
import apple_music_client
from formatter import playlist_html_table
# Application Base
app = FastAPI()
# Mount the Static File Path
app.mount("/static", StaticFiles(directory="static"), name="static")
# Jinja2 templates used to render the playlist page (templates/index.html).
templates = Jinja2Templates(directory="templates")
def page(request: Request, url: str = None):
    """Generate the HTML page content using any provided playlist URL.

    Picks a playlist client based on the URL's domain (Apple Music or
    Spotify), renders the playlist as an HTML table, and returns the
    templated index page.

    Bug fix: a URL from an unrecognized domain previously left `client`
    unbound and raised NameError; it now falls through and renders an
    empty playlist table instead.
    """
    data = ""
    if url is not None:
        # "Switch" on the domain name to select the playlist client.
        domain = urlparse(url).netloc
        client = None
        if 'music.apple' in domain:
            client = apple_music_client.ApplePlaylister(url)
        elif 'spotify' in domain:
            client = spotify_client.SpotifyPlaylister(url)
        if client is not None:
            playlist, tracks = client()
            data = playlist_html_table(
                playlist=playlist,
                tracks=tracks,
                table_id="playlist",
                classes="",
            )
    # Return Template Response Using Data
    return templates.TemplateResponse(
        "index.html",
        {
            "request": request,
            "playlist_table": data,
        },
    )
# Main Application Response
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Serve the index page with no playlist loaded."""
    return page(request=request)
# Redirect for Playlist Endpoint
@app.get("/load_playlist")
async def load_playlist_redirect():
    """A GET on /load_playlist (e.g. a page refresh) bounces back to the index."""
    return RedirectResponse("/")
# Load Playlist
@app.post("/load_playlist", response_class=HTMLResponse)
async def load_playlist(request: Request, playlist: str = Form(...)):
    """Render the page for the playlist URL submitted via the form."""
    print(playlist)  # NOTE(review): debug print — consider proper logging
    return page(request=request, url=playlist)
import torch
import torch.nn as nn
import torch.nn.functional as F
from hivemind.server.layers.custom_experts import register_expert_class
def sample_input(batch_size, hidden_dim):
    """Build an uninitialized (batch_size, hidden_dim) tensor describing the
    'perceptron' expert's input schema (PEP 8: `def` over assigned lambda)."""
    return torch.empty((batch_size, hidden_dim))
@register_expert_class('perceptron', sample_input)
class MultilayerPerceptron(nn.Module):
    """Three-layer ReLU MLP expert:
    hidden_dim -> 2*hidden_dim -> 2*hidden_dim -> num_classes logits."""

    def __init__(self, hidden_dim, num_classes=10):
        super().__init__()
        self.layer1 = nn.Linear(hidden_dim, 2 * hidden_dim)
        self.layer2 = nn.Linear(2 * hidden_dim, 2 * hidden_dim)
        self.layer3 = nn.Linear(2 * hidden_dim, num_classes)

    def forward(self, x):
        hidden = F.relu(self.layer1(x))
        hidden = F.relu(self.layer2(hidden))
        return self.layer3(hidden)
def multihead_sample_input(batch_size, hidden_dim):
    """Build the three dummy tensors describing the 'multihead' expert's input
    schema: widths hidden_dim, 2*hidden_dim and 3*hidden_dim
    (PEP 8: `def` over assigned lambda)."""
    return (torch.empty((batch_size, hidden_dim)),
            torch.empty((batch_size, 2 * hidden_dim)),
            torch.empty((batch_size, 3 * hidden_dim)),)
@register_expert_class('multihead', multihead_sample_input)
class MultiheadNetwork(nn.Module):
    """Expert that linearly projects three differently-sized inputs to
    num_classes logits and sums the projections."""

    def __init__(self, hidden_dim, num_classes=10):
        super().__init__()
        self.layer1 = nn.Linear(hidden_dim, num_classes)
        self.layer2 = nn.Linear(2 * hidden_dim, num_classes)
        self.layer3 = nn.Linear(3 * hidden_dim, num_classes)

    def forward(self, x1, x2, x3):
        logits = self.layer1(x1)
        logits = logits + self.layer2(x2)
        logits = logits + self.layer3(x3)
        return logits
|
import logging
import os
import signal
import sys
import time
from configparser import ConfigParser
import dns.resolver
import requests
SITES_CONFIG = 'config/sites.cfg'  # one section per hostname with user/password
# OVH DynHost update endpoint; %(hostname)s and %(ip)s are filled per request.
URI_UPDATE = 'http://www.ovh.com/nic/update?system=dyndns&hostname=%(hostname)s&myip=%(ip)s'
URI_GET_IP = 'https://ifconfig.me/ip'  # echoes the caller's public IP address
DNS_RESOLVERS = ['8.8.8.8']  # resolver used to check the current A record
UPDATE_INTERVAL = 60 * 10  # Seconds between update passes
class Domain:
    """Represent a domain or sub-domain that can update DNS entries with DynHost (ovh)."""

    def __init__(self, domain: str, user: str, password: str):
        """Instantiate the domain object with its DynHost credentials."""
        self.domain = domain
        self.credentials = (user, password)
        self.logger = logging.getLogger(self.domain)
        self.old_ip = None  # last A record seen, for logging the transition

    def is_update_to_date(self, current_ip) -> bool:
        """Indicates if the domain address is up to date.

        Side effect: records the currently published A record in `old_ip`.
        """
        resolver = dns.resolver.Resolver()
        resolver.nameservers = DNS_RESOLVERS
        answers = resolver.resolve(
            qname=self.domain,
            rdtype='A',
        )
        self.old_ip = answers[0].address
        return self.old_ip == current_ip

    def update(self, current_ip: str) -> None:
        """Update DNS entry only if the DNS address ip is outdated."""
        if self.is_update_to_date(current_ip):
            return
        response = requests.get(
            url=URI_UPDATE % {'hostname': self.domain, 'ip': current_ip},
            auth=self.credentials,
        )
        if response.status_code == 200:
            self.logger.info('Successfully updated from %s to %s.' %
                             (self.old_ip, current_ip))
        elif response.status_code == 401:
            self.logger.error('Failed to update DNS. Bad credentials.')
        else:
            self.logger.error(
                'Failed to update DNS. Unexpected error happened.')
def get_current_ip() -> str:
    """Returns the current device ip address."""
    response = requests.get(URI_GET_IP)
    return response.text
def signal_handler(signum: int, frame):
    """Handle SIGINT/SIGTERM: log a clean shutdown and exit with status 0."""
    # Return the cursor to column 0 so the echoed ^C is overwritten.
    print(end="\r")
    logging.info("stopped")
    raise SystemExit(0)
def main():
    """Configure logs, and update the domains configured in SITES_CONFIG file.

    Runs forever: every UPDATE_INTERVAL seconds, looks up the device's public
    IP and pushes it to each configured domain's DynHost record.
    """
    os.makedirs('logs', exist_ok=True)
    # Log both to logs/lastest.log and to the console.
    logging.basicConfig(
        format='[%(asctime)s][%(levelname)s][%(name)s] %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
        handlers=[
            logging.FileHandler('logs/lastest.log'),
            logging.StreamHandler()
        ],
    )
    # Bail out early when there is no site configuration to work from.
    if not os.path.exists(SITES_CONFIG):
        logging.error('sites.conf does not exists.')
        sys.exit(1)
    config = ConfigParser()
    config.read(os.path.join(SITES_CONFIG))
    # Exit cleanly on Ctrl-C or service stop.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    logging.info("started")
    # Poll loop: refresh every configured domain, then sleep.
    while True:
        current_ip = get_current_ip()
        for section in config.sections():
            domain = Domain(
                domain=section,
                user=config.get(section, 'user'),
                password=config.get(section, 'password'),
            )
            domain.update(current_ip)
        time.sleep(UPDATE_INTERVAL)
# Script entry point.
if __name__ == '__main__':
    main()
|
import AppKit
from PyObjCTools.TestSupport import TestCase, min_sdk_level
import objc
class TestNSDatePickerHelper(AppKit.NSObject):
    """Stub delegate whose selector signature is introspected in testMethods."""
    def datePickerCell_validateProposedDateValue_timeInterval_(self, v1, v2, v3):
        pass
class TestNSDatePickerCell(TestCase):
    """Validate AppKit's NSDatePickerCell constants and method metadata."""

    def testConstants(self):
        # (constant name, expected value) pairs — same assertions as the
        # spelled-out form, checked via getattr to keep the table compact.
        expected = [
            ("NSTextFieldAndStepperDatePickerStyle", 0),
            ("NSClockAndCalendarDatePickerStyle", 1),
            ("NSTextFieldDatePickerStyle", 2),
            ("NSSingleDateMode", 0),
            ("NSRangeDateMode", 1),
            ("NSHourMinuteDatePickerElementFlag", 0x000C),
            ("NSHourMinuteSecondDatePickerElementFlag", 0x000E),
            ("NSTimeZoneDatePickerElementFlag", 0x0010),
            ("NSYearMonthDatePickerElementFlag", 0x00C0),
            ("NSYearMonthDayDatePickerElementFlag", 0x00E0),
            ("NSEraDatePickerElementFlag", 0x0100),
            ("NSDatePickerStyleTextFieldAndStepper", 0),
            ("NSDatePickerStyleClockAndCalendar", 1),
            ("NSDatePickerStyleTextField", 2),
            ("NSDatePickerModeSingle", 0),
            ("NSDatePickerModeRange", 1),
            ("NSDatePickerElementFlagHourMinute", 0x000C),
            ("NSDatePickerElementFlagHourMinuteSecond", 0x000E),
            ("NSDatePickerElementFlagTimeZone", 0x0010),
            ("NSDatePickerElementFlagYearMonth", 0x00C0),
            ("NSDatePickerElementFlagYearMonthDay", 0x00E0),
            ("NSDatePickerElementFlagEra", 0x0100),
        ]
        for name, value in expected:
            self.assertEqual(getattr(AppKit, name), value)

    def testMethods(self):
        helper = TestNSDatePickerHelper.alloc().init()
        meta = helper.datePickerCell_validateProposedDateValue_timeInterval_.__metadata__()
        # The delegate receives the proposed date and interval by reference.
        self.assertEqual(meta["arguments"][3]["type"], b"N^@")
        self.assertStartswith(meta["arguments"][4]["type"], b"N^")
        self.assertResultIsBOOL(AppKit.NSDatePickerCell.drawsBackground)
        self.assertArgIsBOOL(AppKit.NSDatePickerCell.setDrawsBackground_, 0)

    @min_sdk_level("10.10")
    def testProtocols(self):
        objc.protocolNamed("NSDatePickerCellDelegate")
|
# Generated by Django 2.2.4 on 2019-08-11 14:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: redefine Alarm.sound as a ForeignKey to
    alarms_project.Sound with default pk=1 and CASCADE on delete."""
    dependencies = [
        ('alarms_project', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='alarm',
            name='sound',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='alarms_project.Sound'),
        ),
    ]
|
"""
config.py defines default archive appliance server properties
"""
hostname = "pscaa02"  # archive appliance host name
data_port = 17668  # data retrieval port — presumably the appliance default; confirm
mgmt_port = 17665  # management port — presumably the appliance default; confirm
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Queue(object):
    """FIFO queue backed by a plain list.

    New items are inserted at index 0 and dequeued from the tail, so the
    oldest item always comes out first.
    """

    def __init__(self):
        self.items = []

    def get_size(self):
        """Number of items currently queued."""
        return len(self.items)

    def is_empty(self):
        """True when nothing is queued."""
        return len(self.items) == 0

    def enqueue(self, item):
        """Add `item` to the back of the queue."""
        self.items.insert(0, item)

    def dequeue(self):
        """Remove and return the oldest item."""
        return self.items.pop()
# Demo: exercise the queue and show its backing list.
# Fix: the Python 2 `print q.items` statements are syntax errors under
# Python 3; print() works on both.
if __name__ == "__main__":
    q = Queue()
    q.enqueue('hello')
    q.enqueue('dog')
    print(q.items)
    q.enqueue(3)
    q.dequeue()
    print(q.items)
from Functions.Controls.Keyboard.keyboard_pressed import *
from Objects.Entity.Character.Character import *
from Class.Entity.Character.Character import Character
from intValues import *
from Functions.NFC.getUID import *
if raspberry:
from Functions.Server.client import *
def switch_heroes():
    """Swap the active hero when F1/F2 is pressed or a matching NFC tag is read.

    NOTE(review): relies on module-level flags `raspberry`/`nfc` (from
    intValues) and on `Sock`/`time` brought in via star imports. If `nfc` is
    true while `raspberry` is false, `uid` is referenced unbound — confirm
    those flags are never combined that way.
    """
    if raspberry:
        uid = getUID()
    if keyboard_pressed() == "F1" or (nfc and uid == ['0xb0', '0xfa', '0x5b', '0x56', '0x90', '0x0']):
        # Replace the current hero with a Fireman (same base stats).
        Character.charact_list[0].delete()
        Fireman(_w=60, _h=120, _m=20, _g=1.4, _health=100, _speed=8, _shooting_rates=100, _damage=60)
        if raspberry:
            # Tell the companion server which hero is now active.
            msgx = "fireman"
            Sock.send(msgx.encode())
            time.sleep(0.1)
    elif keyboard_pressed() == "F2" or (nfc and uid == ['0x50', '0xe2', '0x5a', '0x56', '0x90', '0x0']):
        # Replace the current hero with a Knight (same base stats).
        Character.charact_list[0].delete()
        Knight(_w=60, _h=120, _m=20, _g=1.4, _health=100, _speed=8, _shooting_rates=100, _damage=60)
        if raspberry:
            msgx = "knight"
            Sock.send(msgx.encode())
            time.sleep(0.1)
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

# Fix: `os` was used below without being imported anywhere in this file.
import os

# Extension modules enabled for the risk-assessments feature.
EXTENSIONS = ['ggrc_risk_assessments']
# Service URL, overridable via the GGRC_RISK_ASSESSMENT_URL environment variable.
RISK_ASSESSMENT_URL = os.environ.get('GGRC_RISK_ASSESSMENT_URL', 'http://localhost:8080')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/stop_starting_start_stopping/pandas_learning_basics/
[file]
python pandas_learning_basics_2.py
# source
- all to know to load pandas_dataframe_importing_csv Pandas
https://chrisalbon.com/code/python/data_wrangling/pandas_dataframe_importing_csv/
"""
import numpy as np
import pandas as pd
# OUTPUT_1
# df = pd.read_csv('data/world_happiness_report_2019.csv')
# df = pd.read_csv('data/world_happiness_report_2019_2.csv')
# print(df)
# OUTPUT_2
# df = pd.read_csv('data/world_happiness_report_2019_2.csv', header=None)
# print(df)
# Country (region),Ladder,SD of Ladder,Positive affect,Negative affect,Social support,Freedom,Corruption,Generosity,Log of GDP per capita, Healthy life expectancy
# df = pd.read_csv('data/world_happiness_report_2019.csv', names=['Country(region)', 'Ladder', 'SD of Ladder', 'Positive affect', 'Negative affect', 'Social support', 'Freedom', 'Corruption', 'Generosity', 'Log of GDP per capita', 'Healthy life expectancy'])
# print(df)
|
import pandas as pd
import os
#|Load data from single or multiple CSV files into internal DataFrame within "data" object
def from_csv(file_path, mode, file_search, index, sep):
    """Load CSV data into a DataFrame.

    Args:
        file_path: path to a file ('single' mode) or to a directory
            (other modes; must end with '/').
        mode: 'single' (one file), 'compile' (concatenate matching files),
            or 'all' (load each matching file, keeping the last).
        file_search: substring filter on file names ('' matches everything).
        index: column to use as the index ('' to skip).
        sep: field separator passed to pandas.

    Returns:
        (DataFrame, data_name) tuple.

    Fixes vs. the original Python 2 code: print statements and `<>` converted
    to Python 3; `mode == compile` compared against the *builtin* `compile`
    instead of the string 'compile'; compile mode overwrote the accumulator on
    every file (duplicating the last one) — it now concatenates all matches.
    """
    if mode == 'single':
        data_name = file_path[file_path.rfind('/') + 1:-4]
        print('-- Loading %s.csv --' % data_name)
        df = pd.read_csv(file_path, sep=sep)
    else:
        frames = []  # accumulator for 'compile' mode
        for fn in os.listdir(file_path):
            if '.csv' in fn and (file_search in fn or file_search == ''):
                print('-- Loading %s --' % fn)
                df = pd.read_csv(file_path + fn, sep=sep)
                if mode == 'compile':
                    frames.append(df)
                elif mode == 'all':
                    if index != '':
                        df = df.set_index(index)
                    data_name = fn[:-4]
        if mode == 'compile':
            df = pd.concat(frames, ignore_index=True)
    if mode == 'single' or mode == 'compile':
        if mode == 'compile':
            data_name = file_search
        if index != '':
            df = df.set_index(index)
    return df, data_name
#|Output data from internal DataFrame(s) to csv
def to_csv(data, type, data_name, file_path, index):
    """Write DataFrame(s) held in `data.df[type][data_name]` out to CSV.

    Args:
        data: object exposing a nested dict of DataFrames at `data.df`.
        type: outer key into `data.df` (NOTE(review): shadows the builtin
            `type`; kept for caller compatibility).
        data_name: inner key, or 'ALL' to dump every stored DataFrame.
        file_path: output directory prefix (should end with '/').
        index: passed through to DataFrame.to_csv.

    Fix vs. the original Python 2 code: the `<>` inequality operator is a
    syntax error in Python 3 and is replaced with `!=`.
    """
    if data_name != 'ALL':
        file_string = '%s%s_%s.csv' % (file_path, type, data_name)
        data.df[type][data_name].to_csv(file_string, index=index)
    else:
        # Dump every DataFrame under every type key.
        for type in data.df:
            for data_name in data.df[type]:
                file_string = '%s%s_%s.csv' % (file_path, type, data_name)
                data.df[type][data_name].to_csv(file_string, index=index)
"""Head modules
"""
import numpy as np
from bpnet.utils import dict_prefix_key
from bpnet.metrics import ClassificationMetrics, RegressionMetrics
import keras.backend as K
import tensorflow as tf
import keras.layers as kl
import gin
import os
import abc
from bpnet.losses import ignoreNaNloss
class BaseHead:
    """Abstract interface for a model output head.

    Concrete implementations are expected to carry at least:
      loss         -> training loss for this head
      loss_weight  -> loss weight (1 by default)
    plus whatever kwargs/name/_model attributes they set up in __init__.
    """
    @abc.abstractmethod
    def get_target(self, task):
        """Return the per-task name of this head's target tensor."""
        pass
    @abc.abstractmethod
    def __call__(self, inp, task):
        """Attach the head to trunk tensor `inp` for `task`.

        Useful for wiring together the model; returns the output tensor.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def get_preact_tensor(self, graph=None):
        """Return the single pre-activation tensor for this head."""
        pass
    @abc.abstractmethod
    def intp_tensors(self, preact_only=False, graph=None):
        """Dictionary of all available interpretation tensors
        for `get_interpretation_node`.
        """
        raise NotImplementedError
    # @abc.abstractmethod
    # def get_intp_tensor(self, which=None):
    #     """Returns a target tensor which is a scalar
    #     w.r.t. to which to compute the outputs
    #     Args:
    #         which [string]: If None, use the default
    #         **kwargs: optional kwargs for the interpretation method
    #     Returns:
    #         scalar tensor
    #     """
    #     raise NotImplementedError
    def copy(self):
        """Return an independent deep copy of this head."""
        from copy import deepcopy
        return deepcopy(self)
class BaseHeadWBias(BaseHead):
    """Head that can additionally consume a measurement-bias input tensor."""
    @abc.abstractmethod
    def get_bias_input(self, task):
        """Return the per-task name of the bias input tensor."""
        pass
    @abc.abstractmethod
    def neutral_bias_input(self, task, length, seqlen):
        """Return a (name, zero-array) pair usable as a no-op bias input."""
        pass
def id_fn(x):
    """Identity function (used by `named_tensor`'s Lambda layer)."""
    return x
def named_tensor(x, name):
    """Wrap `x` in an identity Lambda layer so the tensor carries `name`."""
    return kl.Lambda(id_fn, name=name)(x)
# --------------------------------------------
# Head implementations
@gin.configurable
class ActivityHead(BaseHeadWBias):
    """Scalar head with a NaN-tolerant loss and an optional bias input.

    Structurally identical to `ScalarHead` but defaults to `ignoreNaNloss`,
    so examples with NaN targets do not contribute to training.
    """
    def __init__(self, target_name,  # "{task}/scalar"
                 net,  # function that takes a keras tensor and returns a keras tensor
                 activation=None,
                 loss=ignoreNaNloss,
                 loss_weight=1,
                 metric=RegressionMetrics(),
                 postproc_fn=None,  # post-processing to apply so that we are in the right scale
                 # bias input
                 use_bias=False,
                 bias_net=None,
                 bias_input='bias/{task}/scalar',
                 bias_shape=(1,),
                 ):
        self.net = net
        self.loss = loss
        self.loss_weight = loss_weight
        self.metric = metric
        self.postproc_fn = postproc_fn
        self.target_name = target_name
        self.activation = activation
        self.bias_input = bias_input
        self.bias_net = bias_net
        self.use_bias = use_bias
        self.bias_shape = bias_shape
    def get_target(self, task):
        """Format the target tensor name for `task`."""
        return self.target_name.format(task=task)
    def __call__(self, inp, task):
        """Attach the head to trunk tensor `inp`.

        Returns (named output tensor, list of extra bias input tensors).
        """
        o = self.net(inp)
        # remember the tensors useful for interpretation (referred by name)
        self.pre_act = o.name
        # add the target bias
        if self.use_bias:
            binp = kl.Input(self.bias_shape, name=self.get_bias_input(task))
            bias_inputs = [binp]
            # add the bias term
            if self.bias_net is not None:
                bias_x = self.bias_net(binp)
                # This allows to normalize the bias data first
                # (e.g. when we have profile counts to aggregate it first)
            else:
                # Don't use the nn 'bias' so that when the measurement bias = 0,
                # this term vanishes
                bias_x = kl.Dense(1, use_bias=False)(binp)
            o = kl.add([o, bias_x])
        else:
            bias_inputs = []
        if self.activation is not None:
            if isinstance(self.activation, str):
                o = kl.Activation(self.activation)(o)
            else:
                o = self.activation(o)
        self.post_act = o.name
        # label the target op so that we can use a dictionary of targets
        # to train the model
        return named_tensor(o, name=self.get_target(task)), bias_inputs
    def get_preact_tensor(self, graph=None):
        """Look up the pre-activation tensor by the name recorded in __call__."""
        if graph is None:
            graph = tf.get_default_graph()
        return graph.get_tensor_by_name(self.pre_act)
    def intp_tensors(self, preact_only=False, graph=None):
        """Return the available interpretation tensors keyed by name."""
        if graph is None:
            graph = tf.get_default_graph()
        if self.activation is None:
            # the post-activation doesn't
            # have any specific meaning when
            # we don't use any activation function
            return {"pre-act": graph.get_tensor_by_name(self.pre_act)}
        if preact_only:
            return {"pre-act": graph.get_tensor_by_name(self.pre_act)}
        else:
            return {"pre-act": graph.get_tensor_by_name(self.pre_act),
                    "output": graph.get_tensor_by_name(self.post_act)}
    # def get_intp_tensor(self, which='pre-act'):
    #     return self.intp_tensors()[which]
    def get_bias_input(self, task):
        """Format the bias input name for `task`."""
        return self.bias_input.format(task=task)
    def neutral_bias_input(self, task, length, seqlen):
        """Create dummy bias input
        Return: (k, v) tuple
        """
        # None entries in bias_shape are variable-length axes -> use seqlen.
        shape = tuple([x if x is not None else seqlen
                       for x in self.bias_shape])
        return (self.get_bias_input(task), np.zeros((length, ) + shape))
@gin.configurable
class ScalarHead(BaseHeadWBias):
    """Generic scalar regression head with an optional measurement-bias input.

    Wires `net` onto the model trunk to produce one scalar target per task
    and records the pre/post-activation tensor names for interpretation.
    NOTE(review): near-duplicate of `ActivityHead` (only the default loss
    differs) — consider unifying.
    """
    def __init__(self, target_name,  # "{task}/scalar"
                 net,  # function that takes a keras tensor and returns a keras tensor
                 activation=None,
                 loss='mse',
                 loss_weight=1,
                 metric=RegressionMetrics(),
                 postproc_fn=None,  # post-processing to apply so that we are in the right scale
                 # bias input
                 use_bias=False,
                 bias_net=None,
                 bias_input='bias/{task}/scalar',
                 bias_shape=(1,),
                 ):
        self.net = net
        self.loss = loss
        self.loss_weight = loss_weight
        self.metric = metric
        self.postproc_fn = postproc_fn
        self.target_name = target_name
        self.activation = activation
        self.bias_input = bias_input
        self.bias_net = bias_net
        self.use_bias = use_bias
        self.bias_shape = bias_shape
    def get_target(self, task):
        """Format the target tensor name for `task`."""
        return self.target_name.format(task=task)
    def __call__(self, inp, task):
        """Attach the head to trunk tensor `inp`.

        Returns (named output tensor, list of extra bias input tensors).
        """
        o = self.net(inp)
        # remember the tensors useful for interpretation (referred by name)
        self.pre_act = o.name
        # add the target bias
        if self.use_bias:
            binp = kl.Input(self.bias_shape, name=self.get_bias_input(task))
            bias_inputs = [binp]
            # add the bias term
            if self.bias_net is not None:
                bias_x = self.bias_net(binp)
                # This allows to normalize the bias data first
                # (e.g. when we have profile counts to aggregate it first)
            else:
                # Don't use the nn 'bias' so that when the measurement bias = 0,
                # this term vanishes
                bias_x = kl.Dense(1, use_bias=False)(binp)
            o = kl.add([o, bias_x])
        else:
            bias_inputs = []
        if self.activation is not None:
            if isinstance(self.activation, str):
                o = kl.Activation(self.activation)(o)
            else:
                o = self.activation(o)
        self.post_act = o.name
        # label the target op so that we can use a dictionary of targets
        # to train the model
        return named_tensor(o, name=self.get_target(task)), bias_inputs
    def get_preact_tensor(self, graph=None):
        """Look up the pre-activation tensor by the name recorded in __call__."""
        if graph is None:
            graph = tf.get_default_graph()
        return graph.get_tensor_by_name(self.pre_act)
    def intp_tensors(self, preact_only=False, graph=None):
        """Return the available interpretation tensors keyed by name."""
        if graph is None:
            graph = tf.get_default_graph()
        if self.activation is None:
            # the post-activation doesn't
            # have any specific meaning when
            # we don't use any activation function
            return {"pre-act": graph.get_tensor_by_name(self.pre_act)}
        if preact_only:
            return {"pre-act": graph.get_tensor_by_name(self.pre_act)}
        else:
            return {"pre-act": graph.get_tensor_by_name(self.pre_act),
                    "output": graph.get_tensor_by_name(self.post_act)}
    # def get_intp_tensor(self, which='pre-act'):
    #     return self.intp_tensors()[which]
    def get_bias_input(self, task):
        """Format the bias input name for `task`."""
        return self.bias_input.format(task=task)
    def neutral_bias_input(self, task, length, seqlen):
        """Create dummy bias input
        Return: (k, v) tuple
        """
        # None entries in bias_shape are variable-length axes -> use seqlen.
        shape = tuple([x if x is not None else seqlen
                       for x in self.bias_shape])
        return (self.get_bias_input(task), np.zeros((length, ) + shape))
@gin.configurable
class BinaryClassificationHead(ScalarHead):
    """ScalarHead specialized for binary classification: sigmoid activation,
    binary cross-entropy loss and classification metrics by default.

    Bug fix: `loss_weight` was accepted but never forwarded to
    ScalarHead.__init__, so it silently stayed at the parent's default; it
    is now passed through.
    """
    def __init__(self, target_name,  # "{task}/scalar"
                 net,  # function that takes a keras tensor and returns a keras tensor
                 activation='sigmoid',
                 loss='binary_crossentropy',
                 loss_weight=1,
                 metric=ClassificationMetrics(),
                 postproc_fn=None,
                 # bias input
                 use_bias=False,
                 bias_net=None,
                 bias_input='bias/{task}/scalar',
                 bias_shape=(1,),
                 ):
        # override the defaults of ScalarHead
        super().__init__(target_name,
                         net,
                         activation=activation,
                         loss=loss,
                         loss_weight=loss_weight,
                         metric=metric,
                         postproc_fn=postproc_fn,
                         use_bias=use_bias,
                         bias_net=bias_net,
                         bias_input=bias_input,
                         bias_shape=bias_shape)
# TODO - mabye override the way we call outputs?
@gin.configurable
class ProfileHead(BaseHeadWBias):
    """Deals with the case where the output are multiple tracks of
    total shape (L, C) (L = sequence length, C = number of channels)
    Note: Since the contribution score will be a single scalar, the
    interpretation method will have to aggregate both across channels
    as well as positions
    """

    def __init__(self, target_name,  # "{task}/profile"
                 net,  # function that takes a keras tensor and returns a keras tensor
                 activation=None,
                 loss='mse',
                 loss_weight=1,
                 metric=RegressionMetrics(),
                 postproc_fn=None,
                 # bias input
                 use_bias=False,
                 bias_net=None,
                 bias_input='bias/{task}/profile',
                 bias_shape=(None, 1),
                 ):
        # Only store configuration here; the graph ops are built when
        # the head is called on an input tensor (see __call__).
        self.net = net
        self.loss = loss
        self.loss_weight = loss_weight
        self.metric = metric
        self.postproc_fn = postproc_fn
        self.target_name = target_name
        self.activation = activation
        self.bias_input = bias_input
        self.bias_net = bias_net
        self.use_bias = use_bias
        self.bias_shape = bias_shape

    def get_target(self, task):
        # Resolve the task-specific target name, e.g. "mytask/profile".
        return self.target_name.format(task=task)

    def __call__(self, inp, task):
        """Build the head's output on top of `inp` for `task`.

        Returns (named output tensor, list of extra bias input layers).
        Also records the pre/post-activation tensor names on the
        instance for later interpretation lookups.
        """
        o = self.net(inp)
        # remember the tensors useful for interpretation (referred by name)
        self.pre_act = o.name
        # add the target bias
        if self.use_bias:
            binp = kl.Input(self.bias_shape, name=self.get_bias_input(task))
            bias_inputs = [binp]
            # add the bias term
            if self.bias_net is not None:
                bias_x = self.bias_net(binp)
                # This allows to normalize the bias data first
                # (e.g. when we have profile counts to aggregate it first)
            else:
                # Don't use the nn 'bias' so that when the measurement bias = 0,
                # this term vanishes
                bias_x = kl.Conv1D(1, kernel_size=1, use_bias=False)(binp)
            o = kl.add([o, bias_x])
        else:
            bias_inputs = []
        if self.activation is not None:
            # The activation can be given either as a Keras activation
            # name (str) or as a callable layer/function.
            if isinstance(self.activation, str):
                o = kl.Activation(self.activation)(o)
            else:
                o = self.activation(o)
        self.post_act = o.name
        # label the target op so that we can use a dictionary of targets
        # to train the model
        return named_tensor(o, name=self.get_target(task)), bias_inputs

    def get_preact_tensor(self, graph=None):
        # Fetch the stored pre-activation tensor (default graph if no
        # graph is provided).
        if graph is None:
            graph = tf.get_default_graph()
        return graph.get_tensor_by_name(self.pre_act)

    @staticmethod
    def profile_contrib(p):
        """Summarizing the profile for the contribution scores
        wn: Normalized contribution (weighted sum of the contribution scores)
        where the weighted sum uses softmax(p) to weight it
        w2: Simple sum (p**2)
        w1: sum(p)
        winf: max(p)
        """
        # Note: unfortunately we have to use the kl.Lambda boiler-plate
        # to be able to do Model(inp, outputs) in deep-explain code
        # Normalized contribution - # TODO - update with tensorflow
        # NOTE(review): `tf.nn.softmax(..., dim=...)` is the deprecated
        # TF1 spelling of the `axis` argument -- confirm against the TF
        # version pinned by this project.
        wn = kl.Lambda(lambda p:
                       K.mean(K.sum(K.stop_gradient(tf.nn.softmax(p, dim=-2)) * p, axis=-2), axis=-1)
                       )(p)
        # Squared weight
        w2 = kl.Lambda(lambda p:
                       K.mean(K.sum(p * p, axis=-2), axis=-1)
                       )(p)
        # W1 weight
        w1 = kl.Lambda(lambda preact_m:
                       K.mean(K.sum(preact_m, axis=-2), axis=-1)
                       )(p)
        # Winf
        # 1. max across the positional axis, average the strands
        winf = kl.Lambda(lambda p:
                         K.mean(K.max(p, axis=-2), axis=-1)
                         )(p)
        return {"wn": wn,
                "w1": w1,
                "w2": w2,
                "winf": winf
                }

    def intp_tensors(self, preact_only=False, graph=None):
        """Return the required interpretation tensors (scalars)
        Note: Since we are predicting a track,
        we should return a single scalar here
        """
        if graph is None:
            graph = tf.get_default_graph()
        preact = graph.get_tensor_by_name(self.pre_act)
        postact = graph.get_tensor_by_name(self.post_act)
        # Contruct the profile summary ops
        preact_tensors = self.profile_contrib(preact)
        postact_tensors = dict_prefix_key(self.profile_contrib(postact), 'output_')
        if self.activation is None:
            # the post-activation doesn't
            # have any specific meaning when
            # we don't use any activation function
            return preact_tensors
        if preact_only:
            return preact_tensors
        else:
            return {**preact_tensors, **postact_tensors}

    # def get_intp_tensor(self, which='wn'):
    #     return self.intp_tensors()[which]

    def get_bias_input(self, task):
        # Task-specific Keras input-layer name for the bias track.
        return self.bias_input.format(task=task)

    def neutral_bias_input(self, task, length, seqlen):
        """Create dummy bias input
        Return: (k, v) tuple
        """
        # `None` entries in bias_shape stand for the sequence length.
        shape = tuple([x if x is not None else seqlen
                       for x in self.bias_shape])
        return (self.get_bias_input(task), np.zeros((length, ) + shape))
|
import logging

# Silence chatty third-party loggers.
# WARNING = 30
for _noisy_logger in ("matplotlib", "paramiko.transport"):
    logging.getLogger(_noisy_logger).setLevel(logging.WARNING)
# INFO = 20
for _verbose_logger in ("botocore", "smart_open", "urllib3"):
    logging.getLogger(_verbose_logger).setLevel(logging.INFO)
|
from . import TransformFunction, string_to_tfarg_function, mime_type_based_transform
import htmlmth.mods.javascript
# Strip JavaScript comments both from inline <script> HTML and from
# standalone JavaScript payloads; other MIME types pass through.
_strip_js_comments = string_to_tfarg_function(
    lambda source: htmlmth.mods.javascript.remove_comments(source))

remove_comments = TransformFunction("",
                                    "remove JavaScript comments",
                                    mime_type_based_transform({
                                        'text/html': _strip_js_comments,
                                        'text/javascript': _strip_js_comments,
                                    }))
|
import os
import sys
import pandas
import numpy
import argparse
import glob
import re
from collections import OrderedDict
from sklearn.preprocessing import StandardScaler
# Feature-group sizes: number of values each vector type contributes.
# NOTE(review): not referenced by the visible code -- confirm whether
# downstream consumers still need it.
config_dict = {
    'xvector': 9,
    'pvector': 32,
    'OneHotvector': 2
}

# Map "<speaker>_<book>" -> pair of identity features.
# NOTE(review): first element appears to encode gender from the f/m id
# prefix (1 = f, -1 = m), second the numeric speaker id -- confirm.
speakers_dict = {
    'ffr0001_mmebovary': (1, 1),
    'ffr0012_mmebovary': (1, 12),
    'mfr0013_comtesse': (-1, 13),
    'ffr0009_boule': (1, 9),
    'mfr0002_caglio': (-1, 2),
    'mfr0014_comtesse': (-1, 14),
    'ffr0011_comtesse': (1, 11),
    'mfr0008_notaire': (-1, 8),
    'mfr0014_notaire': (-1, 14),
    'ffr0012_boule': (1, 12),
    'mfr0013_caglio': (-1, 13),
    'mfr0015_boule': (-1, 15)
}
def main():
    """Assemble per-speaker feature vectors (one-hot identity, optional
    x-vector and p-vector read from per-speaker text files), z-score
    every feature column and write the result to a CSV.

    CLI: main.py <data_dir> <out_csv>
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('data', type=str, nargs=1, help='data')
    parser.add_argument('out', type=str, nargs=1, help='input')
    args = parser.parse_args()
    full_path_data = args.data[0]
    out_file_name = args.out[0]
    data_config_list = []
    for speaker_book_name, OneHotvector in speakers_dict.items():
        this_config_dict = OrderedDict()
        this_config_dict['speaker_id'] = speaker_book_name
        this_config_dict['OneHotvector-0'] = float(OneHotvector[0])
        this_config_dict['OneHotvector-1'] = float(OneHotvector[1])
        # Optional x-vector file: first line, ';'-separated, first 9 values.
        xvector_config = os.path.join(full_path_data, speaker_book_name + '-xvector.txt')
        if os.path.exists(xvector_config):
            with open(xvector_config, 'r') as xcf:
                for i, value in enumerate(xcf.readline().strip().split(';')[:9]):
                    this_config_dict['xvector-' + str(i).zfill(3)] = float(value)
        # Optional p-vector file: first line, all ';'-separated values.
        pvector_config = os.path.join(full_path_data, speaker_book_name + '-pvector.txt')
        if os.path.exists(pvector_config):
            with open(pvector_config, 'r') as pcf:
                for j, value in enumerate(pcf.readline().strip().split(';')):
                    this_config_dict['pvector-' + str(j).zfill(3)] = float(value)
        data_config_list.append(this_config_dict)
    extraFeat_df = pandas.DataFrame(data_config_list)
    #extraFeat_df.to_csv(out_file_name,index=False)
    # Standardize every numeric column independently (zero mean, unit
    # variance); the speaker_id column is re-attached afterwards.
    X_train = extraFeat_df.iloc[:, 1:]
    y_train = extraFeat_df['speaker_id'].copy()
    for col in X_train.columns:
        X_train[col] = StandardScaler().fit_transform(X_train[col].values.reshape(-1, 1))
    projected_df = pandas.DataFrame(X_train)
    projected_df['speaker_id'] = y_train.values.tolist()
    #projected_df.concat(y_train)
    #projected_df.columns = y_train.values.tolist()
    #print(projected_df.columns)
    # print(projected_df.head)
    projected_df.to_csv(out_file_name, index=False)
    #extratFeat_df_norm = StandardScaler().fit_transform(dataFrame)
    #print(extratFeat_df_norm.shape)
    #print(extratFeat_df_norm[2,:])


if __name__ == '__main__':
    main()
|
class Solution:
    """
    @param time: a "HH:MM" string (24-hour clock)
    @return: return a string represents time -- the next time of day
             strictly after `time` whose four digits are pairwise
             distinct, formatted "HH:MM"; "-1" for malformed input
    """

    def nextTime(self, time):
        parts = time.split(":")
        # Validate the "HH:MM" shape before parsing.
        if len(time) != 5 or len(parts) != 2:
            return "-1"
        try:
            # NOTE: originally named minute/second, but with a 24-hour
            # bound these are really hours and minutes.
            hour = int(parts[0])
            minute = int(parts[1])
        except ValueError:
            # Non-numeric components previously crashed with
            # ValueError; report "-1" like other malformed inputs.
            return "-1"
        # Reject out-of-range values, including negatives, which
        # previously produced garbage digits.
        if not (0 <= hour <= 23 and 0 <= minute <= 59):
            return "-1"
        # A valid answer always exists (e.g. 01:23), so the loop
        # terminates within 24*60 iterations.
        while True:
            minute += 1
            if minute == 60:
                minute = 0
                hour += 1
            if hour == 24:
                hour = 0
            d1, d2 = divmod(hour, 10)
            d3, d4 = divmod(minute, 10)
            # A set of size 4 means all four digits are distinct.
            if len({d1, d2, d3, d4}) == 4:
                return str(d1) + str(d2) + ":" + str(d3) + str(d4)
#!/usr/bin/python3
# Copyright (c) 2018-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import re
import sys
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from credential_helper import CredentialHelper
from dell_nfv import ConfigOvercloud
from ironic_helper import IronicHelper
from logging_helper import LoggingHelper
from utils import Utils
logging.basicConfig()
# Name the script's logger after this file (sans extension).
logger = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])
# The managed endpoints commonly use self-signed certificates, so TLS
# verification warnings are suppressed globally.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
home_dir = os.path.expanduser('~')
# Placeholders for undercloud credential globals.
# NOTE(review): nothing visible here populates them -- confirm they are
# set elsewhere before use.
UC_USERNAME = UC_PASSWORD = UC_PROJECT_ID = UC_AUTH_URL = ''
class ConfigEdge(ConfigOvercloud):
    """
    Description: Class responsible for overcloud configurations.
    """
    # NOTE(review): these class attributes run at import time, so
    # importing this module already contacts Ironic -- confirm this
    # side effect is intentional.
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name, node_type, node_type_data):
        """Store the edge-site metadata and derive the per-site
        nic-environment / instackenv file paths from the node type."""
        self.node_type = node_type
        self.node_type_data = json.loads(node_type_data)
        self.mtu = int(self.node_type_data["nfv_mtu"])
        # Normalize the node type into a directory name (non-alnum ->
        # underscore) and a compact suffix (non-alnum stripped).
        _dir = (re.sub(r'[^a-z0-9]', " ", node_type.lower()).replace(" ", "_"))
        _ntl = re.sub(r'[^a-z0-9]', "", node_type.lower())
        ne_name = "nic_environment_{}.yaml".format(_ntl)
        instack_name = "instackenv_{}.json".format(_ntl)
        nic_env_file = os.path.join(home_dir, _dir, ne_name)
        instackenv_file = os.path.join(home_dir, _dir, instack_name)
        self.instackenv = instackenv_file
        self.nic_env = nic_env_file
        super().__init__(overcloud_name)

    def fetch_nfv_parameters(self):
        """Compute the NFV deployment parameters for this edge site.

        Builds kernel boot arguments (IOMMU, optional hugepages,
        optional isolcpus) plus CPU-partitioning settings, and returns
        a dict with a "dell_env" section and -- for OVS-DPDK
        deployments -- a "dpdk" section.
        """
        logger.debug("Retrieving NFV parameters")
        ntd = self.node_type_data
        enable_hugepage = Utils.string_to_bool(ntd["hpg_enable"])
        enable_numa = Utils.string_to_bool(ntd["numa_enable"])
        nfv_type = self._get_nfv_type(ntd)
        is_ovs_dpdk = bool(nfv_type and nfv_type in ["dpdk", "both"])
        hostos_cpu_count = int(ntd["numa_hostos_cpu_count"])
        _dir = (re.sub(r'[^a-z0-9]', " ",
                       self.node_type.lower()).replace(" ", "_"))
        ntl = re.sub(r'[^a-z0-9]', "", self.node_type.lower())
        _f_name = "nic_environment_{}.yaml".format(ntl)
        nic_env_file = os.path.join(home_dir, _dir, _f_name)
        params = {}
        params_dell_env = params["dell_env"] = {}
        kernel_args = "iommu=pt intel_iommu=on"
        if enable_hugepage:
            hpg_num = self.nfv_params.calculate_hugepage_count(
                ntd["hpg_size"])
            # hpg_size like "1GB" -> hugepagesz "1G" (last char dropped).
            kernel_args += (" default_hugepagesz={} hugepagesz={}"
                            " hugepages={}").format(ntd["hpg_size"],
                                                    ntd["hpg_size"][0:-1],
                                                    str(hpg_num))
        if enable_numa:
            # Partition the CPUs of a representative compute node into
            # host-OS, (optionally) DPDK PMD and Nova pools.
            _, node_data = self.nfv_params.select_compute_node(self.node_type,
                                                              self.instackenv)
            self.nfv_params.parse_data(node_data)
            self.nfv_params.get_all_cpus()
            self.nfv_params.get_host_cpus(hostos_cpu_count)
            if is_ovs_dpdk:
                dpdk_nics = self.find_ifaces_by_keyword(nic_env_file,
                                                        'Dpdk')
                logger.debug("DPDK-NICs >>" + str(dpdk_nics))
                self.nfv_params.get_pmd_cpus(self.mtu, dpdk_nics)
                self.nfv_params.get_socket_memory(self.mtu, dpdk_nics)
            self.nfv_params.get_nova_cpus()
            self.nfv_params.get_isol_cpus()
            kernel_args += " isolcpus={}".format(self.nfv_params.isol_cpus)
            # dell-environmment
            nova_cpus = self.nfv_params.nova_cpus
            params_dell_env["NovaComputeCpuDedicatedSet"] = nova_cpus
            if is_ovs_dpdk:
                params_dpdk = params["dpdk"] = {}
                params_dpdk["OvsDpdkCoreList"] = self.nfv_params.host_cpus
                params_dpdk["NovaComputeCpuSharedSet"] = self.nfv_params.host_cpus
                params_dpdk["OvsPmdCoreList"] = self.nfv_params.pmd_cpus
                params_dpdk["OvsDpdkSocketMemory"] = self.nfv_params.socket_mem
                params_dpdk["IsolCpusList"] = self.nfv_params.isol_cpus
        params_dell_env["KernelArgs"] = kernel_args
        return params

    def _get_nfv_type(self, node_type_data):
        # Return the validated nfv_type ("dpdk", "sriov" or "both"),
        # or None when unset or unrecognized.
        if ("nfv_type" in node_type_data
                and len(node_type_data["nfv_type"].strip()) != 0
                and node_type_data["nfv_type"].strip() in ("dpdk",
                                                           "sriov", "both")):
            return node_type_data["nfv_type"].strip()
        return None
def main():
    """Parse the CLI arguments, compute the NFV parameters for an edge
    site and return them serialized as JSON."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
    arg_parser.add_argument("--edge_site",
                            default=None,
                            dest="node_type",
                            help="The name of edge site being configured")
    arg_parser.add_argument("--edge_site_data",
                            default=None,
                            dest="node_type_data",
                            help="The edge site metadata")
    arg_parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Turn on debugging for this script")
    LoggingHelper.add_argument(arg_parser)
    parsed_args = arg_parser.parse_args()
    LoggingHelper.configure_logging(parsed_args.logging_level)
    edge_config = ConfigEdge(parsed_args.overcloud_name,
                             parsed_args.node_type,
                             parsed_args.node_type_data)
    nfv_params = edge_config.fetch_nfv_parameters()
    logger.debug(">>>>>> nfv parameters {}".format(str(nfv_params)))
    return json.dumps(nfv_params)


if __name__ == "__main__":
    res = main()
    logger.debug(">>>>>> res {}".format(str(res)))
    sys.stdout.write(res)
|
import pytest
from numpy.testing import assert_almost_equal
import astropy.units as u
from sunpy.instr import fermi
from sunpy.time import parse_time
@pytest.mark.remote_data
def test_download_weekly_pointing_file():
    """The weekly pointing file for a date downloads as a FITS path."""
    # set a test date
    date = parse_time('2011-10-01')
    afile = fermi.download_weekly_pointing_file(date)
    assert isinstance(afile, str)
    assert afile.endswith('.fits')
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=5)
def test_detector_angles():
    """Detector-to-Sun angles match reference values for a known date.

    Checks both the per-date table (12 NaI detectors + time column)
    and the single-time lookup.
    """
    # set a test date
    date = parse_time('2012-02-15')
    file = fermi.download_weekly_pointing_file(date)
    det = fermi.get_detector_sun_angles_for_date(date, file)
    # 12 detector entries plus the time column.
    assert len(det) == 13
    assert_almost_equal(det['n0'][0].value, 21.73944, decimal=1)
    assert_almost_equal(det['n1'][0].value, 30.62983, decimal=1)
    assert_almost_equal(det['n2'][0].value, 74.67486, decimal=1)
    assert_almost_equal(det['n3'][0].value, 30.46062, decimal=1)
    assert_almost_equal(det['n4'][0].value, 73.89734, decimal=1)
    assert_almost_equal(det['n5'][0].value, 58.99893, decimal=1)
    assert_almost_equal(det['n6'][0].value, 47.31091, decimal=1)
    assert_almost_equal(det['n7'][0].value, 70.63391, decimal=1)
    assert_almost_equal(det['n8'][0].value, 106.30992, decimal=1)
    assert_almost_equal(det['n9'][0].value, 70.07033, decimal=1)
    assert_almost_equal(det['n10'][0].value, 106.97884, decimal=1)
    assert_almost_equal(det['n11'][0].value, 121.09603, decimal=1)
    # Single-time variant returns a plain dict of angles.
    det2 = fermi.get_detector_sun_angles_for_time(
        parse_time('2012-02-15 02:00'), file)
    assert len(det2) == 13
    assert type(det2) == dict
    assert_almost_equal(det2['n0'].value, 83.76092, decimal=1)
    assert_almost_equal(det2['n1'].value, 66.65847, decimal=1)
    assert_almost_equal(det2['n10'].value, 123.28952, decimal=1)
    assert_almost_equal(det2['n11'].value, 170.69869, decimal=1)
    assert_almost_equal(det2['n2'].value, 58.78532, decimal=1)
    assert_almost_equal(det2['n3'].value, 66.69068, decimal=1)
    assert_almost_equal(det2['n4'].value, 57.16402, decimal=1)
    assert_almost_equal(det2['n5'].value, 9.04924, decimal=1)
    assert_almost_equal(det2['n6'].value, 112.21230, decimal=1)
    assert_almost_equal(det2['n7'].value, 127.35783, decimal=1)
    assert_almost_equal(det2['n8'].value, 122.98894, decimal=1)
    assert_almost_equal(det2['n9'].value, 126.95987, decimal=1)
def test_met_to_utc():
    """Fermi mission-elapsed time converts to the expected UTC instant."""
    time = fermi.met_to_utc(500000000)
    assert (time - parse_time('2016-11-05T00:53:16.000')) < 1e-7 * u.s
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import time
import serial
from cached_property import cached_property
class MHS2300:
    """Driver for an MHS-2300-series signal generator on a serial port.

    Every command is framed as ':01,<cmd>,000\\r\\n'; replies are single
    ASCII lines.  Channel registers come in (ch1, ch2) pairs, hence the
    `base + Channel` register arithmetic below.
    """

    # Waveform code -> name; any other code is a user-defined waveform.
    Wdict = {0: 'Sine', 1: 'Square', 2: 'Triangle'}
    # Power-of-ten exponent -> SI prefix for pretty-printed frequencies.
    prefix = {3: 'K', 6: 'M', 9: 'G'}

    def __init__(self, port_name="COM3"):
        self.port = serial.Serial(port_name, 57600, timeout=0.1)
        # Always turns on both channels
        self.port.write(':01,w611,000\r\n'.encode())
        self.port.write(':01,w621,000\r\n'.encode())

    @staticmethod
    def Freq(kk):
        """Format a frequency in Hz as an engineering-notation string,
        e.g. 1000 -> '1.0KHz', 50 -> '50.0' (no prefix below kHz)."""
        # Extract the power-of-ten exponent from scientific notation and
        # round it down to a multiple of 3.
        # BUGFIX: the exponent was parsed by searching for a literal
        # '+', which broke for values < 1 Hz (negative exponents);
        # int() accepts both '+03' and '-01'.
        exponent = int("{:e}".format(kk).split('e')[1])
        real_order = exponent - (exponent % 3)
        value = str(kk / 10 ** real_order)
        try:
            # BUGFIX: the bare name `prefix` raised NameError here
            # (it is a class attribute), and the old bare `except`
            # silently swallowed it, so units were never appended.
            return value + MHS2300.prefix[real_order] + 'Hz'
        except KeyError:
            # No SI prefix for this magnitude (e.g. below 1 kHz).
            return value

    @staticmethod
    def Waveform(self, In):
        """Map a numeric waveform code to its name.

        Keeps the historical (self, In) signature because callers
        invoke it as `self.Waveform(self, code)`.
        """
        try:
            return self.Wdict[In]
        except KeyError:
            return 'Custom'

    @staticmethod
    def decodeN(self, s, ch):
        """Decode a raw channel-status reply and print a summary."""
        fields = s.split(',')
        # Each field carries a 3-char register prefix before the value.
        a1 = self.Waveform(self, int(fields[1][3:]))
        a2 = self.Freq(int(fields[2][3:]))
        a3 = str(int(fields[3][3:]) / 100)
        a4 = str(int(fields[4][3:]) - 100)
        a5 = str(int(fields[5][3:]) / 10)
        a6 = str(int(fields[6][3:]) / 10)
        print('Channel: ' + str(ch) + ' \n' + 'Waveform: ' + a1 + '\nFrequency: ' + a2 + '\nAmplitude: ' + a3 + 'V\nOffset: ' + a4 + ' \nDuty:' + a5 + ' \nPhase: ' + a6)

    def send(self, command):
        """Frame `command`, send it and return the stripped reply."""
        command = ':01,' + command + ',000\r\n'
        self.port.write(command.encode())
        data = self.port.readline()
        data_clean = data.decode().strip()
        return data_clean

    def on(self, Channel):
        # 'w6<ch>1' switches the given channel's output on.
        self.send('w6' + str(Channel) + '1')

    def off(self, Channel):
        # 'w6<ch>0' switches the given channel's output off.
        self.send('w6' + str(Channel) + '0')

    def Frequency(self, Channel, Value):
        # Register 22+Channel; the device expects Value scaled by 100.
        self.send('w' + str(22 + Channel) + str(int(Value * 100)))

    def Amplitude(self, Channel, Value):
        # Register 24+Channel; Value scaled by 100.
        self.send('w' + str(24 + Channel) + str(int(Value * 100)))

    def Offset(self, Channel, Value):
        # Register 26+Channel; the device encodes offset as Value + 100.
        self.send('w' + str(26 + Channel) + str(int(Value) + 100))

    def Duty(self, Channel, Value):
        # Register 28+Channel; Value scaled by 10.
        self.send('w' + str(28 + Channel) + str(int(Value) * 10))

    def Phase(self, Channel, Value):
        # Register 30+Channel; Value sent as-is.
        self.send('w' + str(30 + Channel) + str(int(Value)))

    def ReadChannel(self, Channel):
        """Poll a channel's raw status until a complete (91-char)
        reply arrives, then print it.

        NOTE: loops forever if Channel is not 1 or 2.
        """
        reply = []
        while len(reply) != 91:
            if Channel == 1:
                reply = self.send('r21,r23,r25,r27,r29,r31')
            elif Channel == 2:
                reply = self.send('r22,r24,r26,r28,r30,r32')
        print(reply)

    def ChannelInfo(self, Channel):
        """Poll a channel's status and print a decoded summary.

        NOTE: loops forever if Channel is not 1 or 2.
        """
        reply = []
        while len(reply) != 91:
            if Channel == 1:
                reply = self.send('r21,r23,r25,r27,r29,r31')
            elif Channel == 2:
                reply = self.send('r22,r24,r26,r28,r30,r32')
        self.decodeN(self, reply, Channel)
|
from dataclasses import dataclass
from transformers import PretrainedConfig, PreTrainedTokenizer, PreTrainedModel
from ..transformersx_base import log, join_path
from .models import task_model
from .model_config import TaskModelConfig
@dataclass
class TaskModel:
    """Bundle of the three artifacts loaded for a task."""
    config: PretrainedConfig        # model hyper-parameter configuration
    tokenizer: PreTrainedTokenizer  # tokenizer matching the model
    model: PreTrainedModel          # the model itself
class TaskModelFactory:
    """Creates, loads and caches TaskModel instances for a task."""

    def __init__(self, task_name, config: TaskModelConfig, model_class=None):
        self._task_name = task_name
        self.config = config
        # Optional custom model class overriding the default for the task.
        self._model_class = model_class
        # Loaded models keyed by model path ('pretrained' for the default).
        self._model_cache = {}

    def _create_task_model(self, model_class=None):
        """Build the (unloaded) task-model descriptor from the config."""
        t_model = task_model(model_path=self.config.model_name,
                             model_task_type=self.config.model_task_type,
                             language=self.config.language,
                             framework=self.config.framework)
        # use the custom class to replace the model Class
        if model_class is not None:
            t_model.model_class = model_class
        log.info("Built task model: {}".format(str(t_model)))
        return t_model

    def _load_task_model(self, model_base_path, model_class=None) -> TaskModel:
        """Load config, tokenizer and model and bundle them."""
        t_model = self._create_task_model(model_class)
        config, tokenizer, model = t_model.load(
            num_labels=self.config.num_labels,
            unit_test=self.config.unit_test,
            cache_dir=model_base_path
        )
        log.info(
            "Loaded task model, config: {}, tokenizer: {}, "
            "model: {}".format(str(config),
                               type(tokenizer),
                               type(model))
        )
        return TaskModel(config=config, tokenizer=tokenizer, model=model)

    def _freeze_parameters(self, model):
        """Freeze the backbone (if configured) and log parameter stats."""
        self._freeze_weights_main(model)
        if hasattr(model, 'num_parameters'):
            log.info("num params:" + str(model.num_parameters()))
            log.info("num trainable params:" + str(model.num_parameters(only_trainable=True)))
        if hasattr(model, 'named_parameters'):
            log.info("Model Parameters Details: ")
            for name, param in model.named_parameters():
                log.info("{}:{}".format(name, param.size()))

    def _freeze_weights_main(self, model):
        """Disable gradients for the model's main (backbone) module
        when `config.freeze_main` is set, so only heads are trained."""
        if model is None or not self.config.freeze_main:
            return
        # BUGFIX: this used eval("self.model." + ...), but the factory
        # has no `model` attribute, so freezing always raised at
        # runtime. Resolve the (possibly dotted) attribute path on the
        # passed-in model instead, without eval().
        main_parameters = model
        for attr in model.main_parameter.split('.'):
            main_parameters = getattr(main_parameters, attr)
        if hasattr(main_parameters, "parameters"):
            for param in main_parameters.parameters():
                param.requires_grad = False

    def get_task_model(self, model_path=None, for_train=True) -> TaskModel:
        """Return a (cached) TaskModel; freeze the backbone for training."""
        cached_name = 'pretrained' if not model_path else model_path
        # Renamed local (was `task_model`) to stop shadowing the
        # imported task_model() factory function.
        cached_model = self._model_cache.get(cached_name)
        if cached_model:
            return cached_model
        if not model_path:
            model_path = self.config.model_pretrained_dir
        cached_model = self._load_task_model(model_path, self._model_class)
        if for_train:
            self._freeze_parameters(cached_model.model)
        return cached_model
|
#!/usr/bin/env python3
import json
import requests
s = requests.Session()
class NanagogoError(Exception):
    """Error reported by the 7gogo API.

    Attributes:
        status_code: API-level status/error code from the JSON body.
        http_status_code: HTTP status code of the response.
        error_message: human-readable message from the API.
    """

    def __init__(self, status_code, http_status_code, error_message):
        # Forward the message to Exception so str(e) and tracebacks
        # show it (previously str(e) was always empty).
        super().__init__(error_message)
        self.status_code = status_code
        self.http_status_code = http_status_code
        self.error_message = error_message
class NanagogoResponse(object):
    """The response from the 7gogo private JSON API, returned as
    either a dict or a list."""
    pass


class NanagogoResponseDict(dict, NanagogoResponse):
    # Dict-shaped payload; NanagogoRequest.wrap() attaches
    # .response/.headers to instances.
    pass


class NanagogoResponseList(list, NanagogoResponse):
    # List-shaped payload; NanagogoRequest.wrap() attaches
    # .response/.headers to instances.
    pass
class NanagogoRequest(object):
    """A single request against the 7gogo private JSON API.

    `path` may be a full https URL, a (list/tuple of) path segments, or
    a single path string; the request is executed immediately on
    construction.
    """

    url_template = 'https://api.7gogo.jp/web/v2/{}'
    response = None
    # Known Japanese API error messages -> English translations.
    error_translations = {'リンクに問題があるか、ページが削除された可能性があります。': 'Not found'}

    def __init__(self, path, method="GET", params=None, data=None):
        # BUGFIX: `params={}` was a mutable default argument; use None
        # as the sentinel instead.
        if params is None:
            params = {}
        if isinstance(path, str) and path.startswith('https'):
            self.url = path
        elif isinstance(path, (list, tuple)):
            self.url = self.url_template.format('/'.join(path))
        else:
            self.url = self.url_template.format(path)
        self.method = method.lower()
        self.params = params
        self.data = data
        self.start()

    def start(self):
        """Execute the HTTP request; raise NanagogoError on HTTP errors.

        Raises AttributeError for unsupported HTTP method names.
        """
        requests_method = getattr(s, self.method)
        self.response = requests_method(self.url,
                                        params=self.params,
                                        data=self.data)
        try:
            self.response.raise_for_status()
        except requests.exceptions.HTTPError:
            error = self._get_error(self.response)
            raise NanagogoError(*error)

    def _get_error(self, response):
        """Extract (status, http_status, message) from an error reply.

        Raises ValueError when the body is not valid JSON.
        """
        http_status_code = response.status_code
        data = json.loads(response.text)
        # The API uses two error shapes: {"error": {"code", "message"}}
        # and {"status", "error"}.
        if isinstance(data['error'], dict):
            status = data['error']['code']
            error_message = data['error']['message']
            if error_message in self.error_translations:
                # Prepend the English translation for known messages.
                error_eng = self.error_translations[error_message]
                error_message = '{} ({})'.format(error_eng,
                                                 error_message)
        else:
            status = data['status']
            error_message = data['error']
        return (status, http_status_code, error_message)

    def wrap(self):
        """Return the response's 'data' payload wrapped in a
        NanagogoResponse subclass carrying .response/.headers."""
        data = json.loads(self.response.text)['data']
        if isinstance(data, list):
            res = NanagogoResponseList(data)
        elif isinstance(data, dict):
            res = NanagogoResponseDict(data)
        else:
            # BUGFIX: other payload types previously fell through to an
            # unbound-local NameError; fail with a clear message.
            raise TypeError('unexpected payload type: {}'.format(
                type(data).__name__))
        res.response = self.response
        res.headers = res.response.headers
        return res
if __name__ == "__main__":
pass
|
import time
import board
import busio
import digitalio
import sys
from adafruit_mcp230xx.mcp23017 import MCP23017
i2c = busio.I2C(board.SCL, board.SDA)
mcp = MCP23017(i2c)  # , address=0x20) # MCP23017

# Three alarm outputs on the expander's first three pins, all off.
alarm1 = mcp.get_pin(0)
alarm2 = mcp.get_pin(1)
alarm3 = mcp.get_pin(2)
for pin in (alarm1, alarm2, alarm3):
    pin.switch_to_output(value=False)

# Seven Alarm modes plus off: the (alarm1, alarm2, alarm3) states
# below are stepped through in order, held two seconds each, forever.
# (Same sequence as the original copy-pasted blocks, now table-driven.)
ALARM_STATES = [
    (False, False, False),
    (True, False, False),
    (True, True, False),
    (True, True, True),
    (True, False, True),
    (False, True, True),
    (False, False, True),
    (False, True, False),
]

while True:
    for state1, state2, state3 in ALARM_STATES:
        alarm1.value = state1
        alarm2.value = state2
        alarm3.value = state3
        time.sleep(2.0)
|
from aiida.orm import Dict, load_node
from aiida.engine import submit
from aiida import load_profile
# import the FleurinpgenCalculation
# load ingpen Code
# create a StuctureData
# create a StuctureData
# NOTE(review): Fe_structrure/Ni_structrure/Co_structrure are not
# defined anywhere visible -- this is a tutorial skeleton to be
# completed by the reader.
structures = [Fe_structrure, Ni_structrure, Co_structrure]
# create a parameters Dict
# options
# assemble inputs in a single dictionary
# submit
for structure in structures:
    # DO NOT FORGET TO PRINT OUT THE PK OF EACH SUBMISSION
    # BUGFIX: the loop body contained only a comment, which is a
    # SyntaxError; `pass` keeps the skeleton importable until the
    # submission code is filled in.
    pass
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
import fiber.templatetags.fiber_tags
from fiber.models import ContentItem, Page
from ...test_util import RenderMixin
class TestShowContent(RenderMixin, TestCase):
    """Tests for the {% show_content %} template tag."""

    def setUp(self):
        # Minimal fixture: one fiber page, one content item, one staff
        # user (staff users get inline-edit markup in the output).
        self.home = Page.objects.create(title='home', url='/')
        self.contact = ContentItem.objects.create(
            name='contact', content_html='<p><a href="mailto:email@example.com">Contact me<a></p>')
        # Staff user
        self.staff = User.objects.create_user('staff', 'staff@example.com', password='staff')
        self.staff.is_staff = True
        self.staff.save()

    def test_show_content(self):
        # Anonymous rendering: plain content, no fiber edit markup.
        self.assertRendered(
            '{% load fiber_tags %}{% show_content "contact" %}',
            '<div class="content"><p><a href="mailto:email@example.com">Contact me<a></p></div>')

    def test_show_content_staff_fiber_page(self):
        """Render a content item for a staff user on a fiber page"""
        # Staff + fiber page: data-fiber-data includes page context
        # (page_id/add_url/block_name).
        self.assertRendered(
            '{% load fiber_tags %}{% show_content "contact" %}',
            '''
<div data-fiber-data='{ "can_edit": true, "type": "content_item", "id": %(contact_pk)s, "url": "%(edit_url_contact)s", "add_url": "", "page_id": 1, "block_name": "" }' class="content">
<p><a href="mailto:email@example.com">Contact me<a></p>
</div>''' % {
                'contact_pk': self.contact.pk,
                'edit_url_contact': reverse('fiber_admin:fiber_contentitem_change', args=[self.contact.pk])
            }, {'fiber_page': self.home, 'user': self.staff})

    def test_show_content_staff_non_fiber_page(self):
        """Render a content item for a staff user on a non-fiber page"""
        # Staff without a fiber page: edit markup only, no page context.
        self.assertRendered(
            '{% load fiber_tags %}{% show_content "contact" %}',
            '''
<div data-fiber-data='{ "can_edit": true, "type": "content_item", "id": %(contact_pk)s, "url": "%(edit_url_contact)s" }' class="content">
<p><a href="mailto:email@example.com">Contact me<a></p>
</div>''' % {
                'contact_pk': self.contact.pk,
                'edit_url_contact': reverse('fiber_admin:fiber_contentitem_change', args=[self.contact.pk])
            }, {'user': self.staff})

    def test_show_content_that_does_not_exist(self):
        # Missing items render as empty output (auto-create disabled).
        self.assertRendered('{% load fiber_tags %}{% show_content "missing" %}', '')
class TestAutoCreate(RenderMixin, TestCase):
    """Tests for AUTO_CREATE_CONTENT_ITEMS: missing items referenced by
    {% show_content %} are created on the fly."""

    def setUp(self):
        # Flip the module-level flag on and remember the original value
        # so tearDown can restore it.
        self._auto_create = fiber.templatetags.fiber_tags.AUTO_CREATE_CONTENT_ITEMS
        fiber.templatetags.fiber_tags.AUTO_CREATE_CONTENT_ITEMS = True
        self.staff = User.objects.create_user('staff', 'staff@example.com', password='staff')
        self.staff.is_staff = True
        self.staff.save()

    def tearDown(self):
        fiber.templatetags.fiber_tags.AUTO_CREATE_CONTENT_ITEMS = self._auto_create

    def test_auto_create(self):
        # Rendering a missing item creates it (empty content).
        self.assertEqual(ContentItem.objects.all().count(), 0)
        self.assertRendered('{% load fiber_tags %}{% show_content "missing" %}', '<div class="content"></div>')
        self.assertEqual(ContentItem.objects.all().count(), 1)

    def test_auto_create_staff(self):
        # For staff users the auto-created item carries edit markup.
        self.assertRendered(
            '{% load fiber_tags %}{% show_content "missing" %}',
            '''
<div data-fiber-data='{ "can_edit": true, "type": "content_item", "id": %(item_pk)s, "url": "%(edit_url_item)s" }' class="content"></div>''' % {
                'item_pk': 1,
                'edit_url_item': reverse('fiber_admin:fiber_contentitem_change', args=[1])
            }, {'user': self.staff})
|
import numpy as np
from skimage import data
import napari
with napari.gui_qt():
    # Random 3-D binary blobs as a demo volume, shown in 3-D mode.
    blobs = data.binary_blobs(
        length=100, blob_size_fraction=0.05, n_dim=3, volume_fraction=0.03
    ).astype(float)
    viewer = napari.view_image(blobs.astype(float), ndisplay=3)
    # One planar quadrilateral around z-plane `n`, labeled with its
    # z index via the text/properties mechanism.
    n = 50
    shape = [[[n, 40, 40], [n, 40, 60], [n + 20, 60, 60], [n + 20, 60, 40]]]
    properties = {'z_index': [n]}
    text = {'text': 'z_index', 'color': 'green', 'anchor': 'upper_left'}
    shapes_layer = viewer.add_shapes(
        shape,
        edge_color=[0, 1, 0, 1],
        face_color='transparent',
        properties=properties,
        text=text,
    )
|
from config import TOKEN, DBHost, DBName, DBUser, DBPassword
class Config(object):
    """
    Configuration base, for all environments
    """
    DEBUG = False
    TESTING = False
    # We add a secret TOKEN, it is necessary for user authorization through LDAP to work
    SECRET_KEY = TOKEN
    CSRF_ENABLED = True
    # Default parameter SQLALCHEMY_TRACK_MODIFICATIONS
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Fix SESSION_COOKIE_SAMESITE
    SESSION_COOKIE_SAMESITE = "Strict"
class ProductionConfig(Config):
    """Production settings: PostgreSQL with a tuned connection pool."""

    SQLALCHEMY_ENGINE_OPTIONS = {
        "max_overflow": 15,
        "pool_pre_ping": True,   # validate connections before use
        "pool_recycle": 60 * 60,  # recycle connections hourly
        "pool_size": 30,
    }
    # Adding DB file on flask app
    SQLALCHEMY_DATABASE_URI = (
        f"postgresql://{DBUser}:{DBPassword}@{DBHost}:5432/{DBName}"
    )
    # Fix SQLALCHEMY_TRACK_MODIFICATIONS
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(Config):
    """Development settings: local SQLite database and debug mode."""

    # Adding DB file on flask app
    SQLALCHEMY_DATABASE_URI = "sqlite:///devices.db"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    DEBUG = True
class TestingConfig(Config):
    """Testing settings: enables Flask's TESTING mode."""

    TESTING = True
|
from django.db import models
# No related name is needed here, since symmetrical relations are not
# explicitly reversible.
class SelfRefer(models.Model):
    # Two symmetrical self-referential m2m fields on one model; the
    # regression doctests below verify they stay distinguishable.
    name = models.CharField(max_length=10)
    references = models.ManyToManyField('self')
    related = models.ManyToManyField('self')

    def __unicode__(self):
        return self.name
class Tag(models.Model):
    # Target model for the two m2m fields on Entry below.
    name = models.CharField(max_length=10)

    def __unicode__(self):
        return self.name
# A related_name is required on one of the ManyToManyField entries here because
# they are both addressable as reverse relations from Tag.
class Entry(models.Model):
    name = models.CharField(max_length=10)
    topics = models.ManyToManyField(Tag)
    # related_name avoids a reverse-accessor clash with `topics` on Tag.
    related = models.ManyToManyField(Tag, related_name="similar")

    def __unicode__(self):
        return self.name
# Two models both inheriting from a base model with a self-referential m2m field
class SelfReferChild(SelfRefer):
    # Inherits SelfRefer's self-referential m2m; symmetry across the
    # two child classes is exercised by the doctests below.
    pass


class SelfReferChildSibling(SelfRefer):
    pass
# Many-to-Many relation between models, where one of the PK's isn't an Autofield
class Line(models.Model):
    # Simple AutoField-pk model related to Worksheet (non-auto pk) below.
    name = models.CharField(max_length=100)
class Worksheet(models.Model):
    # Non-AutoField primary key (regression #11311).
    id = models.CharField(primary_key=True, max_length=100)
    # NOTE(review): null=True has no effect on ManyToManyField in
    # modern Django -- confirm against the Django version targeted.
    lines = models.ManyToManyField(Line, blank=True, null=True)
# Doctest suite (collected through the module-level __test__ hook):
# covers multiple m2m fields to the same model, FieldError messages
# that hide internal related names, m2m symmetry across inherited
# models, and non-AutoField primary keys in m2m relations (#11311).
__test__ = {"regressions": """
# Multiple m2m references to the same model or a different model must be
# distinguished when accessing the relations through an instance attribute.
>>> s1 = SelfRefer.objects.create(name='s1')
>>> s2 = SelfRefer.objects.create(name='s2')
>>> s3 = SelfRefer.objects.create(name='s3')
>>> s1.references.add(s2)
>>> s1.related.add(s3)
>>> e1 = Entry.objects.create(name='e1')
>>> t1 = Tag.objects.create(name='t1')
>>> t2 = Tag.objects.create(name='t2')
>>> e1.topics.add(t1)
>>> e1.related.add(t2)
>>> s1.references.all()
[<SelfRefer: s2>]
>>> s1.related.all()
[<SelfRefer: s3>]
>>> e1.topics.all()
[<Tag: t1>]
>>> e1.related.all()
[<Tag: t2>]
# The secret internal related names for self-referential many-to-many fields
# shouldn't appear in the list when an error is made.
>>> SelfRefer.objects.filter(porcupine='fred')
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'porcupine' into field. Choices are: id, name, references, related, selfreferchild, selfreferchildsibling
# Test to ensure that the relationship between two inherited models
# with a self-referential m2m field maintains symmetry
>>> sr_child = SelfReferChild(name="Hanna")
>>> sr_child.save()
>>> sr_sibling = SelfReferChildSibling(name="Beth")
>>> sr_sibling.save()
>>> sr_child.related.add(sr_sibling)
>>> sr_child.related.all()
[<SelfRefer: Beth>]
>>> sr_sibling.related.all()
[<SelfRefer: Hanna>]
# Regression for #11311 - The primary key for models in a m2m relation
# doesn't have to be an AutoField
>>> w = Worksheet(id='abc')
>>> w.save()
>>> w.delete()
"""
}
|
# Module data
import requests
from bs4 import BeautifulSoup
import pandas as pd
from datetime import timedelta,date,datetime
import matplotlib.pyplot as plt
import seaborn as sns
import os
def wrangle(wd, cache):
    """Download (or reuse a cached copy of) the CAISO renewables data.

    Returns the path of the CSV data file under `wd`/data/.
    """
    print("Wrangling data ...")
    print(str.format('wd={}', wd))
    data_path = wd + '/data/data.csv'
    have_cache = False
    if cache:
        print("Checking for cached data ...")
        if os.path.isfile(data_path):
            print(" ... found.")
            have_cache = True
        else:
            print(" ... not found. Downloading ...")
    # Download unless caching is on and a cached file was found.
    if not (cache and have_cache):
        n_rows, n_cols = scrape_url('http://content.caiso.com', data_path)
        print(str.format("Saved {0} rows x {1} columns in file {2}", n_rows, n_cols, data_path))
    return data_path
def scrape_url(base_url, file_path):
    """Scrape the CAISO renewables report index, wrangle every linked daily
    .txt report, and save the combined table to +file_path+.

    Returns (row_count, col_count) of the saved DataFrame.

    Fixes: ``exist_ok=True`` was being passed to ``os.path.dirname`` instead
    of ``os.makedirs`` (a guaranteed TypeError at runtime), and row-by-row
    ``DataFrame.append`` (removed in pandas 2.x) is replaced with a single
    ``pd.concat``.
    """
    page = requests.get(str.format('{0}/green/renewrpt/files.html', base_url))
    soup = BeautifulSoup(page.content, 'html.parser')
    links = soup.find_all('a')
    columns = ['DATE', 'HOUR', 'RENEWABLES', 'NUCLEAR', 'THERMAL', 'IMPORTS', 'HYDRO']
    # Collect one frame per daily report, then concatenate once
    frames = []
    for link in links:
        href = link.attrs['href']
        if str.endswith(href, '.txt'):
            frames.append(wrangle_data(base_url + href))
    if frames:
        dataframe = pd.concat(frames, ignore_index=True)
    else:
        dataframe = pd.DataFrame(columns=columns)
    out_dir = os.path.dirname(file_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    dataframe.to_csv(file_path, index=False)
    return dataframe.shape[0], dataframe.shape[1]
def wrangle_data(file_url):
    """Download one CAISO daily production .txt report and return a DataFrame
    of its 24 hourly rows (all values kept as strings).

    Fix: row-by-row ``DataFrame.append`` was removed in pandas 2.x; rows are
    now collected in a list and built with one DataFrame call.
    """
    print(str.format("Wrangling {0} ...", file_url))
    f = requests.get(file_url)
    txt = f.text
    # The report date is the first token of the first line
    date = txt.splitlines()[0].split()[0]
    tbl_txt = txt[str.find(txt, 'Hourly Breakdown of Total Production by Resource Type'):]
    tbl_lines = tbl_txt.splitlines()
    rows = []
    # 24 hourly rows follow the two header lines of the table
    for i in range(2, 26):
        row = tbl_lines[i].split()
        rows.append({'DATE': date, 'HOUR': row[0], 'RENEWABLES': row[1],
                     'NUCLEAR': row[2], 'THERMAL': row[3], 'IMPORTS': row[4],
                     'HYDRO': row[5]})
    return pd.DataFrame(rows, columns=['DATE', 'HOUR', 'RENEWABLES', 'NUCLEAR',
                                       'THERMAL', 'IMPORTS', 'HYDRO'])
def preprocess(data_path, cache):
    """Cleanse, format, impute and feature-engineer the raw data CSV.

    The result is cached next to +data_path+ as preprocessed_data.csv and a
    summary plot is produced. Returns the preprocessed CSV path.

    Fix: corrected the "Preprocessiong" typo in the progress message.
    """
    print("Preprocessing data ...")
    preprocessed_data_path = os.path.dirname(data_path) + '/preprocessed_data.csv'
    preprocessedDataFound = False
    if cache:
        print("Caching is enabled. Checking " + preprocessed_data_path + ' ...')
        if os.path.isfile(preprocessed_data_path):
            print(' ... Found.')
            preprocessedDataFound = True
        else:
            print(' ... Not found!')
    if (not cache) or (cache and not preprocessedDataFound):
        dataframe = pd.read_csv(data_path)
        # Pipeline: cleanse -> coerce dtypes -> impute gaps -> derive features
        dataframe = df_cleanse(dataframe)
        dataframe = df_format(dataframe)
        dataframe = df_fill(dataframe)
        dataframe = df_engineer(dataframe)
        os.makedirs(os.path.dirname(preprocessed_data_path), exist_ok=True)
        dataframe.to_csv(preprocessed_data_path, index=False)
        print(str.format("Saved preprocessed data: {0} rows x {1} columns in file {2}",
                         dataframe.shape[0], dataframe.shape[1], preprocessed_data_path))
    plot(preprocessed_data_path)
    return preprocessed_data_path
def df_cleanse(df):
    """Replace known spreadsheet artifacts in the raw data with usable values.

    - HOUR '2R' -> '24'
    - '#REF!' in RENEWABLES zeroes RENEWABLES and NUCLEAR
    - '#NAME?' in RENEWABLES zeroes the whole numeric row
    - '#VALUE!' is zeroed per column in RENEWABLES/THERMAL/IMPORTS/HYDRO

    Fix: the original used chained assignment (``df.loc[id].COL = x``), which
    writes to a temporary copy and leaves the DataFrame unchanged; boolean
    masks with ``df.loc[mask, col]`` assign in place.
    """
    print("Cleansing ...")
    # replace HOUR=2R with HOUR=24
    df.loc[df['HOUR'] == '2R', 'HOUR'] = '24'
    # replace #REF! with 0 in RENEWABLES and NUCLEAR
    ref_mask = df['RENEWABLES'] == '#REF!'
    df.loc[ref_mask, ['RENEWABLES', 'NUCLEAR']] = '0'
    # replace #NAME? with 0 in entire row
    name_mask = df['RENEWABLES'] == '#NAME?'
    df.loc[name_mask, ['RENEWABLES', 'NUCLEAR', 'THERMAL', 'IMPORTS', 'HYDRO']] = '0'
    # replace #VALUE! with 0 in RENEWABLES, THERMAL, IMPORTS and HYDRO
    for col in ('RENEWABLES', 'THERMAL', 'IMPORTS', 'HYDRO'):
        df.loc[df[col] == '#VALUE!', col] = '0'
    return df
def df_format(df):
    """Coerce all columns to proper dtypes and add the IMPUTED flag column."""
    print("Formatting ...")
    # New flag column; rows added later by imputation will set this to 1
    df['IMPUTED'] = '0'
    df['DATE'] = pd.to_datetime(df['DATE'])
    df['HOUR'] = pd.to_numeric(df['HOUR'], downcast='integer')
    for column in ('RENEWABLES', 'NUCLEAR', 'THERMAL', 'IMPORTS', 'HYDRO', 'IMPUTED'):
        df[column] = pd.to_numeric(df[column])
    return df
def df_fill(df):
    """Impute missing rows (dates/hours) first, then missing values."""
    return df_fill_values(df_fill_dates(df))
def df_fill_dates(df):
    """Insert zero-valued placeholder rows for any missing (DATE, HOUR) pair
    between the first and last date in the frame, then re-sort and re-index.

    Fix: row-by-row ``DataFrame.append`` was removed in pandas 2.x; new rows
    are collected and added with a single ``pd.concat``. ``reset_index(drop=True)``
    replaces the reset-then-drop-column dance.
    """
    print("Filling missing dates ...")
    start_date = min(df.DATE).date()
    end_date = max(df.DATE).date()
    new_rows = []
    for single_date in daterange(start_date, end_date):
        print(single_date.strftime("%m/%d/%y"))
        dt = datetime(single_date.year, single_date.month, single_date.day)
        date_data = df.loc[df['DATE'] == dt]
        if len(date_data) < 24:
            # fill missing hours for this date
            for hr in range(1, 25):
                hr_data = df.loc[(df['DATE'] == dt) & (df['HOUR'] == hr)]
                if len(hr_data) < 1:
                    print(str.format("Adding DATE: {0}, HOUR: {1}", dt.strftime("%m/%d/%y"), hr))
                    new_rows.append({'DATE': dt, 'HOUR': hr, 'RENEWABLES': 0,
                                     'NUCLEAR': 0, 'THERMAL': 0, 'IMPORTS': 0,
                                     'HYDRO': 0, 'IMPUTED': 0})
    if new_rows:
        df = pd.concat([df, pd.DataFrame(new_rows)], ignore_index=True)
    df.sort_values(by=['DATE', 'HOUR'], inplace=True, ascending=True)
    df = df.reset_index(drop=True)
    return df
def df_fill_values(df):
    """Impute zero gaps in each production column in turn."""
    print("Filling missing values ...")
    for column in ('RENEWABLES', 'NUCLEAR', 'THERMAL', 'IMPORTS', 'HYDRO'):
        df = df_fill_values_col(df, column)
    return df
def df_fill_values_col(df, col):
    """Impute runs of zeros in df[col] with the average of the surrounding
    non-zero values (a missing neighbour at either end counts as 0), marking
    each imputed row in the IMPUTED column.

    Fix: the original resumed scanning at the last filled index (``i = j - 1``),
    which loops forever whenever the imputed average is itself 0 (e.g. a column
    that both starts and ends with zeros); it also depended on df[col] being a
    live view of the frame. We now record the average as the last seen value
    and resume after the gap, which is equivalent in every terminating case.
    """
    print(str.format("Column {0} ...", col))
    values = df[col]
    n = len(values)
    last_value = 0
    i = 0
    while i < n:
        if values[i] == 0:
            # locate the end of this zero run
            j = i
            while j < n and values[j] == 0:
                j = j + 1
            next_value = values[j] if j < n else 0
            # fill the whole run with the average of its neighbours
            avg = (last_value + next_value) / 2
            for k in range(i, j):
                print(str.format(" Imputing column {0} row {1} as {2} ...", col, k, avg))
                df.at[k, col] = avg
                df.at[k, 'IMPUTED'] = 1
            last_value = avg
            i = j
        else:
            # remember last nonzero value
            last_value = values[i]
            i = i + 1
    return df
def df_engineer(df):
    """Derive TIMESTAMP, NONRENEWABLES and RENEWABLES_PCT columns."""
    print("Feature engineering ...")
    print(" Adding timestamps ...")
    # Seed the column with the date, then shift each row by its hour.
    # HOUR is 1-based (1..24): hour N marks the interval starting at N-1.
    df['TIMESTAMP'] = df['DATE']
    for row in range(len(df)):
        df.at[row, 'TIMESTAMP'] = df['DATE'][row] + timedelta(hours=int(df['HOUR'][row]) - 1)
    print(" Adding NONRENEWABLES ...")
    df['NONRENEWABLES'] = df['NUCLEAR'] + df['THERMAL'] + df['HYDRO'] + df['IMPORTS']
    print(" Adding RENEWABLES_PCT ...")
    df['RENEWABLES_PCT'] = 100 * (df['RENEWABLES'] / (df['RENEWABLES'] + df['NONRENEWABLES']))
    return df
def split(data_path, train_pct):
    """Split the preprocessed CSV into train/test CSVs on a whole-day boundary.

    +train_pct+ is the percentage of rows for training; the split index is
    snapped down to a multiple of 24 so no day is split across sets.
    Returns (train_data_path, test_data_path).

    Fix: the test slice previously started at ``split_idx + 1``, silently
    dropping one row from the dataset; it now starts at ``split_idx`` so
    train + test cover every row exactly once.
    """
    print("Splitting data ...")
    print(str.format(" Training: {0}%, Testing {1}%", train_pct, 100 - train_pct))
    dataframe = pd.read_csv(data_path)
    split_idx = int(len(dataframe) * (train_pct / 100))
    split_idx = split_idx - split_idx % 24  # snap to a 24-hour boundary
    df_train = dataframe[:split_idx]
    df_test = dataframe[split_idx:]
    wd = os.path.dirname(data_path)
    train_data_path = wd + '/train_data.csv'
    test_data_path = wd + '/test_data.csv'
    os.makedirs(wd, exist_ok=True)
    df_train.to_csv(train_data_path, index=False)
    df_test.to_csv(test_data_path, index=False)
    return train_data_path, test_data_path
def daterange(start_date, end_date):
    """Yield each date from start_date up to (but excluding) end_date."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
def plot(data_path):
    """Render a daily-mean scatter plot and an hourly box plot of
    RENEWABLES_PCT, saved to <data dir>/../images/history.png.

    Fixes: unit-less ``astype('datetime64')`` is rejected by pandas >= 2
    (use ``pd.to_datetime``); positional ``sns.boxplot(x, y)`` arguments were
    removed in seaborn >= 0.12 (use keywords); the ``images/`` directory is
    now created before ``savefig`` (only its parent was created before).
    """
    # load data
    print("Creating data plots ...")
    df = pd.read_csv(data_path)
    df['TIMESTAMP'] = pd.to_datetime(df['TIMESTAMP'])
    df.set_index('TIMESTAMP', inplace=True)
    # plot data
    plt.rcParams["figure.figsize"] = (12, 9)
    y = df['RENEWABLES_PCT']
    daily = y.resample('24H').mean()
    hour = df['HOUR'].astype(int)
    plt.subplot(2, 1, 1)
    plt.scatter(daily.index, daily)
    plt.title('Renewables Power Production (%)')
    plt.subplot(2, 1, 2)
    sns.boxplot(x=hour, y=y)
    plt.title('Renewables Power Production (%) grouped by Hour')
    wd = os.path.dirname(data_path) + '/..'
    os.makedirs(wd + '/images', exist_ok=True)
    plt.savefig(wd + '/images/history.png')
    print(str.format("Saved data plot as {} ", wd + '/images/history.png'))
import torch
import numpy as np
import matplotlib.pyplot as plt
def generate_sample_data(cfg, toTensor, device):
    """Draw a synthetic linear factor model X = L @ Z + U.

    Z ~ N(0, 1) are the latent factors, L a random loading matrix, S a
    diagonal noise-scale matrix, and U per-sample Gaussian noise. Returns
    (Z, L, S, U, X), converted to torch tensors on +device+ when +toTensor+.
    """
    np.random.seed(cfg['RANDOM_SEED'])
    n_factors = cfg['NUM_FACTORS']
    n_samples = cfg['NUM_SAMPLES']
    n_features = cfg['NUM_FEATURES']
    Z = np.random.normal(0, 1, [n_factors, n_samples])  # ~ N(0,1)
    L = np.random.rand(n_features, n_factors)
    S = np.abs(np.diag(np.random.rand(n_features)))  # diagonal matrix
    U = np.zeros([n_features, n_samples])
    for sample in range(n_samples):
        U[:, sample] = np.random.normal(0, np.diag(S))
    X = np.matmul(L, Z) + U
    if toTensor:
        Z, L, S, U, X = (torch.Tensor(arr).to(device) for arr in (Z, L, S, U, X))
    return Z, L, S, U, X
def plot(values, metric):
    """Line-plot a per-iteration metric from the EM loop and display it."""
    plt.figure(figsize=[16, 9])
    plt.title(f"{metric} vs EM iterations.")
    plt.plot(values, label=metric)
    plt.ylabel("Prediction Error")
    plt.xlabel("EM Iterations")
    plt.grid()
    plt.legend()
    plt.show()
# -*- coding: utf-8 -*-
"""Tests all methods on Client"""
# Import standard library
import glob
# Import modules
import pytest
# Import from package
import linksight as ls
def test_dataset_match(mock_api, client, match_response):
    """Dataset.match() should return a Match whose keys mirror the fixture."""
    paths = glob.glob('./**/test_areas.csv', recursive=True)
    with open(paths[0], 'r') as fp:
        dataset = client.create_dataset(fp)
        result = dataset.match(source_prov_col='Province')
        assert isinstance(result, ls.resource.resources.Match)
        assert result.keys() == match_response.keys()
def test_dataset_match_no_columns(mock_api, client):
    """match() with no column names at all must raise ValueError."""
    paths = glob.glob('./**/test_areas.csv', recursive=True)
    with open(paths[0], 'r') as fp:
        dataset = client.create_dataset(fp)
        with pytest.raises(ValueError):
            dataset.match()  # No column names
@pytest.mark.webtest
def test_dataset_match_web(client):
    """Live-API variant: match() should return a Match resource."""
    paths = glob.glob('./**/test_areas.csv', recursive=True)
    with open(paths[0], 'r') as fp:
        dataset = client.create_dataset(fp)
        result = dataset.match(source_prov_col='Province')
        assert isinstance(result, ls.resource.resources.Match)
@pytest.mark.webtest
def test_dataset_match_no_columns_web(client):
    """Live-API variant: match() without column names must raise ValueError."""
    paths = glob.glob('./**/test_areas.csv', recursive=True)
    with open(paths[0], 'r') as fp:
        dataset = client.create_dataset(fp)
        with pytest.raises(ValueError):
            dataset.match()  # No column names
|
from inspect import getsource
import heapq
import matplotlib.pyplot as plt
import networkx as nx
from matplotlib import lines
import ipywidgets as widgets
from search import GraphProblem, romania_map
from notebook import final_path_colors
def show_tree(graph_data, node_colors=None):
    """Draw a search graph with per-node colors, labels, edge weights and a
    legend explaining the color coding.

    +graph_data+ supplies 'graph_dict', 'node_colors', 'node_positions',
    'node_label_positions' and 'edge_weights'. +node_colors+ overrides the
    stored colors when given (used by the step-through visualiser).

    Fix: replaced ``!= None`` with the idiomatic ``is not None`` (PEP 8).
    """
    G = nx.Graph(graph_data['graph_dict'])
    node_colors = node_colors or graph_data['node_colors']
    node_positions = graph_data['node_positions']
    node_label_pos = graph_data['node_label_positions']
    edge_weights = graph_data['edge_weights']
    # set the size of the plot
    plt.figure(figsize=(8, 5))
    # draw the graph (both nodes and edges) with locations
    nx.draw(G, pos={k: node_positions[k] for k in G.nodes()},
            node_color=[node_colors[node] for node in G.nodes()],
            linewidths=0.3, edgecolors='k')
    # draw labels for nodes
    node_label_handles = nx.draw_networkx_labels(G, pos=node_label_pos, font_size=14)
    # add a white bounding box behind the node labels
    for label in node_label_handles.values():
        label.set_bbox(dict(facecolor='white', edgecolor='none'))
    # add edge labels to the graph (for displaying the edge_weights)
    if next(iter(edge_weights.values())) is not None:
        nx.draw_networkx_edge_labels(G, pos=node_positions, edge_labels=edge_weights, font_size=14)
    # add a legend
    white_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="white")
    orange_circle = lines.Line2D([], [], color="orange", marker='o', markersize=15, markerfacecolor="orange")
    red_circle = lines.Line2D([], [], color="red", marker='o', markersize=15, markerfacecolor="red")
    gray_circle = lines.Line2D([], [], color="gray", marker='o', markersize=15, markerfacecolor="gray")
    green_circle = lines.Line2D([], [], color="green", marker='o', markersize=15, markerfacecolor="green")
    plt.legend((white_circle, orange_circle, red_circle, gray_circle, green_circle),
               ('Un-explored', 'Frontier', 'Currently Exploring', 'Explored', 'Final Solution'),
               numpoints=1, prop={'size': 14}, loc=(.8, .75))
    # show the plot. No need to use in notebooks. nx.draw will show the graph itself.
    plt.show()
## helper functions for visualisations
def display_steps(graph_data, user_input, algorithm=None, problem=None):
    """Interactively step through a search algorithm's exploration of a graph.

    With user_input False, +algorithm+ and +problem+ are used directly; with
    user_input True, ipywidgets dropdowns let the user pick the algorithm
    (when +algorithm+ is a dict of name -> callable) and the start/goal
    cities. A slider replays the per-iteration node colorings produced by
    the algorithm; the final entry shows the solution path.

    NOTE(review): relies on the module-level global ``all_node_colors`` being
    (re)bound by the visualize callbacks before the slider callback reads it,
    and on the notebook-provided ``display`` builtin — intended for Jupyter.
    """
    initial_node_colors = graph_data['node_colors']
    if user_input == False:
        def slider_callback(iteration):
            # don't show graph for the first time running the cell calling this function
            try:
                show_tree(graph_data, node_colors=all_node_colors[iteration])
            except:
                pass
        def visualize_callback(Visualize):
            if Visualize is True:
                button.value = False
                global all_node_colors
                iterations, all_node_colors, node = algorithm(problem)
                solution = node.solution()
                all_node_colors.append(final_path_colors(all_node_colors[0], problem, solution))
                # sweep the slider through every recorded step to animate
                slider.max = len(all_node_colors) - 1
                for i in range(slider.max + 1):
                    slider.value = i
                    #time.sleep(.5)
        slider = widgets.IntSlider(min=0, max=1, step=1, value=0)
        slider_visual = widgets.interactive(slider_callback, iteration=slider)
        display(slider_visual)
        button = widgets.ToggleButton(value=False)
        button_visual = widgets.interactive(visualize_callback, Visualize=button)
        display(button_visual)
    if user_input == True:
        node_colors = dict(initial_node_colors)
        if isinstance(algorithm, dict):
            # only the algorithms known to the notebook UI are accepted
            assert set(algorithm.keys()).issubset({"Breadth First Tree Search",
                                                   "Depth First Tree Search",
                                                   "Breadth First Search",
                                                   "Depth First Graph Search",
                                                   "Best First Graph Search",
                                                   "Uniform Cost Search",
                                                   "Depth Limited Search",
                                                   "Iterative Deepening Search",
                                                   "Greedy Best First Search",
                                                   "A-star Search",
                                                   "Recursive Best First Search"})
            algo_dropdown = widgets.Dropdown(description="Search algorithm: ",
                                             options=sorted(list(algorithm.keys())),
                                             value="Breadth First Tree Search")
            display(algo_dropdown)
        elif algorithm is None:
            print("No algorithm to run.")
            return 0
        def slider_callback(iteration):
            # don't show graph for the first time running the cell calling this function
            try:
                show_tree(graph_data, node_colors=all_node_colors[iteration])
            except:
                pass
        def visualize_callback(Visualize):
            if Visualize is True:
                button.value = False
                # build the problem from the currently selected start/goal
                problem = GraphProblem(start_dropdown.value, end_dropdown.value, romania_map)
                global all_node_colors
                user_algorithm = algorithm[algo_dropdown.value]
                iterations, all_node_colors, node = user_algorithm(problem)
                solution = node.solution()
                all_node_colors.append(final_path_colors(all_node_colors[0], problem, solution))
                slider.max = len(all_node_colors) - 1
                for i in range(slider.max + 1):
                    slider.value = i
                    #time.sleep(.5)
        start_dropdown = widgets.Dropdown(description="Start city: ",
                                          options=sorted(list(node_colors.keys())), value="Arad")
        display(start_dropdown)
        end_dropdown = widgets.Dropdown(description="Goal city: ",
                                        options=sorted(list(node_colors.keys())), value="Fagaras")
        display(end_dropdown)
        button = widgets.ToggleButton(value=False)
        button_visual = widgets.interactive(visualize_callback, Visualize=button)
        display(button_visual)
        slider = widgets.IntSlider(min=0, max=1, step=1, value=0)
        slider_visual = widgets.interactive(slider_callback, iteration=slider)
        display(slider_visual)
class PriorityQueue:
    """A queue that always yields the minimum (or maximum) element first, as
    determined by the key function +f+ and +order+.

    With order='min' (the default), pop() returns the item with the smallest
    f(x); with order='max', the item with the largest f(x). Also supports
    dict-like membership tests and lookup by item.
    """

    def __init__(self, order='min', f=lambda x: x):
        self.heap = []
        if order == 'min':
            self.f = f
        elif order == 'max':
            # Negate the key so the max element sits at the top of the min-heap
            self.f = lambda x: -f(x)
        else:
            raise ValueError("order must be either 'min' or 'max'.")

    def append(self, item):
        """Push item onto the heap, keyed by f(item)."""
        heapq.heappush(self.heap, (self.f(item), item))

    def extend(self, items):
        """Push every element of items onto the heap."""
        for element in items:
            self.append(element)

    def pop(self):
        """Remove and return the best item (min or max f(x), per order)."""
        if not self.heap:
            raise Exception('Trying to pop from empty PriorityQueue.')
        return heapq.heappop(self.heap)[1]

    def getvalue(self, key):
        """Return the first (value, item) pair whose item equals key.

        Raises KeyError if key is not present.
        """
        for pair in self.heap:
            if pair[1] == key:
                return pair
        raise KeyError(str(key) + " is not in the priority queue")

    def __len__(self):
        """Return the number of items currently queued."""
        return len(self.heap)

    def __contains__(self, key):
        """Return True if key is queued."""
        return any(entry == key for _, entry in self.heap)

    def __getitem__(self, key):
        """Return the first queued item equal to key; KeyError if absent."""
        for _, entry in self.heap:
            if entry == key:
                return entry
        raise KeyError(str(key) + " is not in the priority queue")

    def __delitem__(self, key):
        """Delete the first occurrence of key, then restore the heap invariant."""
        matches = [entry == key for _, entry in self.heap]
        try:
            del self.heap[matches.index(True)]
        except ValueError:
            raise KeyError(str(key) + " is not in the priority queue")
        heapq.heapify(self.heap)
|
import os
import click
from biom import load_table
from deicode.rpca import rpca
from deicode._rpca_defaults import (DEFAULT_RANK, DEFAULT_MSC, DEFAULT_MFC,
DEFAULT_ITERATIONS, DESC_RANK, DESC_MSC,
DESC_MFC, DESC_ITERATIONS)
@click.command()
@click.option('--in-biom', help='Input table in biom format.', required=True)
@click.option('--output-dir', help='Location of output files.', required=True)
@click.option(
    '--n_components',
    default=DEFAULT_RANK,
    show_default=True,
    help=DESC_RANK)
@click.option(
    '--min-sample-count',
    default=DEFAULT_MSC,
    show_default=True,
    help=DESC_MSC)
@click.option(
    '--min-feature-count',
    default=DEFAULT_MFC,
    show_default=True,
    help=DESC_MFC)
@click.option(
    '--max_iterations',
    default=DEFAULT_ITERATIONS,
    show_default=True,
    help=DESC_ITERATIONS)
def standalone_rpca(in_biom: str, output_dir: str, n_components: int,
                    min_sample_count: int, min_feature_count: int,
                    max_iterations: int) -> None:
    """Runs RPCA with an rclr preprocessing step.

    Writes 'ordination.txt' (the biplot) and 'distance-matrix.tsv' (the
    sample distance matrix) into --output-dir.
    """
    # import table
    table = load_table(in_biom)
    # rpca returns (OrdinationResults, DistanceMatrix)
    ord_res, dist_res = rpca(table,
                             n_components,
                             min_sample_count,
                             min_feature_count,
                             max_iterations)
    # If it doesn't already exist, create the output directory.
    # Note that there is technically a race condition here: it's ostensibly
    # possible that some process could delete the output directory after we
    # check that it exists here but before we write the output files to it.
    # However, in this case, we'd just get an error from skbio.io.util.open()
    # (which is called by skbio.OrdinationResults.write()), which makes sense.
    os.makedirs(output_dir, exist_ok=True)
    # write files to output directory
    # Note that this will overwrite files in the output directory that share
    # these filenames (analogous to QIIME 2's behavior if you specify the
    # --o-biplot and --o-distance-matrix options, but differing from QIIME 2's
    # behavior if you specify --output-dir instead).
    ord_res.write(os.path.join(output_dir, 'ordination.txt'))
    dist_res.write(os.path.join(output_dir, 'distance-matrix.tsv'))


if __name__ == '__main__':
    standalone_rpca()
|
import query
import repo
from lazy_record.errors import *
from inflector import Inflector, English
inflector = Inflector(English)
models = {}
associations = {}
foreign_keys = {}
scopes = {}
def model_from_name(parent_name):
    """Look up a registered model class from a snake-cased association name."""
    class_name = inflector.classify(parent_name)
    return models[class_name]
def _verify_type_match(record, association):
    """Raise AssociationTypeMismatch unless +record+ is an instance of the
    model class named by +association+ (None is always accepted).

    Note: the model is resolved before the None check, so an unknown
    association name fails even for a None record.
    """
    expected_model = model_from_name(association)
    if record is None:
        return
    if not isinstance(record, expected_model):
        raise AssociationTypeMismatch(
            "Expected record of type {expected}, got {actual}.".format(
                expected=expected_model.__name__,
                actual=record.__class__.__name__
            ))
def model_has_foreign_key_for_table(table, model):
    """Return True when +model+ either declares no foreign key for +table+ or
    actually carries that key among its attributes."""
    fk = foreign_keys_for(model).get(inflector.singularize(table))
    return fk is None or fk in model.__attributes__
def foreign_keys_for(klass):
    """Return (creating if needed) the foreign-key registry for a model,
    accepting either the class itself or its name."""
    klass_name = klass if type(klass) is str else klass.__name__
    return foreign_keys.setdefault(klass_name, {})
def associations_for(klass):
    """Return (creating if needed) the association registry for a model,
    accepting either the class itself or its name."""
    klass_name = klass if type(klass) is str else klass.__name__
    return associations.setdefault(klass_name, {})
def scopes_for(klass):
    """Return (creating if needed) the scope registry for a model class."""
    return scopes.setdefault(klass.__name__, {})
class belongs_to(object):
    """
    Decorator marking the decorated model as the child side of a
    one-to-many relationship.
    """
    def __init__(self, parent_name, foreign_key=None):
        """
        +parent_name+ names the parent model (a comment belongs to a post:
        'post' is the +parent_name+). +foreign_key+ is the column on this
        (child) record holding the parent's id; it defaults to the parent's
        snake-cased name with "_id" appended (Post -> "post_id").

        Installs a property on the class for reading/writing the parent:

        >>> comment.post_id
        1
        >>> comment.post
        Post(id=1)
        >>> comment.post = other_post
        >>> comment.post_id
        2
        """
        self.parent_name = parent_name
        self.foreign_key = foreign_key or inflector.foreignKey(parent_name)

    def __call__(self, klass):
        # Register the model and record its foreign key + association
        models[klass.__name__] = klass
        foreign_keys_for(klass)[self.parent_name] = self.foreign_key
        associations_for(klass)[self.parent_name] = None

        def read_parent(wrapped_obj):
            # Query directly (rather than parent.find) so a missing parent
            # yields None instead of raising
            parent_model = model_from_name(self.parent_name)
            parent_query = query.Query(parent_model)
            return parent_query.where(
                id=getattr(wrapped_obj, self.foreign_key)).first()

        def write_parent(wrapped_obj, new_parent):
            if new_parent is not None:
                _verify_type_match(new_parent, self.parent_name)
                # Adopt the new parent's id as our foreign key
                setattr(wrapped_obj, self.foreign_key, new_parent.id)
            else:
                # Un-setting the parent clears the foreign key
                setattr(wrapped_obj, self.foreign_key, None)

        # Expose the parent record as a read/write property
        setattr(klass, self.parent_name, property(read_parent, write_parent))
        # Copy the attribute dict before extending it so we never mutate a
        # dict shared with lazy_record.Base (and therefore with every model)
        new_attributes = dict(klass.__attributes__)
        new_attributes[self.foreign_key] = int
        klass.__attributes__ = new_attributes
        return klass
# Currently exists only so that all models get registered
class has_many(object):
    """
    Decorator to establish this model as the parent in a one-to-many
    relationship or as one part of a many-to-many relationship
    """
    def __init__(self, child_name, scope=lambda query: query,
                 foreign_key=None, through=None):
        """
        +child_name+ is the child model (e.g. if a post has many comments:
        comments is the +child_name+). +foreign_key+ is the foreign key used in
        the child (this) record to hold the id of the parent record. By default
        it is the parent's model name, snake-cased, with "_id" appended
        (e.g. Post -> "post_id"). If this is a many-to-many relationship,
        +through+ is the joining table. +scope+ is a callable applied to the
        child query (defaults to the identity).
        Creates a getter property for child records and (if applicable the
        joining records).
        ex)
        >>> post.comments
        <lazy_record.Query [Comment(id=1)]>
        """
        self.child_name = child_name
        self.foreign_key = foreign_key
        self.through = through
        self.scope = scope

    def __call__(self, klass):
        """Register the association on +klass+ and install a read-only
        property named +child_name+ that queries the child records."""
        self.klass = klass
        our_name = inflector.singularize(repo.Repo.table_name(klass))
        child_model_name = inflector.classify(self.child_name)
        scopes_for(klass)[self.child_name] = self.scope
        # If we are doing an implicit has_many using through, we should define it fully
        if self.through and self.through not in associations_for(klass):
            klass = has_many(self.through)(klass)
        if self.through and self.through not in associations_for(child_model_name):
            # Set up the association for the child
            # Assume a one-many tree unless already defined otherwise
            associations_for(child_model_name)[our_name] = \
                inflector.singularize(self.through)
        # if no foreign key was passed, we should calculate it now based on
        # the class name
        self.foreign_key = self.foreign_key or inflector.foreignKey(our_name)
        models[klass.__name__] = klass
        # Add the foreign key to the fk list
        if not self.through:
            foreign_keys_for(klass)[self.child_name] = self.foreign_key
            # Add the childs associations and foreign keys as if they had
            # a belongs_to
            foreign_keys_for(child_model_name)[our_name] = self.foreign_key
            associations_for(child_model_name)[our_name] = None
        # Add the relationship to the association list
        associations_for(klass)[self.child_name] = self.through
        # Add the child table (or joining table) to the classes dependents
        # so that if this record is destroyed, all related child records
        # (or joining records) are destroyed with it to prevent orphans
        if self.through:
            if self.through in foreign_keys_for(klass):
                klass.__dependents__ = klass.__dependents__ + [self.through]
        else:
            klass.__dependents__ = klass.__dependents__ + [self.child_name]
        if self.through:
            # Do the query with a join
            def child_records_method(wrapped_obj):
                child = model_from_name(self.child_name)
                # No guarentee that self.through is the last in the chain
                # It could be the other part of a many-to-many
                # Or it could be a through that is a couple of levels down
                # e.g. Category has many Posts through Threads
                # (but chain is Category -> Forum -> Thread -> Post)
                result = query.Query(child, record=wrapped_obj).joins(
                    repo.Repo.table_name(wrapped_obj.__class__)).where(
                    **{repo.Repo.table_name(wrapped_obj.__class__):
                       {'id': wrapped_obj.id}})
                return self.scoping(result)
        else:
            # Don't do a join
            def child_records_method(wrapped_obj):
                child = model_from_name(self.child_name)
                q = query.Query(child, record=wrapped_obj)
                where_statement = {self.foreign_key: wrapped_obj.id}
                return self.scoping(q.where(**where_statement))
        setattr(klass, self.child_name, property(child_records_method))
        return klass

    def scoping(self, query):
        """Apply every scope registered for +child_name+ along the chain of
        joining models (walking from this class through each +through+
        association) to +query+, returning the scoped query."""
        current = self.klass
        scopes = []
        while True:
            # Anything other than a has_many won't have an entry,
            # so we return the identity scope
            scopes.append(scopes_for(current).get(self.child_name,
                                                  lambda query: query))
            # Should give either a joining model or None
            # TODO handle case for has_one
            # currently, only has_many_supports scoping
            joiner = associations_for(current).get(self.child_name)
            # End of the line
            if joiner is None:
                break
            # Get the next in the list by looking at the joiner model
            current = model_from_name(joiner)
        for scope in scopes:
            query = scope(query)
        return query
class has_one(object):
    """
    Decorator to establish this model as the parent in a one-to-one
    relationship.
    """
    def __init__(self, child_name, foreign_key=None, through=None):
        """+child_name+ is the (singular) child model name; +foreign_key+
        defaults to this model's snake-cased name with "_id"; +through+, if
        given, must be singular — has_one through a plural is forbidden."""
        self.child_name = child_name
        self.foreign_key = foreign_key
        self.through = through
        if str(self.through).endswith('s'):
            raise AssociationForbidden(
                "Cannot have one '{}' through many '{}'".format(
                    self.child_name,
                    self.through,
                ))

    def __call__(self, klass):
        """Register the association and install a read/write property named
        +child_name+ on +klass+ for the single associated record."""
        our_name = inflector.singularize(repo.Repo.table_name(klass))
        child_model_name = inflector.classify(self.child_name)
        self.foreign_key = self.foreign_key or inflector.foreignKey(our_name)
        # Destroy the child when this record is destroyed (prevents orphans)
        klass.__dependents__ = klass.__dependents__ + [self.child_name]
        # Add the relationship to the association list
        associations_for(klass)[self.child_name] = self.through
        # Add the foreign key to the fk list
        foreign_keys_for(klass)[self.child_name] = self.foreign_key
        models[klass.__name__] = klass
        if self.through and self.through not in associations_for(child_model_name):
            # Set up the association for the child
            # Assume a one-many tree unless already defined otherwise
            associations_for(child_model_name)[our_name] = self.through
        if self.through:
            def child_record_method(wrapped_obj):
                # Join through the intermediate table to reach the child
                child = model_from_name(self.child_name)
                return query.Query(child, record=wrapped_obj).joins(
                    repo.Repo.table_name(wrapped_obj.__class__)).where(
                    **{repo.Repo.table_name(wrapped_obj.__class__):
                       {'id': wrapped_obj.id}}).first()

            def set_child_record_method(wrapped_obj, new_value):
                _verify_type_match(new_value, self.child_name)
                child = model_from_name(self.child_name)
                table = repo.Repo.table_name(wrapped_obj.__class__)
                q = query.Query(child, record=wrapped_obj).joins(table
                    ).where(**{table: {'id': wrapped_obj.id}})
                # Get the previous value
                old_value = q.first()
                # Recall that join_args will have either 0 or 2 or more,
                # never 1 element
                joiner = q.join_args[-2]
                # Find the intermediate record that will connect +new_value+
                # to wrapped_obj
                next_up = model_from_name(joiner['table'])
                next_r = query.Query(next_up, record=wrapped_obj).joins(
                    table).where(**{table: {'id': wrapped_obj.id}}
                    ).first()
                if not model_has_foreign_key_for_table(joiner['table'],
                                                       child):
                    # The intermediate record has the foreign key: set it
                    if new_value is None:
                        setattr(next_r,
                                joiner['on'][0],
                                None)
                    else:
                        setattr(next_r,
                                joiner['on'][0],
                                getattr(new_value, joiner['on'][1]))
                    wrapped_obj._related_records.append(next_r)
                else:
                    # Set the foreign key on the new value
                    if new_value is not None:
                        # Associate new value
                        setattr(new_value,
                                joiner['on'][1],  # Foreign key
                                # Lookup the id/foreign_key of the record
                                getattr(next_r, joiner['on'][0]))
                        wrapped_obj._related_records.append(new_value)
                    # Disassociate the old value
                    if old_value is not None:
                        setattr(old_value,
                                joiner['on'][1],  # Foreign key
                                None)
                        wrapped_obj._related_records.append(old_value)
        else:
            def child_record_method(wrapped_obj):
                # Direct lookup: the child row carries our foreign key
                child = model_from_name(self.child_name)
                q = query.Query(child, record=wrapped_obj)
                where_statement = {self.foreign_key: wrapped_obj.id}
                return q.where(**where_statement).first()

            def set_child_record_method(wrapped_obj, child):
                _verify_type_match(child, self.child_name)
                # We are setting a child: set its foreign key to our id
                if child is not None:
                    setattr(child, self.foreign_key, wrapped_obj.id)
                    wrapped_obj._related_records.append(child)
                # disassociate old record
                old_value = child_record_method(wrapped_obj)
                if old_value is not None:
                    setattr(old_value, self.foreign_key, None)
                    wrapped_obj._related_records.append(old_value)
        setattr(klass, self.child_name, property(child_record_method,
                                                 set_child_record_method))
        return klass
|
class MissingVariableError(Exception):
    """Raised when a required template variable has not been provided."""

    def __init__(self, name):
        # Keep the offending variable name available to handlers
        self.name = name
        self.message = f'The required variable "{self.name}" is missing'
        super().__init__(self.message)
class ReservedVariableError(Exception):
    """Raised when user input tries to set a variable reserved by combine.

    Fix: the message previously read 'The variable"name"' with no space
    before the opening quote.
    """

    def __init__(self, name):
        # Keep the offending variable name available to handlers
        self.name = name
        self.message = (
            f'The variable "{self.name}" is reserved and should only be set by combine'
        )
        super().__init__(self.message)
|
#!/usr/bin/env python
"""
Test support for 'with' statements
"""
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
import unittest
import settings
from amqp import Connection, Message
class TestChannel(unittest.TestCase):
    def test_with(self):
        """Connections and channels should work as context managers, closing
        themselves (and their channels) on exit."""
        with Connection(**settings.connect_args) as conn:
            self.assertIsNotNone(conn.transport)
            with conn.channel(1) as ch:
                self.assertIn(1, conn.channels)
                #
                # Do something with the channel
                #
                ch.exchange_declare('unittest.fanout', 'fanout',
                                    auto_delete=True)
                outgoing = Message(
                    'unittest message',
                    content_type='text/plain',
                    application_headers={'foo': 7, 'bar': 'baz'},
                )
                ch.basic_publish(outgoing, 'unittest.fanout')
            #
            # check that the channel was closed
            #
            self.assertNotIn(1, conn.channels)
            self.assertEqual(ch.is_open, False)
        #
        # Check that the connection was closed
        #
        self.assertEqual(conn.transport, None)
def main():
    """Load the TestChannel suite and run it with the configured runner."""
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestChannel)
    runner = unittest.TextTestRunner(**settings.test_args)
    runner.run(suite)


if __name__ == '__main__':
    main()
|
# coding=utf-8
from wirecamel import CONF_DIR
import re
import subprocess
import yaml
# Window title used when hostapd is launched inside an xterm (see start()).
XTERM_TITLE = 'Hostapd console'
# Load hostapd configuration
# Load hostapd configuration
def load_config():
    """Parse the hostapd config at CONF_DIR into a {key: value} dict.

    Only newline-terminated "key=value" lines are recorded; the key is
    everything up to the last '=' on the line (greedy match, as before).
    """
    hostapd_options = {}
    with open(CONF_DIR, 'r') as config_file:
        for raw_line in config_file:
            match = re.match(r'(.*)=(.*)\n', raw_line)
            if match is not None:
                hostapd_options[match.group(1)] = match.group(2)
    return hostapd_options
# Save hostapd configuration
def save_config(config):
yaml.dump(config,
stream=open('{0}/hostapd.yaml'.format(CONF_DIR), 'w'),
default_flow_style=False)
# Start hostapd
def start(xterm=True):
if xterm:
return subprocess.Popen(
['xterm', '-T', XTERM_TITLE, '-hold', '-e', 'hostapd', '-d', CONF_DIR]
)
else:
return subprocess.Popen(
['hostapd', '-d', CONF_DIR]
)
|
#!/usr/bin/python3
# taken from https://github.com/AcutronicRobotics/mara_examples
import rclpy
from multiprocessing import Process
from rclpy.node import Node
from rclpy.qos import qos_profile_sensor_data
from hrim_actuator_rotaryservo_msgs.msg import GoalRotaryServo
from hrim_actuator_gripper_srvs.srv import ControlFinger
class Gripper(Node):
    """ROS 2 node driving the MARA gripper through the ControlFinger service."""

    def __init__(self):
        super().__init__('mara_minimal_client')
        # Create a client for service "/hrim_actuation_gripper_000000000004/goal"
        self.client = self.create_client(ControlFinger, "/hrim_actuator_gripper_000000000004/fingercontrol")
        # Wait for service to be available before calling it
        while not self.client.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('service not available, waiting again...')
        # Create request with the same type as the service, ControlFinger
        self.req = ControlFinger.Request()

    def send_request(self):
        # Asynchronous call; the pending Future is stored on the node.
        self.future = self.client.call_async(self.req)

    @staticmethod
    def run(cmd_queue):
        """Process entry point: forward goal positions from cmd_queue to the service.

        Blocks on cmd_queue.get() for each new angular position, calls the
        service, and spins until the response arrives. Loops forever.
        """
        rclpy.init(args=None)
        node = Gripper()
        # NOTE(review): 9999. presumably requests maximum closing speed --
        # confirm against the HRIM ControlFinger specification.
        node.req.goal_velocity = 9999.
        while True:
            node.req.goal_angularposition = cmd_queue.get()
            # Call service and spin
            node.send_request()
            rclpy.spin_until_future_complete(node, node.future)
class JointMove(Node):
    """ROS 2 node that periodically publishes a goal position for one joint axis.

    x selects the servo motor, y the axis on that motor; goal positions (in
    degrees) are read from a queue shared with the controlling process.
    """

    def __init__(self, x, y, cmd_queue):
        # Initialize Node with name "mara_minimal_publisher" (made unique per joint)
        super().__init__('mara_minimal_publisher' + "_" + str(x) + "_" + str(y))
        # Create a publisher on topic "/hrim_actuation_servomotor_000000000001/goal_axis1"
        # !TODO one class controls all six joints
        self.pub_ = self.create_publisher(GoalRotaryServo, '/hrim_actuator_rotaryservo_00000000000' + str(x) + '/goal_axis' + str(y),
                                          qos_profile=qos_profile_sensor_data)
        # Create message with the same type as the topic, GoalRotaryServo
        self.msg = GoalRotaryServo()
        # Create a timer to publish the messages periodically
        timer_period = 1.0  # seconds
        self.timer = self.create_timer(timer_period, self.timer_callback)
        self.cmd_queue = cmd_queue
        # Last command received (degrees); None until the first queue item arrives.
        self.cmd = None

    def timer_callback(self):
        """Republish the latest goal; pull a new one when the queue has data.

        NOTE(review): cmd_queue.get() blocks, and for a multiprocessing queue
        empty() is only advisory -- confirm the producer keeps the queue fed.
        """
        # Fill message content
        if self.cmd is None or not self.cmd_queue.empty():
            self.cmd = self.cmd_queue.get()
        self.msg.position = self.cmd * 3.1416/180  # Position to rads
        self.msg.velocity = 30.  # Velocity in rads/s
        self.msg.control_type = 4  # Position and velocity control
        # Publish message!
        self.pub_.publish(self.msg)

    @staticmethod
    def run(x, y, cmd_queue):
        # Process entry point: spin the node until externally shut down.
        rclpy.init(args=None)
        minimal_publisher = JointMove(x, y, cmd_queue)
        rclpy.spin(minimal_publisher)
        minimal_publisher.destroy_node()
        rclpy.shutdown()
def main(args=None):
    """Entry point; the multiprocessing demo is intentionally disabled.

    The original demo created a command queue, seeded it with two gripper
    positions, launched ``Gripper.run`` in a separate ``Process``, and joined
    it. It was commented out upstream and is preserved here only as this
    description; the function is deliberately a no-op.
    """
    return None


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
"""
Unit Tests for DynamicArray
TODO - Add doctests to source
"""
import pytest
from DynamicArray import DynamicArray
class TestDynamicArray():
    """Unit tests for DynamicArray.

    NOTE(review): all tests share one class-level instance, so later tests
    depend on the inserts performed by earlier ones and on pytest running
    methods in definition order. This coupling is fragile; consider
    per-test fixtures.
    """

    @classmethod
    def setup_class(cls):
        # One shared instance for the whole class -- state carries across tests.
        cls.dynamicArray = DynamicArray()

    @classmethod
    def teardown_class(cls):
        if cls.dynamicArray is not None:
            del cls.dynamicArray

    # Can we instantiate DynamicArray.
    def test_can_DynamicArray_be_instantiated(self):
        assert self.dynamicArray is not None, ValueError

    # Can we insert items into DynamicArray.
    def test_items_can_be_inserted_to_DynamicArray(self):
        # Before the insert
        assert len(self.dynamicArray) == 0
        self.dynamicArray.insert(1)
        # Verify length of DynamicArray changes.
        assert len(self.dynamicArray) == 1

    # Does the capacity of DynamicArray double when elements == capacity.
    def test_capacity_doubles_when_elements_equal_capacity(self):
        # First we need to verify how many items are in DynamicArray
        # (relies on the previous test having inserted exactly one item).
        assert len(self.dynamicArray) == 1
        assert self.dynamicArray.capacity == 1
        # Inserting another element to DynamicArray should cause the capacity to double.
        self.dynamicArray.insert(2)  # O(1)
        assert self.dynamicArray.capacity == 2  # Capacity is doubled here. O(N)
        self.dynamicArray.insert(3)
        assert self.dynamicArray.capacity == 4  # Capacity is doubled here. O(N)
        self.dynamicArray.insert(4)
        assert self.dynamicArray.capacity == 4
        self.dynamicArray.insert(5)
        assert self.dynamicArray.capacity == 8  # Capacity is doubled here. O(N)

    # Are multiple types prevented from being inserted to DynamicArray.
    def test_DynamicArray_can_only_hold_single_type(self):
        # To maintain O(1) insertion time DynamicArray must have a single type because of how they are stored in memory.
        with pytest.raises(TypeError):
            self.dynamicArray.insert("badType")


if __name__ == "__main__":
    pytest.main()
|
from tests.system.action.base import BaseActionTestCase
class MotionStatuteParagraphSortActionTest(BaseActionTestCase):
    """System tests for the motion_statute_paragraph.sort action."""

    def _request_sort(self, statute_paragraph_ids):
        """POST a sort request for meeting 222 with the given id order."""
        return self.client.post(
            "/",
            json=[
                {
                    "action": "motion_statute_paragraph.sort",
                    "data": [
                        {
                            "meeting_id": 222,
                            "statute_paragraph_ids": statute_paragraph_ids,
                        }
                    ],
                }
            ],
        )

    def test_sort_correct_1(self) -> None:
        # Two paragraphs; sorting [32, 31] must assign weights 1 and 2.
        self.create_model("meeting/222", {"name": "name_SNLGsvIV"})
        self.create_model(
            "motion_statute_paragraph/31", {"meeting_id": 222, "title": "title_loisueb"}
        )
        self.create_model(
            "motion_statute_paragraph/32", {"meeting_id": 222, "title": "title_blanumop"}
        )
        response = self._request_sort([32, 31])
        self.assert_status_code(response, 200)
        assert self.get_model("motion_statute_paragraph/31").get("weight") == 2
        assert self.get_model("motion_statute_paragraph/32").get("weight") == 1

    def test_sort_missing_model(self) -> None:
        # Sorting an id that does not exist in the db must fail.
        self.create_model("meeting/222", {"name": "name_SNLGsvIV"})
        self.create_model(
            "motion_statute_paragraph/31", {"meeting_id": 222, "title": "title_loisueb"}
        )
        response = self._request_sort([32, 31])
        self.assert_status_code(response, 400)
        assert "Id 32 not in db_instances." in str(response.data)

    def test_sort_another_section_db(self) -> None:
        # Leaving a db paragraph (33) out of the sort list must fail.
        self.create_model("meeting/222", {"name": "name_SNLGsvIV"})
        self.create_model(
            "motion_statute_paragraph/31", {"meeting_id": 222, "title": "title_loisueb"}
        )
        self.create_model(
            "motion_statute_paragraph/32", {"meeting_id": 222, "title": "title_blanumop"}
        )
        self.create_model(
            "motion_statute_paragraph/33", {"meeting_id": 222, "title": "title_polusiem"}
        )
        response = self._request_sort([32, 31])
        self.assert_status_code(response, 400)
        assert "Additional db_instances found." in str(response.data)
|
from .testing_deployment import RobotpkgTests
from .dockerfile import RobotpkgGenerateDockerFile
from .src_introspection import RobotpkgSrcIntrospection,RobotpkgPackage
from .utils import execute,execute_call,init_environment_variables
from .handling_imgs import HandlingImgs
from .package_release_candidate import RobotpkgPkgReleaseCandidate
from .analyze_computer import AnalyzeComputer
from .package_release_candidate import RobotpkgPkgReleaseCandidate
from .architecture_release_candidate import RobotpkgArchitectureReleaseCandidate
from .makefile_lark_parser import lark_parse_makefile
|
"""
Classification-based gym environment
It is based on
https://github.com/deepmind/neural_testbed/blob/master/neural_testbed/generative/classification_envlikelihood.py
"""
import jax.numpy as jnp
import chex
from jax import jit
import haiku as hk
from gym import Env, spaces
from typing import Callable, Any
from jsl.gym_envs.envs.base import sample_gaussian_cls_data, categorical_log_likelihood
class ClassificationEnv(Env):
    """Gym environment wrapping a generative classification testbed.

    Each step serves one batch of train/test data sampled from the generative
    process defined by ``apply_fn``. The agent's action is a vector of class
    predictions for the current training batch; the reward is the negative
    categorical log-likelihood of those predictions.
    """

    def __init__(self,
                 apply_fn: Callable,
                 x_train_generator: Callable,
                 x_test_generator: Callable,
                 prior_knowledge: Any,
                 train_batch_size: int,
                 test_batch_size: int,
                 nsteps: int,
                 key: chex.PRNGKey,
                 sample_fn: Callable = sample_gaussian_cls_data):
        """Store the generative process configuration and build the spaces.

        Args:
            apply_fn: forward function of the underlying generative model.
            x_train_generator / x_test_generator: input samplers for the two streams.
            prior_knowledge: carries tau, input_dim and output_dim.
            train_batch_size / test_batch_size: per-step batch sizes.
            nsteps: number of batches served per episode.
            key: PRNG key seeding the internal key sequence.
            sample_fn: data sampler; defaults to the Gaussian sampler.
        """
        super(ClassificationEnv, self).__init__()
        # Key sequences
        self.rng = hk.PRNGSequence(key)
        self.apply_fn = apply_fn
        self.train_batch_size = train_batch_size
        self.test_batch_size = test_batch_size
        self.x_train_generator = x_train_generator
        self.x_test_generator = x_test_generator
        self.nsteps = nsteps
        # Fall back to the Gaussian sampler when None is passed explicitly.
        if sample_fn is None:
            self.sample_fn = sample_gaussian_cls_data
        else:
            self.sample_fn = sample_fn
        self.tau = prior_knowledge.tau
        self.input_dim = prior_knowledge.input_dim
        self.t = 0
        # Environment OpenAI metadata
        # NOTE(review): gym's reward_range is conventionally a (low, high)
        # tuple; a Discrete space is kept here to preserve existing behavior.
        self.reward_range = spaces.Discrete(prior_knowledge.output_dim)
        self.action_space = spaces.MultiDiscrete([prior_knowledge.output_dim] * train_batch_size)
        self.observation_space = {
            "X_train": spaces.Box(low=-jnp.inf, high=jnp.inf,
                                  shape=(train_batch_size, self.input_dim), dtype=jnp.float32),
            "y_train": spaces.Box(low=-jnp.inf, high=jnp.inf,
                                  shape=(train_batch_size, 1), dtype=jnp.float32),
            "X_test": spaces.Box(low=-jnp.inf, high=jnp.inf,
                                 shape=(test_batch_size, self.input_dim), dtype=jnp.float32),
            "y_test": spaces.Box(low=-jnp.inf, high=jnp.inf,
                                 shape=(test_batch_size, 1), dtype=jnp.float32)
        }

    @property
    def done(self):
        # Episode ends once every pre-sampled batch has been served.
        # Valid only after reset() has populated self.x_train.
        return self.t >= self.x_train.shape[0]

    def step(self, action):
        """Advance one batch; reward is -log p(action) under the true probs."""
        done = self.done
        info = {}
        reward = -categorical_log_likelihood(self.train_probs, action)
        self.t += 1
        if done:
            observation = {}
        else:
            observation = {
                "X_train": self.x_train[self.t],
                "y_train": self.y_train[self.t],
                "X_test": self.x_test[self.t],
                "y_test": self.y_test[self.t]
            }
        return observation, reward, done, info

    def _initialize_data(self):
        """Sample fresh train and test streams from the generative process."""
        nsamples = self.nsteps * self.train_batch_size
        (x_train, y_train), train_probs, _ = self.sample_fn(
            self.apply_fn, self.x_train_generator,
            nsamples, next(self.rng))
        self.x_train = x_train.reshape((-1, self.train_batch_size, self.input_dim))
        self.train_probs = train_probs
        self.y_train = y_train.reshape((-1, self.train_batch_size, 1))
        # Fix: the test stream previously reused x_train_generator (leaving
        # x_test_generator unused) and the train-sized sample count even
        # though it is reshaped by test_batch_size. Sample with the test
        # generator and a test-sized count so the reshape is always valid.
        ntest_samples = self.nsteps * self.test_batch_size
        (x_test, y_test), _, _ = self.sample_fn(
            self.apply_fn, self.x_test_generator,
            ntest_samples, next(self.rng))
        self.x_test = x_test.reshape((-1, self.test_batch_size, self.input_dim))
        self.y_test = y_test.reshape((-1, self.test_batch_size, 1))

    def reset(self):
        """Resample all data and return the first observation."""
        self._initialize_data()
        self.t = 0
        # Returns the current state
        return {"X_train": self.x_train[self.t],
                "y_train": self.y_train[self.t],
                "X_test": self.x_test[self.t],
                "y_test": self.y_test[self.t]}

    def test_data(self, key: chex.PRNGKey):
        """Generates test data and evaluates log likelihood w.r.t. environment.

        The test data that is output will be of length tau examples.
        We wanted to "pass" tau here... but ran into jit issues.

        Args:
            key: Random number generator key.

        Returns:
            Tuple of data (with tau examples) and log-likelihood under posterior.
        """
        def sample_test(k: chex.PRNGKey):
            (x_test, y_test), _, log_likelihood = sample_gaussian_cls_data(
                self.apply_fn, self.x_test_generator, self.tau, key=k)
            return (x_test, y_test), log_likelihood
        return jit(sample_test)(key)

    def render(self):
        # Nothing to visualise for this synthetic environment.
        pass
|
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
# path - Path of file (injected by the execution environment)
df = pd.read_csv(path)

# Code starts here
# Drop the identifier and the target from the feature matrix.
# Fix: pass axis=1 by keyword -- the bare positional form was deprecated in
# pandas 1.0 and removed in pandas 2.0.
X = df.drop(['customerID', 'Churn'], axis=1)
y = df['Churn'].copy()
# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
# 'TotalCharges' arrives as strings with ' ' marking missing values; coerce
# to float and mean-impute each split with its own mean (as before).
# Fix: np.NaN was removed in NumPy 2.0 -- use the canonical np.nan alias
# (identical object on NumPy 1.x, so behavior is unchanged).
X_train['TotalCharges'] = X_train['TotalCharges'].replace(' ', np.nan).astype(float)
X_test['TotalCharges'] = X_test['TotalCharges'].replace(' ', np.nan).astype(float)
X_train['TotalCharges'] = X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean())
X_test['TotalCharges'] = X_test['TotalCharges'].fillna(X_test['TotalCharges'].mean())
X_train.isnull().sum()

cat_cols = X_train.select_dtypes(include='O').columns.tolist()
# Label encoding train data
for x in cat_cols:
    le = LabelEncoder()
    X_train[x] = le.fit_transform(X_train[x])

cate_cols = X_test.select_dtypes(include='O').columns.tolist()
# Label encoding test data
# NOTE(review): fitting fresh LabelEncoders on the test split can assign
# different integer codes than the train split. Preserved as-is, but the
# encoders fitted on X_train should be reused here.
for x in cate_cols:
    le = LabelEncoder()
    X_test[x] = le.fit_transform(X_test[x])

# Binary-encode the target.
y_train = y_train.replace({'No': 0, 'Yes': 1})
y_test = y_test.replace({'No': 0, 'Yes': 1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
# Debug output of the prepared splits.
# NOTE(review): these dump entire DataFrames to stdout; remove once the
# pipeline is verified.
print(X_train)
print(X_test)
print(y_train)
print(y_test)
# Train an AdaBoost classifier with a fixed seed and evaluate on the test split.
ada_model=AdaBoostClassifier(random_state=0)
ada_model.fit(X_train,y_train)
y_pred=ada_model.predict(X_test)
# Accuracy, confusion matrix and per-class report for the AdaBoost model.
ada_score=accuracy_score(y_test,y_pred)
ada_cm=confusion_matrix(y_test,y_pred)
ada_cr=classification_report(y_test,y_pred)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
# Parameter grid searched below: learning rate and (shallow) tree depth.
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}

# Code starts here
# Initialising the model
xgb_model = XGBClassifier(random_state=0)
# Fitting the model on train data
xgb_model.fit(X_train,y_train)
# Making prediction on test data
y_pred = xgb_model.predict(X_test)
# Finding the accuracy score
xgb_score = accuracy_score(y_test,y_pred)
print("Accuracy: ",xgb_score)
# Finding the confusion matrix
xgb_cm=confusion_matrix(y_test,y_pred)
print('Confusion matrix: \n', xgb_cm)
# Finding the classification report
xgb_cr=classification_report(y_test,y_pred)
print('Classification report: \n', xgb_cr)

### GridSearch CV
# Initialising grid search over `parameters` (default CV settings).
clf = GridSearchCV(xgb_model, parameters)
# Fitting the model on train data
clf.fit(X_train,y_train)
# Making prediction on test data (uses the best estimator found)
y_pred = clf.predict(X_test)
# Finding the accuracy score
clf_score = accuracy_score(y_test,y_pred)
print("Accuracy: ",clf_score)
# Finding the confusion matrix
clf_cm=confusion_matrix(y_test,y_pred)
print('Confusion matrix: \n', clf_cm)
# Finding the classification report
clf_cr=classification_report(y_test,y_pred)
print('Classification report: \n', clf_cr)
# Code ends here
|
import os
import sys
class Display(object):
    """Writes decorative, centered banner boxes to stdout."""

    # Box art kept as data so both banners share one rendering path.
    _HEADER_LINES = (
        "╔═════════════╦═══════════════════════╗",
        "║ ║ Server Provisioning ║",
        "║ Payload ╟───────────────────────╢",
        "║ ║ © Cloud Hybrid ║",
        "╟─────────────╨───────────────────────╢",
        "║ development.cloudhybrid@gmail.com ║",
        "╟───────────────────────────┬─────────╢",
        "║ cloudhybrid.io │ Snow ║",
        "╚═══════════════════════════╧═════════╝",
    )
    _COPYRIGHT_LINES = (
        "╔══════════════════╦══════════════════╗",
        "║ Copyright © 2019 ║ Jacob B. Sanders ║",
        "╚══════════════════╩══════════════════╝",
    )

    def __init__(self):
        pass

    @staticmethod
    def _render(lines):
        """Center each line to the current terminal width and write it."""
        for line in lines:
            sys.stdout.write(line.center(os.get_terminal_size().columns))

    @staticmethod
    def header():
        Display._render(Display._HEADER_LINES)

    @staticmethod
    def copyright():
        Display._render(Display._COPYRIGHT_LINES)
import sys
import os
sys.path.append(os.getcwd() + '/src')
import unittest
import utils
class TestUtils(unittest.TestCase):
    """Tests for the tabula-recta cipher helpers in utils.

    Fix: `print table` was Python 2 print-statement syntax, a SyntaxError
    under Python 3 -- replaced with the function call form, which behaves
    identically on both interpreters for a single argument.
    """

    def test_createTabulaRecta(self):
        tabulaRecta = utils.createTabulaRecta()
        # Render the table for the failure message (and for visual debugging).
        table = ''
        for l in tabulaRecta:
            table += ' '.join(l) + "\n"
        print(table)
        self.assertEqual(tabulaRecta[25][25], 'y', "tabulaRecta not created correctly:\n" + table)

    def test_lookUpTabulaRecta(self):
        # Encrypt then decrypt through the same key character round-trips.
        encryptChar = utils.lookUpTabulaRecta('J', 'Q', False)
        self.assertEqual(encryptChar, 'z', 'Mis-encrypted J as %s using key character Q' % encryptChar)
        decryptChar = utils.lookUpTabulaRecta('Z', 'Q', True)
        self.assertEqual(decryptChar, 'j', 'Mis-decrypted Z as %s using key character Q' % decryptChar)

    def test_getNPeriodCharacters(self):
        # Period / length / offset extraction against hand-computed expectations.
        text = 'vptnvffuntshtarptymjwzirappljmhhqvsubwlzzygvtyitarptyiougxiuydtgzhhvvmum'
        n4l2 = 'vpvfnttatywzapjmqvbwzytyaryigxydzhvm'
        n2l1 = 'vtvfnstrtmwiapjhqsblzgtiapyogiytzhvu'
        n3l2o4 = 'vfunshartyjwirppjmhqsuwlzyvtitrpyiugiudtzhvvum'
        result = utils.getNPeriodCharacters(4, text, 2)
        self.assertEqual(result, n4l2, "Failed with period of 4 and length of 2.\nExpected:\n%s\nGot:\n%s" % (n4l2, result))
        result = utils.getNPeriodCharacters(2, text, 1)
        self.assertEqual(result, n2l1, "Failed with period of 2 and length of 1.\nExpected:\n%s\nGot:\n%s" % (n2l1, result))
        result = utils.getNPeriodCharacters(3, text, 2, 4)
        self.assertEqual(result, n3l2o4, "Failed with period of 3 a length of 2 and an offset of 4.\nExpected:\n%s\nGot:\n%s" % (n3l2o4, result))


if __name__ == '__main__':
    unittest.main()
|
import sys
import numpy as np
import torch
from tqdm import tqdm
from codes.analysers.graphmask.graphmask_probe import GraphMaskProbe
from codes.analysers.information_bottleneck.information_bottleneck_probe import InformationBottleneckProbe
from codes.utils.moving_average import MovingAverage
from codes.utils.torch_utils.lagrangian_optimization import LagrangianOptimization
class InformationBottleneckAnalyser:
    """Trains and evaluates an information-bottleneck probe over a GNN's messages.

    The probe learns per-message gates; gated-off messages are replaced by
    learned baselines. The analyser reports how much task performance is
    retained versus how few messages are kept (sparsity).
    """

    # Created lazily by initialise_for_model().
    probe = None
    # Window for the running averages shown in the progress bars.
    moving_average_window_size = 100

    def __init__(self, configuration):
        self.configuration = configuration

    def obtain_message_mean_and_var(self, model, problem):
        """Return per-layer mean and variance of GNN messages over the dev set.

        Runs the model (no gradients, batch size 1) over every dev example and
        aggregates the raw message tensors. Output is [means, variances].
        """
        batch_size = 1
        stat_tensor = []
        with torch.no_grad():
            model.eval()
            problem.initialize_epoch()
            batch_iterator = tqdm(problem.iterate_batches(batch_size=batch_size, split="dev"),
                                  total=problem.approximate_batch_count(batch_size=batch_size, split="dev"),
                                  dynamic_ncols=True,
                                  smoothing=0.0,
                                  desc="Retrieving message statistics from the dev set.")
            for i, batch in enumerate(batch_iterator):
                model(batch)
                # Snapshot the messages produced by this forward pass.
                latest_messages = np.array([m.detach().numpy() for m in model.get_gnn().get_latest_messages()])
                stat_tensor.append(latest_messages)
        stat_tensor = np.concatenate(stat_tensor, axis=1)
        return np.array([stat_tensor.mean(axis=1), stat_tensor.var(axis=1)])

    def initialise_for_model(self, model, problem):
        """Build the probe to match the model's embedding/message dimensions."""
        vertex_embedding_dims = model.get_gnn().get_vertex_embedding_dims()
        message_dims = model.get_gnn().get_message_dims()
        message_mean_and_var = self.obtain_message_mean_and_var(model, problem)
        self.probe = InformationBottleneckProbe(vertex_embedding_dims, message_dims, message_dims, message_mean_and_var)

    def validate(self, model, problem, split="test", gpu_number=-1):
        """Compare gated and ungated model performance on *split*.

        Returns (percent_div, sparsity): the relative score divergence caused
        by gating, and the fraction of messages retained above threshold.
        """
        threshold = self.configuration["analysis"]["parameters"]["threshold"]
        problem.evaluator.set_mode(split)
        device = torch.device('cuda:' + str(gpu_number) if torch.cuda.is_available() and gpu_number >= 0 else 'cpu')
        self.probe.set_device(device)
        model.set_device(device)
        batch_size = 1
        with torch.no_grad():
            model.eval()
            self.probe.eval()
            problem.initialize_epoch()
            score_moving_average = MovingAverage(window_size=self.moving_average_window_size)
            sparsity_moving_average = MovingAverage(window_size=self.moving_average_window_size)
            batch_iterator = tqdm(problem.iterate_batches(batch_size=batch_size, split=split),
                                  total=problem.approximate_batch_count(batch_size=batch_size, split=split),
                                  dynamic_ncols=True,
                                  smoothing=0.0)
            original_all_stats = []
            gated_all_stats = []
            all_gates = 0
            all_messages = 0
            for i, batch in enumerate(batch_iterator):
                # First pass: unmodified model.
                _, original_predictions = model(batch)
                for p, e in zip(original_predictions, batch):
                    original_score = problem.evaluator.score_example(p)
                    stats = problem.evaluator.get_stats(p)
                    original_all_stats.append(stats)
                # Second pass: inject the probe's gates and baselines, then rerun.
                gates, baselines, _ = self.probe(model.get_gnn())
                model.get_gnn().inject_message_scale(gates)
                model.get_gnn().inject_message_replacement(baselines)
                _, predictions = model(batch)
                # Binarise gates for the sparsity count.
                gates = [g > threshold for g in gates]
                for p, e in zip(predictions, batch):
                    gated_score = problem.evaluator.score_example(p)
                    stats = problem.evaluator.get_stats(p)
                    gated_all_stats.append(stats)
                # NOTE: original_score/gated_score are the last example's
                # scores; with batch_size == 1 that is the whole batch.
                score_diff = abs(float(gated_score - original_score))
                score_moving_average.register(score_diff)
                all_gates += float(sum([g.sum().detach() for g in gates]))
                all_messages += float(model.get_gnn().count_latest_messages())
                batch_sparsity = float(sum([g.sum().detach() for g in gates])/model.get_gnn().count_latest_messages())
                sparsity_moving_average.register(batch_sparsity)
                batch_iterator.set_description("Evaluation mean score difference={0:.4f}, mean retained={1:.4f}".format(
                    score_moving_average.get_value(),
                    sparsity_moving_average.get_value()))
            original_true_score = problem.evaluator.evaluate_stats(original_all_stats, split)
            gated_true_score = problem.evaluator.evaluate_stats(gated_all_stats, split)
            print("Information bottleneck comparison on the "+split+"-split:")
            print("======================================")
            print("Original test score: " + str(original_true_score))
            print("Gated test score: " + str(gated_true_score))
            print("Retained messages: " + str(all_gates / all_messages))
            diff = np.abs(original_true_score - gated_true_score)
            # Epsilon guards against a zero original score.
            percent_div = float(diff / (original_true_score + 1e-8))
            sparsity = float(all_gates / all_messages)
            return percent_div, sparsity

    def fit(self, model, problem, gpu_number=-1):
        """Optionally load, then (optionally) train the probe with constrained optimisation.

        Keeps the sparsest probe whose performance divergence stays below
        max_allowed_performance_diff, saving/reloading it via save_path.
        """
        batch_size = self.configuration["analysis"]["parameters"]["batch_size"]
        epochs_per_layer = self.configuration["analysis"]["parameters"]["epochs_per_layer"]
        train_split = self.configuration["analysis"]["parameters"]["train_split"]
        test_every_n = self.configuration["analysis"]["parameters"]["test_every_n"]
        save_path = self.configuration["analysis"]["parameters"]["save_path"]
        penalty_scaling = self.configuration["analysis"]["parameters"]["penalty_scaling"]
        learning_rate = self.configuration["analysis"]["parameters"]["learning_rate"]
        allowance = self.configuration["analysis"]["parameters"]["allowance"]
        max_allowed_performance_diff = self.configuration["analysis"]["parameters"]["max_allowed_performance_diff"]
        load = self.configuration["analysis"]["parameters"]["load"]
        train = self.configuration["analysis"]["parameters"]["train"]
        if load:
            self.probe.load(save_path)
        if train:
            # Gradient accumulation factor, when configured and > 1.
            if "batch_size_multiplier" in self.configuration["analysis"]["parameters"] and \
                    self.configuration["analysis"]["parameters"]["batch_size_multiplier"] > 1:
                batch_size_multiplier = self.configuration["analysis"]["parameters"]["batch_size_multiplier"]
            else:
                batch_size_multiplier = None
            optimizer = torch.optim.Adam(self.probe.parameters(), lr=learning_rate)
            device = torch.device('cuda:' + str(gpu_number) if torch.cuda.is_available() and gpu_number >= 0 else 'cpu')
            self.probe.set_device(device)
            model.set_device(device)
            # Lagrange optimization is not originally a part of the IB approach, but we can get much more consistent results by using it
            lagrangian_optimization = LagrangianOptimization(optimizer,
                                                             device,
                                                             batch_size_multiplier=batch_size_multiplier)
            f_moving_average = MovingAverage(window_size=self.moving_average_window_size)
            g_moving_average = MovingAverage(window_size=self.moving_average_window_size)
            # Any real sparsity (<= 1.0) beats this initial sentinel.
            best_sparsity = 1.01
            for epoch in range(epochs_per_layer):
                problem.evaluator.set_mode("train")
                problem.initialize_epoch()
                batch_iterator = tqdm(problem.iterate_batches(batch_size=batch_size, split=train_split),
                                      total=problem.approximate_batch_count(batch_size=batch_size,
                                                                            split=train_split),
                                      dynamic_ncols=True,
                                      smoothing=0.0)
                for i, batch in enumerate(batch_iterator):
                    self.probe.train()
                    loss, predictions, penalty = self.compute_loss(batch, model, problem)
                    # Constraint term g: divergence above the allowance;
                    # objective term f: scaled sparsity penalty.
                    g = torch.relu(loss - allowance).mean()
                    f = penalty * penalty_scaling
                    lagrangian_optimization.update(f, g)
                    f_moving_average.register(float(f))
                    g_moving_average.register(float(loss.mean()))
                    # NOTE(review): the progress text says "GraphMask training"
                    # but this is the information-bottleneck analyser -- the
                    # string looks copy-pasted from the GraphMask trainer.
                    batch_iterator.set_description(
                        "Running epoch {0:n} of GraphMask training. Mean divergence={1:.4f}, mean penalty={2:.4f}".format(
                            epoch,
                            g_moving_average.get_value(),
                            f_moving_average.get_value()))
                if (epoch + 1) % test_every_n == 0:
                    percent_div, sparsity = self.validate(model, problem, split="dev", gpu_number=gpu_number)
                    if percent_div < max_allowed_performance_diff and sparsity < best_sparsity:
                        print("Found better probe with sparsity={0:.4f}. Keeping these parameters.".format(sparsity),
                              file=sys.stderr)
                        best_sparsity = sparsity
                        self.probe.save(save_path)
            # Load the best probe:
            self.probe.load(save_path)

    def compute_loss(self, batch, model, problem):
        """Divergence loss between the gated model and the original model's predictions."""
        model.eval()
        _, original_predictions = model(batch)
        model.train()  # Enable any dropouts in the original model. We found this helpful for training GraphMask.
        self.probe.train()
        # Train the probe to reproduce the original model's outputs.
        batch = problem.overwrite_labels(batch, original_predictions)
        gates, baselines, penalty = self.probe(model.get_gnn())
        model.get_gnn().inject_message_scale(gates)
        model.get_gnn().inject_message_replacement(baselines)
        loss, predictions = model(batch)
        return loss, predictions, penalty

    def analyse(self, batch, model, problem):
        """Return the boolean gate mask (per message) for *batch*."""
        threshold = self.configuration["analysis"]["parameters"]["threshold"]  # 0.3 or 0.4 worked well for us, but it may take a few restarts to get a good solution
        model.eval()
        self.probe.eval()
        _, original_predictions = model(batch)
        gates, _, _ = self.probe(model.get_gnn())
        return [g > threshold for g in gates]
|
import os
import typing
from json import decoder, encoder
from Mod_NeonOcean_S4_Order import Mod
from Mod_NeonOcean_S4_Order.Tools import Package
def BuildPackageChanges () -> bool:
    """Rebuild mod packages whose source files changed since the last build.

    A JSON manifest of modification times ("Base" file + "Loose" files) stored
    next to each built package decides whether a rebuild is needed. Returns
    False when packages cannot be built at all, True otherwise.

    NOTE(review): the early `return True` statements fire after fully
    rebuilding the FIRST package that needs it, so later packages are skipped
    on that run -- confirm this is intended for multi-package mods.
    """
    if not Package.CanBuildPackage():
        return False
    for package in Mod.GetCurrentMod().Packages:  # type: Mod.Package
        baseFileExists = os.path.exists(package.SourceBaseFilePath)  # type: bool
        loosePathExists = os.path.exists(package.SourceLoosePath)  # type: bool
        packageManifest = None  # type: typing.Optional[typing.Dict[str, typing.Union[float, typing.Dict[str, float]]]]
        # No built package yet -- build everything from scratch.
        if not os.path.exists(package.BuildFilePath):
            _BuildPackageEverythingInternal(package)
            return True
        # Load the previous build manifest; discard it when malformed.
        if os.path.exists(package.BuildManifestFilePath):
            with open(package.BuildManifestFilePath) as packageManifestFile:
                packageManifest = decoder.JSONDecoder().decode(packageManifestFile.read())
            if packageManifest is not None and not isinstance(packageManifest, dict):
                packageManifest = None
            if packageManifest is not None and (not "Loose" in packageManifest or not "Base" in packageManifest):
                packageManifest = None
        if packageManifest is None:
            # No usable manifest -- rebuild everything.
            _BuildPackageEverythingInternal(package)
            return True
        else:
            filesChanged = False  # type: bool
            # Detect a change in the base package file.
            if baseFileExists:
                baseCurrentChangeTime = os.path.getmtime(package.SourceBaseFilePath)  # type: float
                if packageManifest["Base"] != os.path.getmtime(package.SourceBaseFilePath):
                    packageManifest["Base"] = baseCurrentChangeTime
                    filesChanged = True
            if loosePathExists:
                packageManifestLooseDictionary = packageManifest["Loose"]  # type: dict
                # Drop manifest entries for deleted files; refresh changed ones.
                for entryFileName in list(packageManifestLooseDictionary.keys()):  # type: str
                    entryChangeTime = packageManifestLooseDictionary[entryFileName]  # type: float
                    entryFilePath = os.path.join(package.SourceLoosePath, entryFileName)  # type: str
                    if not os.path.exists(entryFilePath):
                        packageManifestLooseDictionary.pop(entryFileName)
                        filesChanged = True
                        continue
                    entryCurrentChangeTime = os.path.getmtime(entryFilePath)  # type: float
                    if entryCurrentChangeTime != entryChangeTime:
                        packageManifest["Loose"][entryFileName] = entryCurrentChangeTime
                        filesChanged = True
                # Register loose files that are new since the last build
                # (matched case-insensitively against existing entries).
                for sourceDirectoryRoot, sourceDirectoryNames, sourceFileNames in os.walk(package.SourceLoosePath):  # type: str, typing.List[str], typing.List[str]
                    for sourceFileName in sourceFileNames:  # type: str
                        sourceFilePath = os.path.join(sourceDirectoryRoot, sourceFileName)  # type: str
                        relativeSourceFilePath = os.path.relpath(sourceFilePath, package.SourceLoosePath)  # type: str
                        sourceFileDuplicate = False  # type: bool
                        for entryFileName in packageManifest["Loose"].keys():  # type: str
                            if entryFileName.lower() == relativeSourceFilePath.lower():
                                sourceFileDuplicate = True
                                break
                        if not sourceFileDuplicate:
                            packageManifest["Loose"][relativeSourceFilePath] = os.path.getmtime(sourceFilePath)
                            filesChanged = True
            if filesChanged:
                # Collect the loose files to package; ".sourceinfo" metadata
                # files are recorded in the manifest but never packaged.
                addingFilePaths = list()  # type: typing.List[str]
                if loosePathExists:
                    for sourceDirectoryRoot, sourceDirectoryNames, sourceFileNames in os.walk(package.SourceLoosePath):  # type: str, typing.List[str], typing.List[str]
                        for sourceFileName in sourceFileNames:  # type: str
                            # noinspection SpellCheckingInspection
                            if os.path.splitext(sourceFileName)[1].lower() == ".sourceinfo":
                                continue
                            sourceFilePath = os.path.join(sourceDirectoryRoot, sourceFileName)  # type: str
                            if os.path.isfile(sourceFilePath):
                                addingFilePaths.append(sourceFilePath)
                if baseFileExists:
                    Package.BuildPackage(package.BuildFilePath,
                                         baseFilePath = package.SourceBaseFilePath,
                                         addingFilePaths = addingFilePaths)
                else:
                    Package.BuildPackage(package.BuildFilePath,
                                         addingFilePaths = addingFilePaths)
                # Persist the refreshed manifest for the next incremental build.
                with open(package.BuildManifestFilePath, "w+") as packageManifestFile:
                    packageManifestFile.write(encoder.JSONEncoder(indent = "\t").encode(packageManifest))
    return True
def BuildPackageEverything () -> bool:
    """Unconditionally rebuild every package of the current mod.

    Returns False when packages cannot be built at all, True otherwise.
    """
    if not Package.CanBuildPackage():
        return False
    for currentPackage in Mod.GetCurrentMod().Packages:  # type: Mod.Package
        _BuildPackageEverythingInternal(currentPackage)
    return True
def _BuildPackageEverythingInternal (package: Mod.Package) -> None:
    """Build *package* from scratch and write a fresh modification-time manifest."""
    baseFileExists = os.path.exists(package.SourceBaseFilePath)  # type: bool

    packageManifest = dict()  # type: typing.Dict[str, typing.Union[float, typing.Dict[str, float]]]
    packageManifest["Base"] = os.path.getmtime(package.SourceBaseFilePath) if baseFileExists else -1
    packageManifest["Loose"] = dict()

    addingFilePaths = list()  # type: typing.List[str]

    # Single pass over the loose files: every file is recorded in the manifest,
    # while only real files that are not ".sourceinfo" metadata get packaged.
    # (os.walk simply yields nothing when SourceLoosePath does not exist.)
    for directoryRoot, directoryNames, fileNames in os.walk(package.SourceLoosePath):  # type: str, typing.List[str], typing.List[str]
        for fileName in fileNames:  # type: str
            filePath = os.path.join(directoryRoot, fileName)  # type: str
            relativeFilePath = os.path.relpath(filePath, package.SourceLoosePath)  # type: str
            packageManifest["Loose"][relativeFilePath] = os.path.getmtime(filePath)

            # noinspection SpellCheckingInspection
            if os.path.splitext(fileName)[1].lower() == ".sourceinfo":
                continue
            if os.path.isfile(filePath):
                addingFilePaths.append(filePath)

    if baseFileExists:
        Package.BuildPackage(package.BuildFilePath,
                             baseFilePath = package.SourceBaseFilePath,
                             addingFilePaths = addingFilePaths)
    else:
        Package.BuildPackage(package.BuildFilePath,
                             addingFilePaths = addingFilePaths)

    # Write the manifest used by the incremental builder next time.
    with open(package.BuildManifestFilePath, "w+") as manifestFile:
        manifestFile.write(encoder.JSONEncoder(indent = "\t").encode(packageManifest))
|
from __future__ import annotations
import ast
import logging
import os
import requests
from pydantic import ValidationError
from sutta_publisher.shared.value_objects.edition_config import EditionConfig, EditionMappingList, EditionsConfigs
# Base URL of the publications API; empty string when not configured.
API_URL = os.getenv("API_URL", "")
# Mapping of endpoint names to path templates, supplied as a Python-literal
# dict in the environment. Fix: default to "{}" -- ast.literal_eval("")
# raises SyntaxError, so a missing API_ENDPOINTS crashed this module at
# import time; now it degrades to an empty mapping.
API_ENDPOINTS = ast.literal_eval(os.getenv("API_ENDPOINTS", "{}"))
# URL of the JSON document with creators' biographies.
CREATOR_BIOS_URL = os.getenv("CREATOR_BIOS_URL", "")
def get_editions_ids(publication_number: str) -> list[str]:
    """Get the editions that are for given `publication_number`."""
    mapping_url = API_URL + API_ENDPOINTS["editions_mapping"]
    response = requests.get(mapping_url)
    response.raise_for_status()
    editions = EditionMappingList.parse_raw(response.content)
    return editions.get_editions_id(publication_number=publication_number)  # type: ignore
def get_edition_config(edition_id: str) -> EditionConfig:
    """Fetch config for a given edition.

    Downloads the edition config from the publication API and then patches
    in the creator's biography, which lives behind a separate URL.
    """
    response = requests.get(API_URL + API_ENDPOINTS["specific_edition"].format(edition_id=edition_id))
    response.raise_for_status()
    payload = response.content.decode("utf-8")
    config = EditionConfig.parse_raw(payload)
    # We need to set creator_bio separately as it comes from a different source
    bios_response = requests.get(CREATOR_BIOS_URL)
    bios_response.raise_for_status()
    creators_bios: list[dict[str, str]] = bios_response.json()
    try:
        # Single-element unpacking raises ValueError when zero (or several)
        # bios match the creator uid.
        (target_bio,) = [bio for bio in creators_bios if bio["creator_uid"] == config.publication.creator_uid]
        config.publication.creator_bio = target_bio["creator_biography"]
    except ValueError:
        # raise SystemExit(f"No creator's biography found for: {config.publication.creator_uid}. Stopping.")
        # TODO: DELETE THESE LINES
        logging.error(f"No creator's biography found for: {config.publication.creator_uid}.")
        # NOTE(review): this fallback assigns a dict while the success path
        # assigns a plain string — downstream consumers must tolerate both;
        # confirm before removing the temporary code above.
        config.publication.creator_bio = {
            "creator_uid": config.publication.creator_uid,
            "creator_biography": f"No creator's biography found for: {config.publication.creator_uid}. To be written.",
        }
        # TODO: UNCOMMENT THIS
        # config.publication.creator_bio = target_bio["creator_biography"]
    return config
def get_editions_configs(publication_number: str) -> EditionsConfigs:
    """Build a list of available editions config."""
    editions_config = EditionsConfigs()
    # Fetch each edition's config; skip (but log) any that fail validation.
    for edition_id in get_editions_ids(publication_number=publication_number):
        try:
            editions_config.append(get_edition_config(edition_id=edition_id))
        except ValidationError as err:
            messages = ["Unsupported edition type found. Skipping to next one. Details:"]
            for idx, error in enumerate(err.errors()):
                error_location = " -> ".join(str(module) for module in error["loc"])
                messages.append(f'[{idx+1}] {error_location}: {error["msg"]} ({error["type"]})')
            logging.warning(" ".join(messages))
    if not editions_config:
        raise SystemExit(f"No valid edition configs found for {publication_number=}. Stopping.")
    return editions_config
def setup_logging() -> None:
    """Configure root logging with a level-tagged, file-prefixed format."""
    logging.basicConfig(
        level=logging.INFO,
        format="[%(levelname)7s] %(filename)s: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
|
# URL routes for the invoicing ("facturacion") app.
# NOTE(review): `patterns()` and string view references are pre-Django-1.8
# idioms (removed in Django 1.10); migrating requires importing the view
# callable directly and using a plain list of url()/path() entries.
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
    # Print view for a single invoice, selected by its numeric id.
    url( r'^facturacion/factura/print/(?P<idFactura>[0-9]+)', 'facturacion.views.printFactura', name="printFactura"),
)
import pytest
import payload as pl
from .fixtures import Fixtures
@pytest.fixture()
def billing_schedule(processing_account, customer_account):
    """Create a monthly subscription billing schedule for calendar year 2019
    linking the given customer and processing account fixtures."""
    billing_schedule = pl.BillingSchedule.create(
        start_date="2019-01-01",
        end_date="2019-12-31",
        recurring_frequency="monthly",
        type="subscription",
        customer_id=customer_account.id,
        processing_id=processing_account.id,
        # Single fixed charge attached to the schedule.
        charges=pl.BillingCharge(type="option_1", amount=39.99),
    )
    return billing_schedule
class TestBilling(Fixtures):
    """API tests for billing schedule creation and updates."""

    def test_create_billing_schedule(
        self, api_key, billing_schedule, processing_account
    ):
        """A freshly created schedule keeps its processing link and charge amount."""
        assert billing_schedule.processing_id == processing_account.id
        assert billing_schedule.charges[0].amount == 39.99

    def test_update_billing_schedule_frequency(
        self, api_key, billing_schedule, processing_account
    ):
        """Updating the recurring frequency is persisted on the object."""
        assert billing_schedule.processing_id == processing_account.id
        assert billing_schedule.charges[0].amount == 39.99
        billing_schedule.update(recurring_frequency="quarterly")
        assert billing_schedule.recurring_frequency == "quarterly"
|
from thebutton.genericstep import GenericStep
from datetime import datetime as dt
# Registry mapping lower-cased step names to their factory callables.
step_factories = {}

def step_factory(name):
    """Decorator: register the decorated factory under `name` (case-insensitive)."""
    key = name.lower()

    def step_factory_impl(fn):
        step_factories[key] = fn
        return fn

    return step_factory_impl
@step_factory("wait")
def wait(time):
yield GenericStep(time=time)
@step_factory("red")
def red():
yield GenericStep(colour="red")
@step_factory("green")
def green():
yield GenericStep(colour="green")
@step_factory("start")
def start():
yield GenericStep(start=True)
@step_factory("stop")
def start():
yield GenericStep(stop=True)
@step_factory("button")
def button():
yield GenericStep(wait_for_press=True)
class ChallengeComplete:
    """Terminal step shown when the challenge is solved.

    Builds a green, press-to-dismiss step whose text/speech report the
    elapsed time since ``state.start_time`` (if a timer was running).
    """

    @staticmethod
    def run(state):
        step = GenericStep(wait_for_press=True,
                           colour="green",
                           on_press="",
                           stop=True)
        if state.start_time is not None:
            time_taken = dt.now() - state.start_time
            # Use total_seconds() so challenges lasting longer than a day are
            # reported correctly (timedelta.seconds silently drops whole days).
            total = int(time_taken.total_seconds())
            hours = total // 3600
            minutes = total % 3600 // 60
            seconds = total % 60
            if hours:
                step.text = "Challenge Complete!\n\nYou took:\n{:02}:{:02}:{:02}".format(hours, minutes, seconds)
                step.speech = "Challenge Complete! You took {} hours, {} minutes and {} seconds".format(hours, minutes, seconds)
            elif minutes:
                step.text = "Challenge Complete!\n\nYou took:\n{:02}:{:02}".format(minutes, seconds)
                step.speech = "Challenge Complete! You took {} minutes and {} seconds".format(minutes, seconds)
            else:
                step.text = "Challenge Complete!\n\nYou took:\n{}s".format(seconds)
                step.speech = "Challenge Complete! You took {} seconds".format(seconds)
        else:
            step.text = "Challenge Complete!"
        step.run(state)
@step_factory("challenge complete")
def challenge_complete():
yield ChallengeComplete
@step_factory("time up")
@step_factory("times up")
@step_factory("time's up")
def time_up():
yield GenericStep(text="Time's up, you failed the challenge.",
wait_for_press=True,
colour="green",
stop=True,
on_press="")
|
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, random_split
from torchvision.datasets import STL10
from unsupervised_pretraining.data.stl10albumentations import STL10Albumentations
import albumentations as A
class STL10DataModule(LightningDataModule):
    def __init__(self, data_dir, batch_size, num_workers, train_transforms_list, test_transforms_list):
        """Data module for loading the STL10 dataset.

        STL-10 is a dataset for experiments with unsupervised / self-supervised
        training of neural networks (https://cs.stanford.edu/~acoates/stl10/).
        On first use the dataset is downloaded into ``data_dir``.

        :param data_dir: directory where the dataset is stored.
        :param batch_size: batch size.
        :param num_workers: number of worker processes for data loading
            during training/testing.
        :param train_transforms_list: list of albumentations transforms
            composed and applied to the train split.
        :param test_transforms_list: list of albumentations transforms
            composed and applied to the test split.
        """
        super(STL10DataModule, self).__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.train_transform = A.Compose(train_transforms_list)
        self.test_transform = A.Compose(test_transforms_list)
        self.num_classes = 10

    def prepare_data(self) -> None:
        # Download once (Lightning calls this before setup()).
        STL10(self.data_dir, download=True)

    def setup(self, stage=None) -> None:
        if stage == "fit":
            # The labelled STL10 train split has 5000 images: 4500 train / 500 val.
            train_full = STL10Albumentations(self.data_dir, split="train", transform=self.train_transform)
            self.stl_train, self.stl_val = random_split(train_full, [4500, 500])
        if stage == "test":
            self.stl_test = STL10Albumentations(self.data_dir, split="test", transform=self.test_transform)

    def train_dataloader(self):
        return DataLoader(self.stl_train, batch_size=self.batch_size, num_workers=self.num_workers)

    def val_dataloader(self):
        return DataLoader(self.stl_val, batch_size=self.batch_size, num_workers=self.num_workers)

    def test_dataloader(self):
        return DataLoader(self.stl_test, batch_size=self.batch_size, num_workers=self.num_workers)
|
import logging
import logging.handlers
import sys
from datetime import datetime
from essentials.folders import ensure_folder
logger = None
def get_app_logger():
    """Return the application logger, creating it on first call (module singleton).

    The logger writes to a per-day rotating file (logs/YYYYMMDD/app.log,
    5 backups of 24 MiB each) and echoes to stdout.
    """
    global logger
    if logger is not None:
        return logger
    logger = logging.getLogger("app")
    logger.setLevel(logging.INFO)
    max_bytes = 24 * 1024 * 1024
    ts = datetime.now().strftime("%Y%m%d")
    ensure_folder(f"logs/{ts}")
    # Keep the handler in its own name; the original rebound one variable
    # from the handler class to the handler instance, which obscured intent.
    file_handler = logging.handlers.RotatingFileHandler(
        f"logs/{ts}/app.log", maxBytes=max_bytes, backupCount=5
    )
    # NOTE(review): the handler level (DEBUG) is below the logger level (INFO),
    # so DEBUG records never reach this handler — confirm this is intentional.
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    logger.addHandler(logging.StreamHandler(sys.stdout))
    return logger
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2013 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
###############################################################################
import glob
import avnav_handlerList
from avnav_config import AVNConfig
from avndirectorybase import *
#a writer for our track
class AVNTrackWriter(AVNDirectoryHandlerBase):
  """Track writer handler.

  Periodically (every 'interval' seconds) appends the current GPS position
  to a per-day CSV track file ('.avt') once the boat has moved at least
  'mindistance' (compared against AVNUtil.distance * NM), keeps the recent
  track in memory for the 'getTrack' API request, and runs a daemon thread
  that mirrors '.avt' files to GPX.
  """
  def __init__(self,param):
    super(AVNTrackWriter,self).__init__(param,'track')
    # in-memory track: list of (datetime, lat, lon) tuples, guarded by tracklock
    self.track=[]
    #param checks
    throw=True
    self.getIntParam('cleanup', throw)
    self.getFloatParam('mindistance', throw)
    self.getFloatParam('interval', throw)
    self.tracklock=threading.Lock()
    self.baseDir=AVNConfig.getDirWithDefault(self.param,"trackdir",'tracks')
    # current track file name (date string, no extension); None until the first run
    self.fname=None
    self.loopCount=0
    self.currentFile=None
    # True until today's existing file has been read back into memory once
    self.initial=True
    self.lastlon=None
    self.lastlat=None
  @classmethod
  def getConfigName(cls):
    return "AVNTrackWriter"
  @classmethod
  def getConfigParam(cls, child=None):
    # no child-level configuration is supported
    if child is not None:
      return None
    return {
      'interval':10, #write every 10 seconds
      'trackdir':"", #defaults to pdir/tracks
      'mindistance': 25, #only write if we at least moved this distance
      'cleanup': 25, #cleanup in hours
      }
  @classmethod
  def getPrefix(cls):
    return '/track'
  def getTrackDir(self):
    return self.baseDir
  #write out the line
  #timestamp is a datetime object
  def writeLine(self,filehandle,timestamp,data):
    """Append one CSV record (ts,lat,lon,track,speed,distance) and flush."""
    ts=timestamp.isoformat();
    if not ts[-1:]=="Z":
      ts+="Z"
    # NOTE: 'str' shadows the builtin here (kept unchanged)
    str="%s,%f,%f,%f,%f,%f\n"%(ts,data['lat'],data['lon'],(data.get('track') or 0),(data.get('speed') or 0),(data.get('distance') or 0))
    filehandle.write(str)
    filehandle.flush()
  def createFileName(self,dt):
    """Return the per-day base file name (YYYY-MM-DD) for the given datetime."""
    # NOTE(review): 'unicode' makes this Python-2-only code
    str=unicode(dt.strftime("%Y-%m-%d"))
    return str
  def cleanupTrack(self):
    """Drop in-memory track points older than the 'cleanup' parameter (hours)."""
    numremoved=0
    cleanupTime=datetime.datetime.utcnow()-datetime.timedelta(hours=self.getIntParam('cleanup'))
    self.tracklock.acquire()
    # track is ordered by time, so we only need to pop from the front
    while len(self.track) > 0:
      if self.track[0][0]<=cleanupTime:
        numremoved+=1
        self.track.pop(0)
      else:
        break
    self.tracklock.release()
    if numremoved > 0:
      AVNLog.debug("removed %d track entries older then %s",numremoved,cleanupTime.isoformat())
  def handleSpecialApiRequest(self, command, requestparam, handler):
    """Serve 'getTrack'; delegate everything else to the directory base class."""
    if command == 'getTrack':
      return self.handleTrackRequest(requestparam)
    return super(AVNTrackWriter,self).handleSpecialApiRequest(command,requestparam,handler)
  def handleTrackRequest(self, requestParam):
    """Answer a 'getTrack' request, honouring optional 'maxnum' and
    'interval' HTTP query parameters (defaults 60/60)."""
    lat = None
    lon = None
    dist = None
    maxnum = 60  # with default settings this is one hour
    interval = 60
    try:
      maxnumstr = AVNUtil.getHttpRequestParam(requestParam, 'maxnum')
      if not maxnumstr is None:
        maxnum = int(maxnumstr)
      intervalstr = AVNUtil.getHttpRequestParam(requestParam, 'interval')
      if not intervalstr is None:
        interval = int(intervalstr)
    except:
      # malformed numbers silently fall back to the defaults
      pass
    frt = self.getTrackFormatted(maxnum, interval)
    return frt
  #get the track as array of dicts
  #filter by maxnum and interval
  def getTrackFormatted(self,maxnum,interval):
    """Return at most `maxnum` dicts (ts/time/lat/lon), thinned so consecutive
    points are more than `interval` seconds apart; newest entries win."""
    rt=[]
    curts=None
    intervaldt=datetime.timedelta(seconds=interval)
    self.tracklock.acquire()
    try:
      for tp in self.track:
        if curts is None or tp[0] > (curts + intervaldt):
          entry={
               'ts':AVNUtil.datetimeToTsUTC(tp[0]),
               'time':tp[0].isoformat(),
               'lat':tp[1],
               'lon':tp[2]}
          rt.append(entry)
          curts=tp[0]
    except:
      pass
    self.tracklock.release()
    return rt[-maxnum:]
  #read in a track file (our csv syntax)
  #return an array of track data
  def readTrackFile(self,filename):
    """Parse one '.avt' CSV file into (datetime, lat, lon, track, speed) tuples."""
    rt=[]
    if not os.path.exists(filename):
      AVNLog.debug("unable to read track file %s",filename)
      return rt
    f=open(filename,"r")
    if f is None:
      AVNLog.debug("unable to open track file %s",filename)
      return rt
    AVNLog.debug("reading track file %s",filename)
    try:
      for line in f:
        # strip trailing '#' comments before splitting
        line=re.sub('#.*','',line)
        par=line.split(",")
        if len(par) < 3:
          continue
        try:
          newLat=float(par[1])
          newLon=float(par[2])
          track=float(par[3])
          speed=float(par[4])
          rt.append((AVNUtil.gt(par[0]),newLat,newLon,track,speed))
        except:
          AVNLog.warn("exception while reading track file %s: %s",filename,traceback.format_exc())
    except:
      pass
    f.close()
    AVNLog.debug("read %d entries from %s",len(rt),filename)
    return rt
  #write track data to gpx file
  #input: current track data
  def writeGpx(self,filename,data):
    """Replace `filename` with a GPX 1.1 track built from readTrackFile tuples."""
    header='''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1" creator="avnav"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd">
<trk>
<name>avnav-track-%s</name>
<trkseg>
'''
    footer='''
</trkseg>
</trk>
</gpx>
'''
    trkpstr="""
<trkpt lat="%2.9f" lon="%2.9f" ><time>%s</time><course>%3.1f</course><speed>%3.2f</speed></trkpt>
"""
    if os.path.exists(filename):
      os.unlink(filename)
    f=None
    try:
      f=open(filename,"w")
    except:
      pass
    if f is None:
      AVNLog.warn("unable to write to gpx file %s",filename)
      return
    AVNLog.debug("writing gpx file %s",filename)
    title,e=os.path.splitext(os.path.basename(filename))
    try:
      f.write(header%(title,))
      for trackpoint in data:
        ts=trackpoint[0].isoformat()
        if not ts[-1:]=="Z":
          ts+="Z"
        f.write(trkpstr%(trackpoint[1],trackpoint[2],ts,trackpoint[3],trackpoint[4]))
      f.write(footer)
    except:
      AVNLog.warn("Exception while writing gpx file %s: %s",filename,traceback.format_exc());
    f.close()
  #a converter running in a separate thread
  #will convert all found track files to gpx if the gpx file does not exist or is older
  def converter(self):
    """Daemon loop: every 60s, (re)generate the GPX twin of each '.avt' file
    whose GPX is missing or older than the track file."""
    infoName="TrackWriter:converter"
    AVNLog.info("%s thread %s started",infoName,AVNLog.getThreadId())
    while True:
      currentTracks=glob.glob(os.path.join(self.baseDir,u"*.avt"))
      for track in currentTracks:
        try:
          gpx=re.sub(r"avt$","gpx",track)
          doCreate=True
          if os.path.exists(gpx):
            trackstat=os.stat(track)
            gpxstat=os.stat(gpx)
            if trackstat.st_mtime <= gpxstat.st_mtime:
              doCreate=False
          if doCreate:
            AVNLog.debug("creating gpx file %s",gpx)
            data=self.readTrackFile(track)
            self.writeGpx(gpx,data)
        except:
          pass
      time.sleep(60)
  def onPreRun(self):
    """Start the GPX converter daemon thread before the main loop runs."""
    self.fname = None
    theConverter = threading.Thread(target=self.converter)
    theConverter.daemon = True
    theConverter.start()
    AVNLog.info("started with dir=%s,interval=%d, distance=%d",
                self.baseDir,
                self.getFloatParam("interval"),
                self.getFloatParam("mindistance"))
  def getSleepTime(self):
    return self.getFloatParam("interval")
  def periodicRun(self):
    """One writer iteration: roll the file at day change, read back old data
    on the very first run, and append the current position when it moved at
    least 'mindistance' from the last written point."""
    try:
      self.loopCount+=1
      currentTime = datetime.datetime.utcnow()
      curfname = self.createFileName(currentTime)
      newFile=False
      realfilename=None
      if not curfname == self.fname:
        # date changed (or first run): switch to a new daily file
        self.fname = curfname
        if not self.currentFile is None:
          self.currentFile.close()
        newFile = True
        realfilename = os.path.join(self.baseDir, curfname + ".avt")
        AVNLog.info("new trackfile %s", realfilename)
        if self.initial:
          # first run after start: reload today's already-written points
          if os.path.exists(realfilename):
            self.setInfo('main', "reading old track data", AVNWorker.Status.STARTED)
            data = self.readTrackFile(realfilename)
            for trkpoint in data:
              self.track.append((trkpoint[0], trkpoint[1], trkpoint[2]))
          self.initial = False
      if newFile:
        self.currentFile = open(realfilename, "a")
        self.currentFile.write("#anvnav Trackfile started/continued at %s\n" % (currentTime.isoformat()))
        self.currentFile.flush()
        self.setInfo('main', "writing to %s" % (realfilename,), AVNWorker.Status.NMEA)
      if self.loopCount >= 10:
        # prune the in-memory track roughly every 10 intervals
        self.cleanupTrack()
        self.loopCount = 0
      gpsdata = self.navdata.getDataByPrefix(AVNStore.BASE_KEY_GPS, 1)
      lat = gpsdata.get('lat')
      lon = gpsdata.get('lon')
      if not lat is None and not lon is None:
        if self.lastlat is None or self.lastlon is None:
          # first fix ever: always record it
          AVNLog.ld("write track entry", gpsdata)
          self.writeLine(self.currentFile, currentTime, gpsdata)
          self.track.append((currentTime, lat, lon))
          self.lastlat = lat
          self.lastlon = lon
        else:
          dist = AVNUtil.distance((self.lastlat, self.lastlon), (lat, lon)) * AVNUtil.NM
          if dist >= self.getFloatParam('mindistance'):
            gpsdata['distance'] = dist
            AVNLog.ld("write track entry", gpsdata)
            self.writeLine(self.currentFile, currentTime, gpsdata)
            self.track.append((currentTime, lat, lon))
            self.lastlat = lat
            self.lastlon = lon
    except Exception as e:
      AVNLog.error("exception in Trackwriter: %s", traceback.format_exc());
  def handleDelete(self, name):
    """Delete a listed file; if it is today's GPX, also clear the in-memory track."""
    rt=super(AVNTrackWriter, self).handleDelete(name)
    if name.endswith(".gpx"):
      # name[:-4] strips '.gpx' to compare against the current date string
      if self.fname == name[:-4]:
        AVNLog.info("deleting current track!")
        self.track=[]
    return rt
  # file extensions exposed through handleList (note: '.avt' itself is not listed)
  LISTED_EXTENSIONS=['.nmea','.nmea.gz','.gpx']
  def handleList(self, handler=None):
    """List directory entries whose names end in one of LISTED_EXTENSIONS."""
    data=self.listDirectory()
    rt=[]
    for item in data:
      for ext in self.LISTED_EXTENSIONS:
        if item.name.endswith(ext):
          rt.append(item)
          break
    return AVNUtil.getReturnData(items=rt)
# Make this handler discoverable by the avnav framework at startup.
avnav_handlerList.registerHandler(AVNTrackWriter)
|
import logging
from abc import abstractmethod, ABCMeta
from typing import Dict, Optional, Union
from twisted.internet.defer import Deferred, fail, succeed, inlineCallbacks
from twisted.python import failure
from twisted.python.failure import Failure
from vortex.DeferUtil import vortexLogFailure
from vortex.Payload import Payload
from vortex.PayloadEndpoint import PayloadEndpoint
from vortex.PayloadEnvelope import PayloadEnvelope
from vortex.TupleAction import TupleActionABC
from vortex.VortexABC import SendVortexMsgResponseCallable
logger = logging.getLogger(__name__)
class TupleActionProcessorDelegateABC(metaclass=ABCMeta):
    """Interface for objects that handle incoming TupleActions."""

    @abstractmethod
    def processTupleAction(self, tupleAction: TupleActionABC) -> Deferred:
        """ Process Tuple Action
        The method generates the vortexMsg for the vortex to send.
        :param tupleAction: The C{TupleAction} to process.
        :return: a Deferred (or plain value / Failure — see
            TupleActionProcessor._customMaybeDeferred) whose result is sent
            back to the requesting vortex.
        """
class TupleActionProcessor:
    """Receives TupleAction payloads on a PayloadEndpoint and dispatches each
    action to the delegate registered for its tuple name (or the default
    delegate), sending the delegate's result — or the failure message — back
    to the requesting vortex."""

    def __init__(self, tupleActionProcessorName: str,
                 additionalFilt: Optional[Dict] = None,
                 defaultDelegate: Optional[TupleActionProcessorDelegateABC] = None,
                 acceptOnlyFromVortex: Optional[Union[str,tuple]] = None,
                 usedForProxy__=False) -> None:
        """ Constructor
        :param tupleActionProcessorName: The name of this observable
        :param additionalFilt: Any additional filter keys that are required
        :param defaultDelegate: The default delegate to send all actions to
        :param acceptOnlyFromVortex: Accept requests only from this vortex,
            The vortex can be str or tuple of str, or None to accept from any.
        """
        self._tupleActionProcessorName = tupleActionProcessorName
        self._defaultDelegate = defaultDelegate
        self._tupleProcessorsByTupleName: Dict[str, TupleActionProcessorDelegateABC] = {}
        # When used behind a proxy no endpoint is created here — the proxy
        # owns the endpoint and forwards messages.
        if not usedForProxy__:
            self._filt = dict(name=tupleActionProcessorName,
                              key="tupleActionProcessorName")
            if additionalFilt:
                self._filt.update(additionalFilt)
            self._endpoint = PayloadEndpoint(self._filt, self._process,
                                             acceptOnlyFromVortex=acceptOnlyFromVortex)

    def setDelegate(self, tupleName: str, processor: TupleActionProcessorDelegateABC):
        """ Add Tuple Action Processor Delegate
        :param tupleName: The tuple name to process actions for.
        :param processor: The processor to use for processing this tuple name.
        """
        assert not tupleName in self._tupleProcessorsByTupleName, (
                "TupleActionProcessor:%s, Tuple name %s is already registered" %
                (self._tupleActionProcessorName, tupleName))
        assert isinstance(processor, TupleActionProcessorDelegateABC), (
                "TupleActionProcessor:%s, provider must be an"
                " instance of TupleActionProcessorDelegateABC"
                % self._tupleActionProcessorName)
        self._tupleProcessorsByTupleName[tupleName] = processor

    @property
    def delegateCount(self) -> int:
        # Number of registered per-tuple delegates (excludes the default).
        return len(self._tupleProcessorsByTupleName)

    def hasDelegate(self, tupleName: str) -> bool:
        return tupleName in self._tupleProcessorsByTupleName

    def shutdown(self):
        # Unhook from the vortex; no further actions will be received.
        self._endpoint.shutdown()

    @inlineCallbacks
    def _process(self, payloadEnvelope: PayloadEnvelope,
                 sendResponse: SendVortexMsgResponseCallable, **kwargs):
        """ Process the Payload / Tuple Action
        Decodes the envelope and forwards its single TupleAction for dispatch.
        """
        payload = yield payloadEnvelope.decodePayloadDefer()
        assert len(payload.tuples) == 1, (
                "TupleActionProcessor:%s Expected 1 tuples, received %s" % (
                    self._tupleActionProcessorName, len(payload.tuples)))
        tupleAction = payload.tuples[0]
        self._processTupleAction(payloadEnvelope.filt, sendResponse, tupleAction)

    def _processTupleAction(self, payloadEnvelopeFilt, sendResponse, tupleAction):
        # Resolve the delegate: per-tuple registration wins over the default.
        assert isinstance(tupleAction, TupleActionABC), \
            "TupleActionProcessor:%s Expected TupleAction, received %s" \
            % (self._tupleActionProcessorName, tupleAction.__class__)
        tupleName = tupleAction.tupleName()
        processor = self._tupleProcessorsByTupleName.get(tupleName)
        if processor:
            delegate = processor.processTupleAction
        elif self._defaultDelegate:
            delegate = self._defaultDelegate.processTupleAction
        else:
            raise Exception("No delegate registered for %s" % tupleName)
        d = self._customMaybeDeferred(delegate, tupleAction)
        d.addCallback(self._callback, payloadEnvelopeFilt, tupleName, sendResponse)
        d.addErrback(self._errback, payloadEnvelopeFilt, tupleName, sendResponse)

    @inlineCallbacks
    def _callback(self, result, replyFilt: dict, tupleName: str,
                  sendResponse: SendVortexMsgResponseCallable):
        # Success path: wrap the delegate result in a payload and send it back.
        if not isinstance(result, list):
            result = [result]
        payload = Payload(filt=replyFilt, tuples=result)
        payloadEnvelope = yield payload.makePayloadEnvelopeDefer()
        vortexMsg = yield payloadEnvelope.toVortexMsgDefer()
        try:
            yield sendResponse(vortexMsg)
        except Exception as e:
            logger.error("Failed to send TupleAction response for %s", tupleName)
            logger.exception(e)

    @inlineCallbacks
    def _errback(self, result: Failure, replyFilt: dict, tupleName: str,
                 sendResponse: SendVortexMsgResponseCallable):
        # Failure path: log it and return the error message in the envelope's
        # `result` field (no payload tuples).
        logger.error("TupleActionProcessor:%s Failed to process TupleActon",
                     self._tupleActionProcessorName)
        vortexLogFailure(result, logger)
        failureMessage = result.getErrorMessage()
        payloadEnvelope = PayloadEnvelope(filt=replyFilt, result=failureMessage)
        vortexMsg = yield payloadEnvelope.toVortexMsgDefer()
        try:
            yield sendResponse(vortexMsg)
        except Exception as e:
            logger.error("Failed to send TupleAction response for %s\n%s",
                         tupleName,
                         failureMessage)
            logger.exception(e)

    def _customMaybeDeferred(self, f, *args, **kw):
        # Like twisted's maybeDeferred: normalise a plain value, Failure or
        # raised exception into a Deferred.
        try:
            result = f(*args, **kw)
        except Exception as e:
            return fail(failure.Failure(e))
        if isinstance(result, Deferred):
            return result
        elif isinstance(result, failure.Failure):
            return fail(result)
        else:
            return succeed(result)
|
from ..base import GnuRecipe
class LibXcbRecipe(GnuRecipe):
    """Build recipe for libxcb 1.12 (the X protocol C-language binding)."""

    def __init__(self, *args, **kwargs):
        super(LibXcbRecipe, self).__init__(*args, **kwargs)
        self.name = 'libxcb'
        self.version = '1.12'
        self.url = 'https://xcb.freedesktop.org/dist/libxcb-$version.tar.bz2'
        self.sha256 = ('4adfb1b7c67e99bc9c2ccb110b2f1756'
                       '86576d2f792c8a71b9c8b19014057b5b')
        self.depends = ['libXau', 'pthread-stubs', 'util-macros', 'xcb-proto']
|
import sys, numpy as np
from igakit.nurbs import NURBS
# Command line: [N [p]] — N: elements per knot direction, p: target degree.
N = 10
p = 2
if len(sys.argv) >= 2:
    N = int(sys.argv[1])
if len(sys.argv) >= 3:
    p = int(sys.argv[2])
# Initial knot vectors: degree 1 in U, degree 2 in V.
U = [0,0, 1,1]
V = [0,0,0, 1,1,1]
# Control net: 2 x 3 points in homogeneous coordinates (x, y, z, w).
C = np.zeros((2,3,4))
val = np.sqrt(2)*0.5
C[0,0,:] = [ 0,-100, 0,1]
C[1,0,:] = [100,-100, 0,1]
C[0,1,:] = [ 0,-100,100,1]
C[1,1,:] = [100,-100,100,1]
C[0,2,:] = [ 0, 0,100,1]
C[1,2,:] = [100, 0,100,1]
# Weight the middle row to represent the circular arc exactly.
C[:,1,:] *= val
geom = NURBS([U,V],C)
# Raise degrees to p in both directions, then insert N-1 uniform knots.
geom.elevate(0,max(p-1,0)).elevate(1,max(p-2,0))
h = 1./N
insert = np.linspace(h,1.-h,N-1)
geom.refine(0,insert).refine(1,insert)
# Export the geometry for the PetIGA-based shell solver.
if True:
    from igakit.io import PetIGA
    PetIGA().write("ClassicalShell.dat", geom, nsd=3)
# Optional visual check of the control net and surface (disabled).
if False:
    from igakit.plot import plt
    plt.figure()
    plt.cpoint(geom)
    plt.cwire(geom)
    plt.kwire(geom)
    plt.surface(geom)
    plt.show()
# Optional post-processing: attach the solver output as displacements (disabled).
if False:
    from igakit.io import PetIGA, VTK
    nrb = PetIGA().read("ClassicalShell.dat")
    sol = PetIGA().read_vec("ClassicalShell.out",nrb)
    U = sol[...,:3]
    X = nrb.points
    W = nrb.weights
    nrb = NURBS(nrb.knots, (X,W), U)
    VTK().write("ClassicalShell.vtk", nrb,
                scalars=dict(),
                vectors=dict(displacement=[0,1,2]),
                )
|
import sys
def init():
    """Demo driver: run both matchers on a sample text/pattern and print the match counts."""
    search_str = "sgtring" # "EIN MUSTER P WIRD IN EINEM GEGEBENEN TEXT GESUCHT"
    pattern = "gstring" # "KEIN"
    print(f"""
+-------------------------------
| String Matcher |
+-------------------------------
Search text: {search_str}
Pattern: {pattern}
""")
    # Naive algorithm
    na_no_matches = naive_search(search_str, pattern)
    print(f"Naive algorithm: \n\tMatch count: {na_no_matches}")
    # Boyer-Moore algorithm
    bm_no_matches = boyer_moore_search(search_str, pattern)
    print(f"Boyer-Moore algorithm: \n\tMatch count: {bm_no_matches}")
def naive_search(text, pattern):
    """
    Naive string matching implementation.
    Runtime:
        - Best Case = Average Case: O(n) the first character does not match every time
        - Worst Case: O(n * m) Each time all characters match except the last
    :param text: text to be searched
    :type text: str
    :param pattern: pattern to be found in text
    :type pattern: str
    :return number of matches (including overlapping ones) of pattern in text.
    """
    m = len(pattern)
    count = 0
    # Slide the pattern over every alignment and compare character-wise.
    for start in range(len(text) - m + 1):
        if all(text[start + offset] == pattern[offset] for offset in range(m)):
            count += 1
    return count
def boyer_moore_search(text, pattern):
    """
    Boyer-Moore (bad-character / Horspool-style) string matching implementation.
    Runtime:
        - Best Case = Average Case: O(n/m) the first character does not match every time
        - Worst Case: O(n * m) Each time all characters match (except the last)
    :param text: text to be searched
    :type text: str
    :param pattern: pattern to be found in text
    :type pattern: str
    :return number of matches (including overlapping ones) of pattern in text.
    """
    pattern_length = len(pattern)
    if pattern_length == 0:
        # An empty pattern matches at every position — consistent with
        # naive_search (the original raised IndexError here).
        return len(text) + 1
    # Bad-character shift table: distance from each character's last
    # occurrence in the pattern to the pattern's end.  A dict (with a
    # default of pattern_length for unseen characters) replaces the
    # original sys.maxunicode-sized list allocated on every call.
    shift = {}
    for i, ch in enumerate(pattern):
        shift[ch] = pattern_length - i - 1
    match_count = 0
    # text and pattern indices, both starting at the pattern's last character
    t_idx, p_idx = pattern_length - 1, pattern_length - 1
    while t_idx < len(text):
        if text[t_idx] == pattern[p_idx]:
            if p_idx == 0:
                # All characters of the pattern matched -> occurrence found.
                # Reset so the next window starts one position after the
                # current match's start (overlapping matches are counted).
                p_idx = pattern_length - 1
                t_idx += pattern_length
                match_count += 1
            else:
                # compare the next character to the left
                p_idx -= 1
                t_idx -= 1
        else:
            # mismatch: shift by the bad-character rule
            shift_value = shift.get(text[t_idx], pattern_length)
            if pattern_length - p_idx > shift_value:
                # more characters already matched than the shift would move;
                # advance past the compared suffix instead
                t_idx += pattern_length - p_idx
            else:
                t_idx += shift_value
            p_idx = pattern_length - 1
    return match_count
if __name__ == "__main__":
init()
|
import pymysql
def selectUnFinishCompany():
    """Fetch every row from zb_tbcompanyinfo.

    :return: tuple of rows on success, None if the query fails.
    """
    conn = pymysql.connect(host='localhost', user='root', passwd='123', db='tianyan', port=3306, charset='utf8')
    cur = conn.cursor()  # acquire a cursor
    sql = "select * from zb_tbcompanyinfo"
    try:
        cur.execute(sql)
        return cur.fetchall()
    except Exception:
        print("数据库操作失败:公司获取失败")
    finally:
        # The original returned from inside the try block, so these close
        # calls were never reached on success — the connection leaked.
        cur.close()  # close cursor
        conn.close()  # release the database connection
def updateConpany(id):
    """Mark company row `id` as done (done = 1).

    Name spelling kept for backward compatibility with existing callers.
    """
    conn = pymysql.connect(host='localhost', user='root', passwd='123', db='tianyan', port=3306, charset='utf8')
    cur = conn.cursor()  # acquire a cursor
    sql = "update company set done = 1 where id = '%d'"
    print(sql)
    try:
        cur.execute(sql % id)
    except Exception:
        print("Error: unable to fecth data")
    finally:
        # Always commit and release resources, even when execute fails.
        conn.commit()
        cur.close()  # close cursor
        conn.close()  # release the database connection
def updateConpanyFlase(id):
    """Mark company row `id` as not done (done = 0).

    Name spelling kept for backward compatibility with existing callers.
    """
    conn = pymysql.connect(host='localhost', user='root', passwd='123', db='tianyan', port=3306, charset='utf8')
    cur = conn.cursor()  # acquire a cursor
    sql = "update company set done = 0 where id = '%d'"
    print(sql)
    try:
        cur.execute(sql % id)
    except Exception:
        print("Error: unable to fecth data")
    finally:
        # Always commit and release resources, even when execute fails.
        conn.commit()
        cur.close()  # close cursor
        conn.close()  # release the database connection
def insertCompany(values):
    """Insert one company row into `result`.

    :param values: 7-tuple (id, companyname, pname, reg_code, creditcode,
        reg_address, tax_code).
    """
    conn = pymysql.connect(host='localhost', user='root', passwd='123', db='tianyan', port=3306, charset='utf8')
    cur = conn.cursor()  # acquire a cursor
    # Parameterized query: the original interpolated raw strings into the
    # SQL text, which breaks (and allows SQL injection) whenever a value
    # contains a quote character.
    sql = "INSERT INTO result (id, companyname,pname, reg_code, creditcode , reg_address, tax_code) " \
          "VALUES (%s, %s, %s, %s, %s, %s, %s)"
    print(sql)
    try:
        cur.execute(sql, values)
    except Exception:
        print("Error: unable to fecth data")
    finally:
        # Always commit and release resources, even when execute fails.
        conn.commit()
        cur.close()  # close cursor
        conn.close()  # release the database connection
class MountainRoad:
    """Distance helper for a 45-degree mountain road."""

    def findDistance(self, start, finish):
        """Return sqrt(2) times the span from min(start) up to max(finish)."""
        span = max(finish) - min(start)
        return span * 2 ** .5
|
# from tellers import Tellers
from modules.schedules import *
from modules.timetable import *
# tellers = Tellers()
#seances = Seances()
# seances = Seances()
|
from attrdict import AttrDict
from ..vocab import NULL
class SlotBase(AttrDict):
    """Attribute-dict describing one template slot.

    Construction requires (at least) the keys listed in REQUIRED_ARGS; extra
    keyword arguments are stored as additional attributes.
    """

    # (key, expected_type) pairs every slot must be constructed with.
    # NOTE(review): only key *presence* is enforced below (a missing key
    # raises KeyError); the declared type is never checked — confirm whether
    # type validation was intended.
    REQUIRED_ARGS = [('name', str), ('is_null', bool), ('is_nullable', bool)]

    def __init__(self, **kwargs):
        # Touch each required key so a missing one fails fast with KeyError.
        for ra in SlotBase.REQUIRED_ARGS:
            key = ra[0]
            value = kwargs[key]
        super(SlotBase, self).__init__(**kwargs)

    def __str__(self):
        return f"Slot({self.name})"

    def __repr__(self):
        return self.__str__()

    def set_property(self, **kwargs):
        # Assign/overwrite arbitrary slot properties after construction.
        for k, v in kwargs.items():
            self[k] = v
class TemplateBase:
    """A named template holding an ordered collection of slots.

    The name defaults to the concrete class name; templates order by name.
    """

    def __init__(self, slots, **kwargs):
        self.name = kwargs.pop('name', type(self).__name__)
        self.slots = slots

    @property
    def slot_num(self):
        """Number of slots in this template."""
        return len(self.slots)

    def __lt__(self, other):
        return self.name < other.name

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name
class TemplateCollectionBase:
    """Holds a set of templates together with the vocabulary and the mask
    generator ("masker") used to constrain token choices per slot."""

    def __init__(self, templates, vocab, masker, slot_names):
        self.templates = templates
        self.vocab = vocab
        self.masker = masker
        self.__slot_names = slot_names

    def get(self, idx_or_str):
        """Fetch a template by name (str) or index (int).

        Returns None for an unknown name (and, implicitly, for other types).
        """
        if isinstance(idx_or_str, str):
            matches = [t for t in self.templates if t.name == idx_or_str]
            return matches[0] if matches else None
        elif isinstance(idx_or_str, int):
            return self.templates[idx_or_str]

    def generate(self, action):
        """Convert a batch of action id sequences (first column dropped) into
        whitespace-normalized strings with NULL tokens blanked out."""
        action = action[:, 1:]
        action_strs_spl = self.vocab.to_str_batch(action, int_cast=True)
        action_strs = []
        for words in action_strs_spl:
            # drop the unused enumerate index the original carried here
            words = [w.replace(NULL, '') for w in words]
            action_strs.append(' '.join(words).strip())
        return action_strs

    def generate_mask(self, template_idx, masker_params=None):
        """Generate the token mask(s) for a single template."""
        template = self.templates[template_idx]
        masker_params = masker_params or {}
        return self.masker.generate_mask(template, **masker_params)

    def generate_mask_batch(self, template_indices, masker_params=None):
        """Generate token masks for a batch of templates."""
        templates = [self.templates[i] for i in template_indices]
        masker_params = masker_params or {}
        return self.masker.generate_mask_batch(templates, **masker_params)

    def generate_mask_all(self, masker_params=None):
        """Generate masks for every template in the collection."""
        return [self.generate_mask(i, masker_params) for i in range(self.template_num)]

    def generate_mask_all_batch(self, masker_params=None, batch_size=None):
        """Generate masks for every template, once per batch element.

        The batch size is inferred from the first masker_params entry when
        params are supplied; otherwise batch_size must be given explicitly.
        """
        if masker_params is not None:
            tmp_key = list(masker_params.keys())[0]
            batch_size = len(masker_params[tmp_key])
        else:
            assert batch_size is not None
        all_template_indices = list(range(self.template_num))
        return [self.generate_mask_batch(all_template_indices, masker_params) for i in range(batch_size)]

    def convert_mask_to_str(self, mask):
        return self.masker.check_mask(mask)

    def is_generable(self, target, verbose=True, **kwargs):
        """Check whether `target` (a 'v o1 p o2' action string or token list)
        can be produced by at least one template under the current masks.

        Returns (success, status): status maps each template name to per-slot
        diagnostics plus a 'success' flag.  `verbose` is accepted for
        interface compatibility but currently unused.
        """
        if isinstance(target, str):
            target = target.split()
        for w in target:
            if not self.vocab.has(w):
                return False, {}
        if len(target) < 4:
            target += ['Null'] * (4 - len(target))
        status = {}
        for t in sorted(self.templates):
            status[t.name] = {s: {'n': 0, 'tokens': []} for s in ['v', 'o1', 'p', 'o2']}
            masks, masks_per_masker = self.masker.generate_mask(t, return_all=True, **kwargs)
            success = True
            for w, (slot_name, mask) in zip(target, masks.items()):
                if slot_name == NULL:
                    # Fix: the original indexed status with the literal string
                    # 't.name' instead of the template's name, which raised
                    # KeyError whenever a NULL slot was encountered.  NULL is
                    # not one of the pre-seeded slot keys, so create the entry.
                    status[t.name].setdefault(slot_name, {'n': 0, 'tokens': []})
                    status[t.name][slot_name]['n'] = 1
                    status[t.name][slot_name]['tokens'].append('Null')
                    continue
                i = self.vocab.to_id(w)
                available_tokens = self.vocab.to_str_list(mask.nonzero(as_tuple=False).view(-1).tolist())
                status[t.name][slot_name] = \
                    {'n': len(available_tokens), 'tokens': available_tokens}
                if not mask[i]:
                    success = False
            status[t.name]['success'] = success
        generable_templates = [(t, d) for (t, d) in status.items() if d['success']]
        success = len(generable_templates) != 0
        return success, status

    @property
    def template_num(self):
        return len(self.templates)

    @property
    def vocab_size(self):
        return self.vocab.size

    @property
    def slot_num(self):
        """Maximum slot count over all templates."""
        return max([t.slot_num for t in self.templates])

    @property
    def slot_names(self):
        return self.__slot_names
|
#!/usr/bin/python
import sys
import pandas as pd
from string import Template
def get_duplicated_rows(data_frame):
    """Return the rows of *data_frame* that duplicate an earlier row."""
    mask = data_frame.duplicated()
    return data_frame[mask]
def get_duplicated_row_numbers(duplicate_row_df):
    """Count how many rows the duplicate-rows frame contains."""
    return len(duplicate_row_df)
if len(sys.argv) != 3:
    # Script usage: CheckDuplication.py <input_csv_path> <output_flag_path>
    print('Number of arguments that you give is wrong, please enter the path of the file which you want to analyze.')
else:
    input_path = sys.argv[1]
    output_path = sys.argv[2]
    # Load the CSV, treating its first column as the index.
    df = pd.read_csv(input_path, index_col=0)
    result_str = '###################################################\n' \
                 'The total number of duplicated rows is $duplicated_row_number\n' \
                 '###################################################\n' \
                 'The duplicated rows are: \n' \
                 '$duplicated_rows\n' \
                 '###################################################\n'
    temp_obj = Template(result_str)
    duplicated_rows = get_duplicated_rows(df)
    duplicated_row_numbers = get_duplicated_row_numbers(duplicated_rows)
    result = temp_obj.substitute(duplicated_row_number=duplicated_row_numbers,
                                 duplicated_rows=duplicated_rows.to_string())
    print(result)
    # Context manager guarantees the output handle is closed even on error
    # (the original opened/closed the file manually with no try/finally).
    with open(output_path, "w") as f:
        f.write("True" if duplicated_row_numbers >= 1 else "False")
# python CheckDuplication.py /home/pliu/data_set/argo_data_pipeline/pokemon-bad.csv /tmp/output_params.txt
|
import os
from collections import namedtuple
import sublime
from sublime_plugin import WindowCommand, TextCommand, EventListener
from ...common import util
from .navigate import GsNavigate
from ...common.theme_generator import XMLThemeGenerator, JSONThemeGenerator
from ..git_command import GitCommand
from ..constants import MERGE_CONFLICT_PORCELAIN_STATUSES
from ...common.util import debug
# (section_start, section_end): view row span of the inlined hunk;
# hunk: the parsed diff hunk; line_types/lines: per-line "+"/"-" flags and text.
HunkReference = namedtuple("HunkReference", ("section_start", "section_end", "hunk", "line_types", "lines"))
INLINE_DIFF_TITLE = "DIFF: "
INLINE_DIFF_CACHED_TITLE = "DIFF (cached): "
# Minimal git patch header prepended to hand-built hunks fed to `git apply`.
DIFF_HEADER = """diff --git a/{path} b/{path}
--- a/{path}
+++ b/{path}
"""
# Maps "<cached>+<file_path>" -> inline-diff view, so existing views are reused.
inline_diff_views = {}
# Maps view id -> list of HunkReference for the hunks currently displayed.
diff_view_hunks = {}
class GsInlineDiffCommand(WindowCommand, GitCommand):
    """
    Given an open file in a git-tracked directory, show a new view with the
    diff (against HEAD) displayed inline. Allow the user to stage or reset
    hunks or individual lines, and to navigate between hunks.
    """
    def run(self, **kwargs):
        # Defer the real work to Sublime's async worker thread.
        sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)
    def run_async(self, settings=None, cached=False):
        # When invoked from a file view, derive settings and syntax from that
        # view; otherwise the caller supplies them, with the syntax tucked
        # inside `settings` under the "syntax" key.
        if settings is None:
            file_view = self.window.active_view()
            syntax_file = file_view.settings().get("syntax")
            settings = {
                "git_savvy.file_path": self.file_path,
                "git_savvy.repo_path": self.repo_path
            }
        else:
            syntax_file = settings["syntax"]
            del settings["syntax"]
        # Reuse an open inline-diff view for this file+mode if one exists.
        view_key = "{0}+{1}".format(cached, settings["git_savvy.file_path"])
        if view_key in inline_diff_views and inline_diff_views[view_key] in sublime.active_window().views():
            diff_view = inline_diff_views[view_key]
        else:
            diff_view = util.view.get_scratch_view(self, "inline_diff", read_only=True)
            title = INLINE_DIFF_CACHED_TITLE if cached else INLINE_DIFF_TITLE
            diff_view.set_name(title + os.path.basename(settings["git_savvy.file_path"]))
            diff_view.set_syntax_file(syntax_file)
            file_ext = util.file.get_file_extension(os.path.basename(settings["git_savvy.file_path"]))
            self.augment_color_scheme(diff_view, file_ext)
            diff_view.settings().set("git_savvy.inline_diff_view.in_cached_mode", cached)
            for k, v in settings.items():
                diff_view.settings().set(k, v)
            inline_diff_views[view_key] = diff_view
        # Probe the file's encoding so later stage/reset patches are encoded
        # the same way. UTF-8 is assumed unless decoding fails.
        file_binary = util.file.get_file_contents_binary(
            settings["git_savvy.repo_path"], settings["git_savvy.file_path"])
        try:
            file_binary.decode()
        except UnicodeDecodeError as unicode_err:
            try:
                file_binary.decode("latin-1")
                diff_view.settings().set("git_savvy.inline_diff.encoding", "latin-1")
            except UnicodeDecodeError as unicode_err:
                # NOTE(review): latin-1 can decode any byte sequence, so this
                # inner except looks unreachable and the configured
                # fallback_encoding path likely never runs -- verify.
                fallback_encoding = self.savvy_settings.get("fallback_encoding")
                diff_view.settings().set("git_savvy.inline_diff.encoding", fallback_encoding)
        self.window.focus_view(diff_view)
        diff_view.run_command("gs_inline_diff_refresh")
        diff_view.run_command("gs_handle_vintageous")
    def augment_color_scheme(self, target_view, file_ext):
        """
        Given a target view, generate a new color scheme from the original with
        additional inline-diff-related style rules added. Save this color scheme
        to disk and set it as the target view's active color scheme.
        """
        colors = self.savvy_settings.get("colors")
        original_color_scheme = target_view.settings().get("color_scheme")
        # Legacy .tmTheme schemes are XML; newer schemes are JSON.
        if original_color_scheme.endswith(".tmTheme"):
            themeGenerator = XMLThemeGenerator(original_color_scheme)
        else:
            themeGenerator = JSONThemeGenerator(original_color_scheme)
        # One style rule per highlight scope used by highlight_regions().
        themeGenerator.add_scoped_style(
            "GitSavvy Added Line",
            "git_savvy.change.addition",
            background=colors["inline_diff"]["add_background"],
            foreground=colors["inline_diff"]["add_foreground"]
        )
        themeGenerator.add_scoped_style(
            "GitSavvy Removed Line",
            "git_savvy.change.removal",
            background=colors["inline_diff"]["remove_background"],
            foreground=colors["inline_diff"]["remove_foreground"]
        )
        themeGenerator.add_scoped_style(
            "GitSavvy Added Line Bold",
            "git_savvy.change.addition.bold",
            background=colors["inline_diff"]["add_background_bold"],
            foreground=colors["inline_diff"]["add_foreground_bold"]
        )
        themeGenerator.add_scoped_style(
            "GitSavvy Removed Line Bold",
            "git_savvy.change.removal.bold",
            background=colors["inline_diff"]["remove_background_bold"],
            foreground=colors["inline_diff"]["remove_foreground_bold"]
        )
        themeGenerator.apply_new_theme("active-diff-view." + file_ext, target_view)
class GsInlineDiffRefreshCommand(TextCommand, GitCommand):
    """
    Diff one version of a file (the base) against another, and display the
    changes inline.
    If not in `cached` mode, compare the file in the working tree against the
    same file in the index. If a line or hunk is selected and the primary
    action for the view is taken (pressing `l` or `h` for line or hunk,
    respectively), add that line/hunk to the index. If a line or hunk is
    selected and the secondary action for the view is taken (pressing `L` or
    `H`), remove those changes from the file in the working tree.
    If in `cached` mode, compare the file in the index against the same file
    in the HEAD. If a line or hunk is selected and the primary action for
    the view is taken, remove that line from the index. Secondary actions
    are not supported in `cached` mode.
    """
    def run(self, edit):
        file_path = self.file_path
        in_cached_mode = self.view.settings().get("git_savvy.inline_diff_view.in_cached_mode")
        # Optionally ignore end-of-line whitespace differences when diffing.
        ignore_eol_arg = (
            "--ignore-space-at-eol"
            if self.savvy_settings.get("inline_diff_ignore_eol_whitespaces", True)
            else None
        )
        if in_cached_mode:
            indexed_object = self.get_indexed_file_object(file_path)
            head_file_object = self.get_head_file_object(file_path)
            head_file_contents = self.get_object_contents(head_file_object)
            # Display the changes introduced between HEAD and index.
            stdout = self.git("diff", "--no-color", "-U0", ignore_eol_arg, head_file_object, indexed_object)
            diff = util.parse_diff(stdout)
            inline_diff_contents, replaced_lines = \
                self.get_inline_diff_contents(head_file_contents, diff)
        else:
            indexed_object = self.get_indexed_file_object(file_path)
            indexed_object_contents = self.get_object_contents(indexed_object)
            working_tree_file_contents = util.file.get_file_contents_binary(self.repo_path, file_path)
            working_tree_file_object = self.get_object_from_string(working_tree_file_contents)
            # Display the changes introduced between index and working dir.
            stdout = self.git("diff", "--no-color", "-U0", ignore_eol_arg, indexed_object, working_tree_file_object)
            diff = util.parse_diff(stdout)
            inline_diff_contents, replaced_lines = \
                self.get_inline_diff_contents(indexed_object_contents, diff)
        # Remember the cursor position so it can be restored after the whole
        # buffer is replaced below.
        cursors = self.view.sel()
        if cursors:
            row, col = self.view.rowcol(cursors[0].begin())
        self.view.set_read_only(False)
        self.view.replace(edit, sublime.Region(0, self.view.size()), inline_diff_contents)
        if cursors:
            if (row, col) == (0, 0) and self.savvy_settings.get("inline_diff_auto_scroll", False):
                # Fresh view: jump straight to the first hunk.
                self.view.run_command("gs_inline_diff_navigate_hunk")
            else:
                # Restore the previous cursor row (column reset to zero).
                self.view.sel().clear()
                pt = self.view.text_point(row, 0)
                self.view.sel().add(sublime.Region(pt, pt))
                self.view.show_at_center(pt)
                # The following shouldn't strictly be necessary, but Sublime sometimes jumps
                # to the right when show_at_center for a column-zero-point occurs.
                _, vp_y = self.view.viewport_position()
                self.view.set_viewport_position((0, vp_y), False)
        self.highlight_regions(replaced_lines)
        self.view.set_read_only(True)
        sublime.set_timeout_async(lambda: self.verify_not_conflict(), 0)
    def get_inline_diff_contents(self, original_contents, diff):
        """
        Given a file's original contents and an array of hunks that could be
        applied to it, return a string with the diff lines inserted inline.
        Also return an array of inlined-hunk information to be used for
        diff highlighting.
        Remove any `-` or `+` characters at the beginning of each line, as
        well as the header summary line. Additionally, store relevant data
        in `diff_view_hunks` to be used when the user takes an
        action in the view.
        """
        hunks = []
        diff_view_hunks[self.view.id()] = hunks
        lines = original_contents.split("\n")
        replaced_lines = []
        # Tracks how many extra lines earlier hunks inserted into `lines`,
        # so later hunks land at the correct (shifted) position.
        adjustment = 0
        for hunk in diff:
            # Git line-numbers are 1-indexed, lists are 0-indexed.
            head_start = hunk.head_start - 1
            # If the change includes only added lines, the head_start value
            # will be off-by-one.
            head_start += 1 if hunk.head_length == 0 else 0
            head_end = head_start + hunk.head_length
            # Remove the `@@` header line.
            diff_lines = hunk.raw_lines[1:]
            section_start = head_start + adjustment
            section_end = section_start + len(diff_lines)
            line_types = [line[0] for line in diff_lines]
            raw_lines = [line[1:] for line in diff_lines]
            # Store information about this hunk, with proper references, so actions
            # can be taken when triggered by the user (e.g. stage line X in diff_view).
            hunks.append(HunkReference(
                section_start, section_end, hunk, line_types, raw_lines
            ))
            # Discard the first character of every diff-line (`+`, `-`).
            lines = lines[:section_start] + raw_lines + lines[head_end + adjustment:]
            replaced_lines.append((section_start, section_end, line_types, raw_lines))
            adjustment += len(diff_lines) - hunk.head_length
        return "\n".join(lines), replaced_lines
    def highlight_regions(self, replaced_lines):
        """
        Given an array of tuples, where each tuple contains the start and end
        of an inlined diff hunk as well as an array of line-types (add/remove)
        for the lines in that hunk, highlight the added regions in green and
        the removed regions in red.
        """
        add_regions = []
        add_bold_regions = []
        remove_regions = []
        remove_bold_regions = []
        for section_start, section_end, line_types, raw_lines in replaced_lines:
            region_start = None
            region_end = None
            region_type = None
            # Coalesce consecutive lines of the same type into one region.
            for type_index, line_number in enumerate(range(section_start, section_end)):
                line = self.view.full_line(self.view.text_point(line_number, 0))
                line_type = line_types[type_index]
                if not region_type:
                    region_type = line_type
                    region_start = line.begin()
                elif region_type != line_type:
                    # Line type changed: close the previous region and open a
                    # new one of the current type.
                    region_end = line.begin()
                    list_ = add_regions if region_type == "+" else remove_regions
                    list_.append(sublime.Region(region_start, region_end))
                    region_type = line_type
                    region_start = line.begin()
            # Close the final (still-open) region of the hunk.
            region_end = line.end()
            list_ = add_regions if region_type == "+" else remove_regions
            list_.append(sublime.Region(region_start, region_end))
            # If there are both additions and removals in the hunk, display additional
            # highlighting for the in-line changes (if similarity is above threshold).
            if "+" in line_types and "-" in line_types:
                # Determine start of hunk/section.
                section_start_idx = self.view.text_point(section_start, 0)
                # Removed lines come first in a hunk.
                remove_start = section_start_idx
                first_added_line = line_types.index("+")
                add_start = section_start_idx + len("\n".join(raw_lines[:first_added_line])) + 1
                removed_part = "\n".join(raw_lines[:first_added_line])
                added_part = "\n".join(raw_lines[first_added_line:])
                changes = util.diff_string.get_changes(removed_part, added_part)
                for change in changes:
                    if change.type in (util.diff_string.DELETE, util.diff_string.REPLACE):
                        # Display bold color in removed hunk area.
                        region_start = remove_start + change.old_start
                        region_end = remove_start + change.old_end
                        remove_bold_regions.append(sublime.Region(region_start, region_end))
                    if change.type in (util.diff_string.INSERT, util.diff_string.REPLACE):
                        # Display bold color in added hunk area.
                        region_start = add_start + change.new_start
                        region_end = add_start + change.new_end
                        add_bold_regions.append(sublime.Region(region_start, region_end))
        self.view.add_regions("git-savvy-added-lines", add_regions, scope="git_savvy.change.addition")
        self.view.add_regions("git-savvy-removed-lines", remove_regions, scope="git_savvy.change.removal")
        self.view.add_regions("git-savvy-added-bold", add_bold_regions, scope="git_savvy.change.addition.bold")
        self.view.add_regions("git-savvy-removed-bold", remove_bold_regions, scope="git_savvy.change.removal.bold")
    def verify_not_conflict(self):
        """Close the view with an error message if the file is in merge-conflict state."""
        fpath = self.get_rel_path()
        status_file_list = self.get_status()
        for f in status_file_list:
            if f.path == fpath:
                if (f.index_status, f.working_status) in MERGE_CONFLICT_PORCELAIN_STATUSES:
                    sublime.error_message("Inline-diff cannot be displayed for this file - "
                                          "it has a merge conflict.")
                    self.view.window().focus_view(self.view)
                    self.view.window().run_command("close_file")
                break
class GsInlineDiffFocusEventListener(EventListener):
    """
    Keep inline-diff views fresh: whenever one of them regains focus,
    re-run the refresh command so it reflects the latest file status.
    """
    def on_activated(self, view):
        is_inline_diff_view = view.settings().get("git_savvy.inline_diff_view") is True
        if is_inline_diff_view:
            view.run_command("gs_inline_diff_refresh")
class GsInlineDiffStageOrResetBase(TextCommand, GitCommand):
    """
    Base class for any stage or reset operation in the inline-diff view.
    Determine the line number of the current cursor location, and use that
    to determine what diff to apply to the file (implemented in subclass).
    """
    def run(self, edit, **kwargs):
        # Defer the real work to Sublime's async worker thread.
        sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)
    def run_async(self, reset=False):
        in_cached_mode = self.view.settings().get("git_savvy.inline_diff_view.in_cached_mode")
        ignore_ws = (
            "--ignore-whitespace"
            if self.savvy_settings.get("inline_diff_ignore_eol_whitespaces", True)
            else None
        )
        selections = self.view.sel()
        region = selections[0]
        # For now, only support staging selections of length 0.
        if len(selections) > 1 or not region.empty():
            return
        # Git lines are 1-indexed; Sublime rows are 0-indexed.
        line_number = self.view.rowcol(region.begin())[0] + 1
        # Subclasses build the hunk text for this cursor line.
        # NOTE(review): get_diff_from_line can return None (no hunk found);
        # `header + None` below would raise TypeError -- verify upstream
        # guarantees the cursor is inside a hunk.
        diff_lines = self.get_diff_from_line(line_number, reset)
        rel_path = self.get_rel_path()
        if os.name == "nt":
            # Git expects `/`-delimited relative paths in diff.
            rel_path = rel_path.replace("\\", "/")
        header = DIFF_HEADER.format(path=rel_path)
        full_diff = header + diff_lines + "\n"
        # The three argument combinations below result from the following
        # three scenarios:
        #
        # 1) The user is in non-cached mode and wants to stage a line/hunk, so
        #    do NOT apply the patch in reverse, but do apply it only against
        #    the cached/indexed file (not the working tree).
        # 2) The user is in non-cached mode and wants to undo a line/hunk, so
        #    DO apply the patch in reverse, and do apply it both against the
        #    index and the working tree.
        # 3) The user is in cached mode and wants to undo a line/hunk, so DO
        #    apply the patch in reverse, but only apply it against the cached/
        #    indexed file.
        #
        # NOTE: When in cached mode, the action taken will always be to apply
        # the patch in reverse only to the index.
        args = [
            "apply",
            "--unidiff-zero",
            "--reverse" if (reset or in_cached_mode) else None,
            "--cached" if (not reset or in_cached_mode) else None,
            ignore_ws,
            "-"
        ]
        # Encode the patch with the same encoding detected for the file.
        encoding = self.view.settings().get('git_savvy.inline_diff.encoding', 'UTF-8')
        self.git(*args, stdin=full_diff, stdin_encoding=encoding)
        self.save_to_history(args, full_diff, encoding)
        self.view.run_command("gs_inline_diff_refresh")
    def save_to_history(self, args, full_diff, encoding):
        """
        After successful `git apply`, save the apply-data into history
        attached to the view, for later Undo.
        """
        history = self.view.settings().get("git_savvy.inline_diff.history") or []
        history.append((args, full_diff, encoding))
        self.view.settings().set("git_savvy.inline_diff.history", history)
class GsInlineDiffStageOrResetLineCommand(GsInlineDiffStageOrResetBase):
    """
    Given a line number, generate a diff of that single line in the active
    file, and apply that diff to the file. If the `reset` flag is set to
    `True`, apply the patch in reverse (reverting that line to the version
    in HEAD).
    """
    def get_diff_from_line(self, line_no, reset):
        """Build a one-line `git apply` hunk for the view row `line_no`."""
        hunks = diff_view_hunks[self.view.id()]
        # Net +/- line balance of every hunk that precedes the selected one;
        # needed to translate view positions back to file positions.
        add_length_earlier_in_diff = 0
        cur_hunk_begin_on_minus = 0
        cur_hunk_begin_on_plus = 0
        # Find the correct hunk.
        for hunk_ref in hunks:
            if hunk_ref.section_start <= line_no and hunk_ref.section_end >= line_no:
                break
            else:
                # We loop through all hunks before the selected hunk,
                # accumulating their +/- balance so the generated diff (and
                # therefore undo) uses correct line numbers.
                for type in hunk_ref.line_types:
                    if type == "+":
                        add_length_earlier_in_diff += 1
                    elif type == "-":
                        add_length_earlier_in_diff -= 1
                    else:
                        # should never happen that it will raise.
                        raise ValueError('type have to be eather "+" or "-"')
        # Correct hunk not found.
        else:
            return
        section_start = hunk_ref.section_start + 1
        # Determine head/staged starting line.
        index_in_hunk = line_no - section_start
        line = hunk_ref.lines[index_in_hunk]
        line_type = hunk_ref.line_types[index_in_hunk]
        # Count the +/- lines before the selected line inside this hunk;
        # needed to make undo work properly when undoing a specific line.
        for type in hunk_ref.line_types[:index_in_hunk]:
            if type == "-":
                cur_hunk_begin_on_minus += 1
            else:
                # type will be +
                cur_hunk_begin_on_plus += 1
        # Removed lines are always first with `git diff -U0 ...`. Therefore, the
        # line to remove will be the Nth line, where N is the line index in the hunk.
        head_start = hunk_ref.hunk.head_start if line_type == "+" else hunk_ref.hunk.head_start + index_in_hunk
        if reset:
            xhead_start = head_start - index_in_hunk + (0 if line_type == "+" else add_length_earlier_in_diff)
            # NOTE(review): xnew_start is computed but never used -- dead
            # code? Verify before removing.
            xnew_start = head_start - cur_hunk_begin_on_minus + index_in_hunk + add_length_earlier_in_diff - 1
            return (
                "@@ -{head_start},{head_length} +{new_start},{new_length} @@\n"
                "{line_type}{line}").format(
                head_start=(xhead_start if xhead_start >= 0 else cur_hunk_begin_on_plus),
                head_length="0" if line_type == "+" else "1",
                # If head_length is zero, diff will report original start position
                # as one less than where the content is inserted, for example:
                #   @@ -75,0 +76,3 @@
                new_start=xhead_start + (1 if line_type == "+" else 0),
                new_length="1" if line_type == "+" else "0",
                line_type=line_type,
                line=line
            )
        else:
            head_start += 1
            return (
                "@@ -{head_start},{head_length} +{new_start},{new_length} @@\n"
                "{line_type}{line}").format(
                head_start=head_start + (-1 if line_type == "-" else 0),
                head_length="0" if line_type == "+" else "1",
                # If head_length is zero, diff will report original start position
                # as one less than where the content is inserted, for example:
                #   @@ -75,0 +76,3 @@
                new_start=head_start + (-1 if line_type == "-" else 0),
                new_length="1" if line_type == "+" else "0",
                line_type=line_type,
                line=line
            )
class GsInlineDiffStageOrResetHunkCommand(GsInlineDiffStageOrResetBase):
    """
    Given a line number, generate a diff of the hunk containing that line,
    and apply that diff to the file. If the `reset` flag is set to `True`,
    apply the patch in reverse (reverting that hunk to the version in HEAD).
    """
    def get_diff_from_line(self, line_no, reset):
        """Build a `git apply` patch for the whole hunk containing view row `line_no`."""
        hunks = diff_view_hunks[self.view.id()]
        # Net +/- line balance of every hunk preceding the selected one.
        add_length_earlier_in_diff = 0
        # Find the correct hunk.
        for hunk_ref in hunks:
            if hunk_ref.section_start <= line_no and hunk_ref.section_end >= line_no:
                break
            else:
                # We loop through all hunks before the selected hunk,
                # accumulating their +/- balance so the generated diff (and
                # therefore undo) uses correct line numbers.
                for type in hunk_ref.line_types:
                    if type == "+":
                        add_length_earlier_in_diff += 1
                    elif type == "-":
                        add_length_earlier_in_diff -= 1
                    else:
                        # should never happen that it will raise.
                        raise ValueError('type have to be eather "+" or "-"')
        # Correct hunk not found.
        else:
            return
        # Rebuild the `@@` header so the hunk applies stand-alone.
        stand_alone_header = \
            "@@ -{head_start},{head_length} +{new_start},{new_length} @@".format(
                head_start=hunk_ref.hunk.head_start + (add_length_earlier_in_diff if reset else 0),
                head_length=hunk_ref.hunk.head_length,
                # If head_length is zero, diff will report original start position
                # as one less than where the content is inserted, for example:
                #   @@ -75,0 +76,3 @@
                new_start=hunk_ref.hunk.head_start + (0 if hunk_ref.hunk.head_length else 1),
                new_length=hunk_ref.hunk.saved_length
            )
        return "\n".join([stand_alone_header] + hunk_ref.hunk.raw_lines[1:])
class GsInlineDiffOpenFile(TextCommand):
    """
    Opens an editable view of the file being diff'd.
    """
    @util.view.single_cursor_coords
    def run(self, coords, edit):
        if not coords:
            return
        cursor_line, cursor_column = coords
        # Git lines/columns are 1-indexed; Sublime rows/columns are 0-indexed.
        row, col = self.get_editable_position(cursor_line + 1, cursor_column + 1)
        self.open_file(row, col)
    def open_file(self, row, col):
        """Open the diffed file at the given 1-indexed row/column."""
        file_name = self.view.settings().get("git_savvy.file_path")
        self.view.window().open_file(
            "{file}:{row}:{col}".format(
                file=file_name,
                row=row,
                col=col
            ),
            sublime.ENCODED_POSITION
        )
    def get_editable_position(self, line_no, col_no):
        """Translate a position in the inline-diff view to the saved file's coordinates."""
        hunk_ref = self.get_closest_hunk_ref_before(line_no)
        # No diff hunks exist before the selected line.
        if not hunk_ref:
            return line_no, col_no
        # The selected line is within the hunk.
        if hunk_ref.section_end >= line_no:
            hunk_change_index = line_no - hunk_ref.section_start - 1
            change = hunk_ref.hunk.changes[hunk_change_index]
            # If a removed line is selected, the cursor will be offset by non-existant
            # columns of the removed lines. Therefore, move the cursor to column zero
            # when removed line is selected.
            return change.saved_pos, col_no if change.type == "+" else 0
        # The selected line is after the hunk.
        else:
            lines_after_hunk_end = line_no - hunk_ref.section_end - 1
            # Adjust line position for remove-only hunks.
            if all(change.type == "-" for change in hunk_ref.hunk.changes):
                lines_after_hunk_end += 1
            hunk_end_in_saved = hunk_ref.hunk.saved_start + hunk_ref.hunk.saved_length
            return hunk_end_in_saved + lines_after_hunk_end, col_no
    def get_closest_hunk_ref_before(self, line_no):
        """Return the last displayed hunk starting above `line_no`, if any."""
        hunks = diff_view_hunks[self.view.id()]
        for hunk_ref in reversed(hunks):
            if hunk_ref.section_start < line_no:
                return hunk_ref
class GsInlineDiffNavigateHunkCommand(GsNavigate):
    """
    Navigate to the next/previous hunk that appears after the current cursor
    position.
    """
    # No extra offset between the navigation target and the hunk region.
    offset = 0
    def get_available_regions(self):
        """Return one region per displayed hunk, spanning its full line range."""
        return [
            sublime.Region(
                self.view.text_point(hunk.section_start, 0),
                self.view.text_point(hunk.section_end + 1, 0))
            for hunk in diff_view_hunks[self.view.id()]]
class GsInlineDiffUndo(TextCommand, GitCommand):
    """
    Revert the most recent stage/unstage performed in this inline-diff view,
    when a history entry is available.
    """
    def run(self, edit):
        sublime.set_timeout_async(self.run_async, 0)
    def run_async(self):
        history = self.view.settings().get("git_savvy.inline_diff.history") or []
        if not history:
            return
        last_args, last_stdin, encoding = history.pop()
        # Re-apply the recorded patch with the `--reverse` flag toggled,
        # which exactly inverts the recorded operation.
        last_args[2] = None if last_args[2] else "--reverse"
        self.git(*last_args, stdin=last_stdin, stdin_encoding=encoding)
        self.view.settings().set("git_savvy.inline_diff.history", history)
        self.view.run_command("gs_inline_diff_refresh")
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
"""
蓝鲸统一错误码处理
"""
# Unified BlueKing error-code table: name -> code / reason / solution.
# All codes are strings for consistency (REQUEST_JOB_ERROR was a bare int
# in the original, unlike every sibling entry).
bk_error_codes_conf = {
    'REDIS_CONNECTION_ERROR': {
        'code': '1306001',
        'reason': 'Redis connection failed',
        'solution': 'Check if the redis configuration is correct and if the service is normal',
    },
    'COMPONENT_REGISTER_ERROR': {
        'code': '1306101',
        'reason': 'Component code logic error, cannot be loaded',
        'solution': 'Check the code logic based on the exception message to exclude exceptions',
    },
    'COMPONENT_COMP_CONF_ERROR': {
        'code': '1306102',
        'reason': 'The component configuration in the component channel is not a valid JSON string',
        'solution': 'Check the component configuration, and the JSON string needs to be a dict or a list that can be converted to a dict',  # noqa
    },
    'REQUEST_THIRD_PARTY_ERROR': {
        'code': '1306201',
        'reason': 'An exception appeared while requesting a third-party system interface',
        'solution': 'Check if the third-party system interface service is normal',
    },
    'THIRD_PARTY_RESP_ERROR': {
        'code': '1306202',
        'reason': 'Return data from the third-party system interface is not a valid JSON string',
        'solution': 'Check if the third-party system interface service is normal',
    },
    'REQUEST_SSL_ERROR': {
        'code': '1306203',
        'reason': 'A SSLError occurred while requesting a third-party system interface',
        'solution': 'Check if the folder in the component configuration corresponding to SSL_ROOT_DIR exists, and if the certificates are valid',  # noqa
    },
    'REQUEST_GSE_ERROR': {
        'code': '1306204',
        'reason': 'An error occurred while accessing the system GSE interface',
        'solution': 'Check if the GSE system interface is normal',
    },
    'REQUEST_SMTP_ERROR': {
        'code': '1306205',
        'reason': 'An error occurred while accessing the SMTP email service',
        'solution': 'Check if the sending email component SMTP configuration is correct, and if the SMTP email service is normal',  # noqa
    },
    # third-party system error code
    'REQUEST_JOB_ERROR': {
        'code': '1306221',
        'reason': 'An error occurred while accessing the system JOB service',
        'solution': 'Check if the JOB system interface is normal',
    },
}
class ErrorCode(object):
    """A single platform error code together with its explanation and remedy."""
    def __init__(self, code_name, code, reason, solution):
        # Plain value object: store each field verbatim.
        self.code_name, self.code = code_name, code
        self.reason, self.solution = reason, solution
class BkErrorCodes(object):
    """Attribute-style registry over bk_error_codes_conf.

    `bk_error_codes.REDIS_CONNECTION_ERROR` returns the matching ErrorCode.
    """
    def __init__(self):
        # BUGFIX: dict.iteritems() is Python 2 only and crashes on Python 3;
        # .items() behaves identically on both.
        self._error_codes_dict = {
            code_name: ErrorCode(code_name, **error_code)
            for code_name, error_code in bk_error_codes_conf.items()
        }
    def __getattr__(self, code_name):
        try:
            return self._error_codes_dict[code_name]
        except KeyError:
            # __getattr__ must raise AttributeError (not KeyError) so that
            # hasattr()/getattr(..., default) behave correctly.
            raise AttributeError(code_name)
bk_error_codes = BkErrorCodes()
|
def parse_transaction_as_dict(transaction):
    """Return *transaction* unchanged.

    NOTE(review): the original built an empty dict and discarded it; the
    name suggests a transaction-to-dict conversion was intended but never
    implemented. The dead local is removed here; the observable behavior
    (identity pass-through) is preserved until the real conversion is
    specified.
    """
    # TODO: implement the actual transaction -> dict conversion.
    return transaction
from structure.constants import LAMBDA
import sys
from typing import List
def variable_and_alpha_review(variable_list: List, alpha_list: List, transitions_list: List, initial_variable: str):
    '''
    Verify that the initial variable and every transition only use known
    variables and alphabet symbols for the chosen language.

    On the first problem found, print the error message and exit the
    process with status 1 (same behavior as before; the original raised a
    ValueError only to immediately catch, print, and exit).
    '''
    if initial_variable not in variable_list:
        print(f'There is no variable {initial_variable}, tip: check the JSON file')
        sys.exit(1)
    for transition in transitions_list:
        variable = transition[0]
        argument = transition[1]
        if variable not in variable_list:
            print(f'There is no variable {variable}, tip: check the JSON file')
            sys.exit(1)
        for arg in argument:
            # Each argument symbol must be a variable, an alphabet symbol,
            # or the empty-word marker LAMBDA.
            if not (arg in variable_list or arg in alpha_list or arg == LAMBDA):
                print(f'There is no variable or alphabet simbol for {arg}, tip: check the JSON file')
                sys.exit(1)
|
# Raw string avoids the invalid '\d' escape sequences in the Windows path
# (a DeprecationWarning today, a SyntaxError in future Python versions).
with open(r'python\2020\day5\data.txt') as f:
    lines = f.readlines()
def getRowAndColumn(line):
    """Decode a boarding-pass string into its (row, column) pair.

    Characters 0-6 ('F'/'B') binary-partition the rows 0-127; characters
    7-9 ('L'/'R') partition the columns 0-7.
    """
    # Row: narrow the range [0, 127] with the first six characters.
    lo, hi = 0, 127
    for ch in line[:6]:
        step = (hi - lo) // 2 + 1
        if ch == 'B':
            lo += step
        else:
            hi -= step
    # The seventh character resolves the final two-row range.
    row = lo if line[6] == 'F' else hi
    # Column: narrow the range [0, 7] with characters 7 and 8.
    lo, hi = 0, 7
    for ch in line[7:9]:
        step = (hi - lo) // 2 + 1
        if ch == 'R':
            lo += step
        else:
            hi -= step
    # The tenth character resolves the final two-column range.
    column = lo if line[9] == 'L' else hi
    return row, column
def getSeatId(row, column):
    """A seat ID is the row scaled by eight, plus the column."""
    return column + 8 * row
def findMaxSeatId(lines):
    """Return the largest seat ID among the boarding passes in *lines*."""
    best = 0
    for entry in lines:
        seat = getSeatId(*getRowAndColumn(entry))
        if seat > best:
            best = seat
    return best
def findMySeat(lines):
    """Return the one missing seat ID sitting between two occupied neighbours."""
    seat_ids = sorted(getSeatId(*getRowAndColumn(entry)) for entry in lines)
    # Walk the sorted IDs until a gap of one appears.
    i = 0
    while seat_ids[i] + 1 == seat_ids[i + 1]:
        i += 1
    print('found missing seat', seat_ids[i], seat_ids[i + 1])
    return seat_ids[i] + 1
# print('max seat id is', findMaxSeatId(lines))
# Part 2: report the missing (my) seat ID among the parsed boarding passes.
print('my seat id is', findMySeat(lines))
# -*- coding: utf-8 -*-
"""
| '_ \ / _` |_____ / __/ _ \| '_ ` _ \| '_ \ / _` | '__/ _ \
| |_) | (_| |_____| (_| (_) | | | | | | |_) | (_| | | | __/
| .__/ \__, | \___\___/|_| |_| |_| .__/ \__,_|_| \___|
|_| |___/ |_|
This is used to compare two databases. Takes two connection strings. One it
considers truth and another to test against it. Used to determine that both
databases are the same.
"""
import time
from models import AttributeContainer
# Shared, mutable run configuration for the comparison tool; the fields are
# expected to be populated by the CLI/entry point before any test runs.
config = AttributeContainer()
config.truth_db_conn_string = None  # connection string of the reference ("truth") database
config.test_db_conn_string = None  # connection string of the database under test
config.truth_db = None  # open handle/session for the truth database
config.test_db = None  # open handle/session for the test database
config.outfile = None  # where comparison results are written
config.available_tests = []  # registered comparison tests
config.start_time = time.time()  # wall-clock start, for elapsed-time reporting
if __name__ == '__main__':
    # Module is import-only; there is intentionally no standalone behavior.
    pass
from __future__ import (
absolute_import,
print_function,
unicode_literals
)
import sys
def print_stderr(message):
    """
    Write *message* (followed by a newline) to the standard error stream.
    """
    sys.stderr.write(str(message) + "\n")
def force_exit(message):
    """
    Abort the program: report *message* on stderr, then exit with status 1.
    """
    print_stderr(message)
    raise SystemExit(1)
|
from proxybroker.api import Broker
import requests
import asyncio
import logging
import multiprocessing
from queue import Empty
from datetime import datetime
# Emit INFO-level records from this module's logging calls.
logging.basicConfig(level=logging.INFO)
class ListMaker:
    """Harvests working proxies from a background ProxyFinderProcess.

    Results arrive on a multiprocessing queue as (proxy, fetched_at) pairs
    and are accumulated in `proxy_list`, bounded by `max_list_length`.
    """
    # Tuples so the shared class-level defaults can never be mutated:
    # ProxyFinderProcess edits the `types` list in place, which with the
    # original mutable-default arguments corrupted every later instance.
    DEFAULT_TYPES = ('HTTP', 'HTTPS', 'SOCKS4', 'SOCKS5')
    DEFAULT_TEST_SITES = ('https://google.com', 'https://en.wikipedia.org', 'https://nhentai.net')
    def __init__(
            self, limit=40, max_resp_time=1.0, countries=None, max_list_length=1000,
            anonymity=None, types=None, test_sites=None):
        # Fresh per-instance copies of the default lists.
        if types is None:
            types = list(self.DEFAULT_TYPES)
        if test_sites is None:
            test_sites = list(self.DEFAULT_TEST_SITES)
        try:
            multiprocessing.set_start_method('spawn')
        except RuntimeError:
            # BUGFIX: the original used `is not 'spawn'`, which compares
            # string identity (implementation-defined); compare by value.
            if multiprocessing.get_start_method() != 'spawn':
                raise RuntimeError(
                    "Multiprocessing method of starting child processes has to be 'spawn'")
        self.results_queue = multiprocessing.Queue()
        self._poison_pill = multiprocessing.Event()
        self._proxy_finder = ProxyFinderProcess(
            self.results_queue, self._poison_pill, limit=limit,
            max_resp_time=max_resp_time, countries=countries,
            anonymity=anonymity, types=types, test_sites=test_sites)
        self.proxy_list = []
        self.max_list_length = max_list_length
    def start(self):
        """Launch the background finder process."""
        self._proxy_finder.start()
        logging.info("A new proxy finder is born.")
    def get_n_proxies(self, n):
        """Start the finder, drain up to `n` queued results, stop, and return
        them as simple dicts.

        NOTE(review): the queue is drained immediately after start(), so in
        practice fewer than `n` items may be available -- confirm intended.
        """
        self.start()
        while True:
            try:
                item = self.results_queue.get_nowait()
            except Empty:
                break
            else:
                self.proxy_list.append(item)
                n -= 1
                if n <= 0:
                    break
        self.stop()
        return self.get_simple_list()
    def stop(self):
        """Signal the finder process to exit and wait for it to terminate."""
        self._poison_pill.set()
        self._proxy_finder.join()
        logging.info("The proxy finder is poisoned to death.")
    def update_proxies(self):
        """Drain all queued (proxy, fetched_at) pairs into `proxy_list`,
        trimming the oldest quarter once the list exceeds max_list_length."""
        while True:
            try:
                proxy, fetched_at = self.results_queue.get_nowait()
            except Empty:
                break
            else:
                self.proxy_list.append((proxy, fetched_at))
                if len(self.proxy_list) > self.max_list_length:
                    self.proxy_list = self.proxy_list[self.max_list_length // 4:]
    def get_list(self):
        """Return the raw (proxy, fetched_at) list."""
        return self.proxy_list
    def get_simple_list(self):
        """Return the collected proxies as plain, JSON-friendly dicts."""
        simple_list = []
        for proxy, fetched_at in self.proxy_list:
            proxy_type = list(proxy.types.keys())[0]
            anonymity = None
            # Only HTTP proxies carry an anonymity level.
            if 'HTTP' == proxy_type:
                anonymity = proxy.types['HTTP']
            simple_list.append({
                'type': proxy_type,
                'host': proxy.host,
                'port': proxy.port,
                'avg_resp_time': proxy.avg_resp_time,
                'country_code': proxy.geo[0],
                'country_name': proxy.geo[1],
                'is_working': proxy.is_working,
                'anonymity': anonymity,
                'fetched_at': fetched_at
            })
        return simple_list
class ProxyFinderProcess(multiprocessing.Process):
    """Child process that discovers proxies via proxybroker and re-tests them.

    Candidate proxies arrive on an internal asyncio queue from the Broker;
    each is re-validated against ``test_sites`` with ``requests`` and, on
    success, pushed to ``results_queue`` as ``(proxy, fetched_at)``.
    """

    def __init__(self, proxy_queue, poison_pill, limit=40,
                 max_resp_time=1.0, countries=None, anonymity=None,
                 types=None, test_sites=None):
        """Configure the finder; nothing runs until ``start()`` is called.

        ``types`` and ``test_sites`` default to ``None`` rather than mutable
        list literals, which would be shared between instances.
        """
        multiprocessing.Process.__init__(self)
        if types is None:
            types = ['HTTP', 'HTTPS', 'SOCKS4', 'SOCKS5']
        if test_sites is None:
            test_sites = ['https://google.com', 'https://en.wikipedia.org']
        self.results_queue = proxy_queue
        self.poison_pill = poison_pill
        self.max_resp_time = max_resp_time
        if types is not None and anonymity is not None and "HTTP" in types:
            # Work on a copy so a caller-supplied list is not mutated in
            # place; 'HTTP' becomes ('HTTP', anonymity-levels) for the Broker.
            types = list(types)
            types.remove("HTTP")
            types.append(("HTTP", tuple(anonymity)))
        self.types = types
        self.countries = countries
        self.limit = limit
        self.proxy_list = []
        self.test_sites = test_sites

    def _basic_test_proxy(self, proxy):
        """Return True when *proxy* is fast enough and reaches every test site."""
        if proxy.avg_resp_time > self.max_resp_time:
            return False
        proxy_type = list(proxy.types.keys())[0].lower()
        url = "{0}://{1}:{2}".format(proxy_type, proxy.host, str(proxy.port))
        logging.debug("Proxy URL: %s" % (url,))
        proxy_dict = {'http': url, 'https': url, 'ftp': url}
        for test_site in self.test_sites:
            try:
                requests.get(test_site, proxies=proxy_dict, timeout=1)
                logging.info("Working Proxy: %s" % (proxy,))
            except (requests.exceptions.ProxyError,
                    requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout):
                return False
        return True

    async def async_to_results(self):
        """Consume broker output until poisoned or the stream ends (None)."""
        while not self.poison_pill.is_set():
            proxy = await self.async_queue.get()
            if proxy is None:
                # The Broker signals end-of-stream with None.
                break
            else:
                if self._basic_test_proxy(proxy) is False:
                    continue
                self.results_queue.put((proxy, datetime.now()))
        self.broker.stop()

    def run(self):
        """Process entry point: wire up the Broker and drive the event loop.

        NOTE(review): ``asyncio.get_event_loop()`` outside a running loop is
        deprecated on modern Python; kept as-is because the Broker/gather may
        bind to the current loop during construction -- confirm before
        modernizing.
        """
        self.async_queue = asyncio.Queue()
        self.broker = Broker(queue=self.async_queue, timeout=2, max_tries=1, verify_ssl=True)
        self.tasks = asyncio.gather(
            self.broker.find(types=self.types, countries=self.countries,
                             strict=True, limit=self.limit),
            self.async_to_results())
        self.loop = asyncio.get_event_loop()
        self.loop.run_until_complete(self.tasks)
# For testing
# list_maker = ListMaker(limit=8)
# list_maker.make_list()
# print(list_maker.get_list())
# print(list_maker.get_simple_list())
|
import os
import sys
import re
import numpy as np
import math
from itertools import repeat
from indexparser import parse_index, rev_parse_index
from parse49 import parse_49
from genfchk import quicksave
VERSION = 'v4.1' # Internal version numbering
# Banner text written at the top of the saved .txt report (see savetxt()).
VERSIONTEXT = '''
Original version honorably delivered by Acid&Francium (with a blast).
Most recent update on 13 Aug 2021.
'''
# Literature references for the PIO method implemented below.
REFERENCE_LIST = '''
Original publication of PIO: doi.org/10.1002/chem.201801220
Extension to spin-polarized systems: doi.org/10.1039/D0CP00127A
A recent review on PIO: doi.org/10.1002/wcms.1469
'''
def myeigh(mat, rev=False):
    """Eigendecompose a Hermitian matrix, returning eigenvectors as ROWS.

    Eigenvalues come back sorted ascending (descending when *rev* is True)
    and the row-eigenvectors are permuted to match.
    """
    values, vectors = np.linalg.eigh(mat)
    order = np.argsort(values)
    if rev:
        order = order[::-1]
    return values[order], vectors.T[order]
def PIO(ffchk, fn49='', fragmentation=None, silent=False):
    """Run a PIO analysis for the system described by an .fchk / NBO .49 pair.

    Parameters:
        ffchk: path to the formatted checkpoint file (used for naming output
            and later re-saving orbitals via savefchk()).
        fn49: path to the NBO .49 file; when empty, several candidate names
            derived from *ffchk* are probed on disk.
        fragmentation: two whitespace-separated atom-index specs, e.g.
            '1-5,8,13 6-7,9-12'; prompted for interactively when None.
        silent: suppress progress printing when True.

    Returns:
        dict with the parsed matrices, fragment metadata and the genPIO()
        results; the same dict is also written to disk via save().
    """
    path, fn = os.path.split(ffchk)
    title, ext = os.path.splitext(fn)
    if fn49 == '':
        # Probe likely .49 filenames next to the .fchk.  The third candidate
        # pads short titles with characters from 'FILE' -- presumably to match
        # an NBO naming scheme for short job names; TODO confirm.
        for fn in [os.path.join(path, title + '.49'),
                   os.path.join(path, title.upper() + '.49'),
                   os.path.join(path, title.upper() + 'FILE'[len(title)-4:] + '.49')]:
            if os.path.isfile(fn):
                fn49 = fn
                break
        else:
            raise UserWarning('.49 file not found.')
    raw = parse_49(fn49)
    naoao = raw['NAOAO']          # NAO-to-AO transformation matrix
    naolabels = raw['NAOAOlabel'] # per-NAO labels (carry atom .center info)
    if 'DNAO' in raw:
        # Spin-restricted case: a single density (and optional Fock) matrix.
        dim_spin = 1
        dmnao = np.array([raw['DNAO']])
        if 'FNAO' in raw:
            fmnao = np.array([raw['FNAO']])
        else:
            fmnao = None
    else:
        # Spin-polarized case: separate alpha/beta matrices are required.
        assert 'DNAO1' in raw
        assert 'DNAO2' in raw
        dim_spin = 2
        dmnao = np.array([raw['DNAO1'], raw['DNAO2']])
        if 'FNAO1' in raw and 'FNAO2' in raw:
            fmnao = np.array([raw['FNAO1'], raw['FNAO2']])
        else:
            fmnao = None
    dim_nao, dim_bs = naoao.shape
    assert dmnao.shape == (dim_spin, dim_nao, dim_nao)
    if not silent:
        print('Density matrix read in.')
    if fragmentation is None:
        prompt = '$ Please input the atom ID of two fragments: (e.g. 1-5,8,13 6-7,9-12)\n$ '
        fragmentation = input(prompt)
    fragments = fragmentation.strip().lower().split()
    assert len(fragments) == 2
    atoms1 = parse_index(fragments[0])
    atoms2 = parse_index(fragments[1])
    # Map atom selections to the NAO orbital indices centered on those atoms.
    oids1 = np.array([orb for orb in range(dim_nao) if naolabels[orb].center in atoms1])
    oids2 = np.array([orb for orb in range(dim_nao) if naolabels[orb].center in atoms2])
    # Sanity checks: no duplicates, fragments disjoint.
    assert len(set(oids1)) == len(oids1)
    assert len(set(oids2)) == len(oids2)
    assert not set(oids1).intersection(set(oids2))
    if set(oids1).union(set(oids2)) != set(range(dim_nao)):
        print('Warning: Incomplete fragmentation detected!')
    # "Ghost atoms": selected atoms contributing no orbitals to the fragment.
    if set([naolabels[orb].center for orb in oids1]) != set(atoms1):
        print('Warning: Ghost atoms detected in fragment A!')
    if set([naolabels[orb].center for orb in oids2]) != set(atoms2):
        print('Warning: Ghost atoms detected in fragment B!')
    data = {}
    data['srcfile'] = fn49
    data['title'] = title
    data['dim'] = (dim_spin, dim_nao, dim_bs)
    data['fragments'] = (sorted(set([naolabels[orb].center for orb in oids1])),
                         sorted(set([naolabels[orb].center for orb in oids2])))
    data['oids'] = (oids1, oids2)
    data['NAOAO'] = naoao
    data['DNAO'] = dmnao
    if fmnao is not None:
        data['FNAO'] = fmnao
    data['NAOAOlabel'] = naolabels
    # Core computation: PIO/PIMO transformation matrices and bond indices.
    data.update(genPIO(dmnao, oids1, oids2, fmnao=fmnao, silent=silent))
    npio = data['#PIO']
    pbi = data['PBI']
    # Scatter the per-PIO bond indices onto both fragments' orbital slots so
    # each orbital of a PIO pair carries the pair's PBI value.
    pbio = np.zeros((dim_spin, dim_nao))
    for spin in range(dim_spin):
        pbio[spin,oids1[:npio]] = pbi[spin,:npio]
        pbio[spin,oids2[:npio]] = pbi[spin,:npio]
    data['PBIO'] = pbio
    save(data, ffchk, silent=silent)
    return data
def genPIO(dmnao, oids1, oids2, fmnao=None, tol=1e-6, silent=False):
    """Compute PIO/PIMO transformation matrices and bond indices per spin.

    For each spin channel: the two fragment diagonal blocks of the NAO
    density matrix are diagonalized, the inter-fragment off-diagonal block is
    SVD-factorized in those eigenbases, and the squared singular values
    (scaled by dim_spin) become the PIO bond indices (PBI).

    Parameters:
        dmnao: (dim_spin, dim, dim) density matrices in the NAO basis.
        oids1, oids2: orbital index arrays of fragments A and B.
        fmnao: optional Fock matrices, used to canonicalize null-space PIOs.
        tol: threshold below which couplings/occupations are treated as zero.

    Returns:
        dict with '#PIO' (pair count), 'PIONAO', 'PIMONAO' (row-transformation
        matrices) and 'PBI' (bond indices).
    """
    dim_spin = dmnao.shape[0]
    dim = dmnao.shape[-1]
    assert dmnao.shape == (dim_spin, dim, dim)
    oids1, oids2 = np.array(oids1), np.array(oids2)
    # At most min(|A|, |B|) PIO pairs exist (rank of the off-diagonal block).
    npio = min(len(oids1), len(oids2))
    pionao = np.zeros((dim_spin, dim, dim))
    pbi = np.zeros((dim_spin, npio))
    pimonao = np.zeros((dim_spin, dim, dim))
    for spin, dmnao_spin in enumerate(dmnao):
        # Eigenbases of the fragment-diagonal density blocks.
        vals1, vecs1 = myeigh(dmnao_spin[oids1[:,None],oids1])
        vals2, vecs2 = myeigh(dmnao_spin[oids2[:,None],oids2])
        # Inter-fragment coupling block expressed in those eigenbases.
        off_block = vecs1.dot(dmnao_spin[oids1[:,None],oids2]).dot(vecs2.T)
        off_block[abs(off_block) < tol] = 0
        U, D, V = np.linalg.svd(off_block, full_matrices=True)
        # Rotate the SVD vectors back to the NAO basis of each fragment.
        U = vecs1.T.dot(U)
        V = V.dot(vecs2)
        D2 = D**2
        pionao[spin,oids1[:,None],oids1] = U.T
        pionao[spin,oids2[:,None],oids2] = V
        # Squared singular values, scaled by spin multiplicity, are the PBIs.
        pbi[spin,:npio] = D2 * dim_spin
        dmpio = pionao[spin].dot(dmnao[spin]).dot(pionao[spin].T)
        # Number of electron pairs, from the (rounded) density-matrix trace.
        nep = int(math.ceil(np.round(dmnao[spin].trace()) * dim_spin / 2))
        # Re-diagonalize the density within each fragment's null space
        # (orbitals beyond the electron-pair / PIO count).
        for nullspace in (np.array(oids1)[min(nep,npio):], np.array(oids2)[min(nep,npio):]):
            evals, evecs = myeigh(dmpio[nullspace[:,None],nullspace], rev=True)
            pionao[spin,nullspace] = evecs.dot(pionao[spin,nullspace])
        # Canonicalize the null-space orbitals against the Fock matrix when
        # available, otherwise against the density matrix.
        # NOTE(review): this uses 'nullspace'/'evals' left over from the LAST
        # iteration of the loop above (fragment B only); the NameError guard
        # looks defensive -- confirm this asymmetry is intended.
        try:
            if fmnao is not None:
                fm = pionao[spin].dot(fmnao[spin]).dot(pionao[spin].T)
            else:
                fm = pionao[spin].dot(dmnao[spin]).dot(pionao[spin].T)
            null2 = nullspace[abs(evals-2)<tol]   # doubly-occupied null orbitals
            null0 = nullspace[abs(evals-0)<tol]   # empty null orbitals
            evals, evecs = myeigh(fm[null2[:,None],null2])
            pionao[spin,null2] = evecs.dot(pionao[spin,null2])
            evals, evecs = myeigh(fm[null0[:,None],null0])
            pionao[spin,null0] = evecs.dot(pionao[spin,null0])
        except NameError:
            pass
        # PIMOs: start from the PIOs, then mix each occupied A/B pair through
        # its 2x2 density block so each pair becomes bonding/antibonding-like.
        pimonao[spin] = pionao[spin]
        for oid1, oid2 in list(zip(oids1, oids2))[:nep]:
            D = dmpio[[oid1, oid2]][:,[oid1,oid2]]
            evals, evecs = myeigh(D,rev=True)
            # Fix the sign convention of each eigenvector.
            evecs *= np.sign(evecs.sum(axis=1))[:,None]
            pimonao[spin,[oid1,oid2]] = evecs.dot(pionao[spin,[oid1,oid2]])
    if not silent:
        print('PIO analysis completed.')
    data = {'#PIO':npio, 'PIONAO': pionao, 'PIMONAO': pimonao, 'PBI': pbi}
    return data
def save(data, ffchk, silent=False):
    """Persist PIO results: a text report next to *ffchk* plus fchk orbital files."""
    savetxt(data, os.path.splitext(ffchk)[0]+'_pio.txt', silent=silent)
    savefchk(data, ffchk, silent=silent)
    # saveawc(data, os.path.splitext(ffchk)[0]+'_pio.awc', silent=silent) # Internal use only for now
def savefchk(data, ffchk, silent=False):
    """Write PIO and PIMO orbitals (transformed to the AO basis) as fchk files.

    The NAO-basis transformation matrices are composed with NAOAO to obtain
    AO-basis coefficients, then saved via genfchk.quicksave with '_pio' and
    '_pimo' suffixes; PBIO values are stored alongside as per-orbital data.
    """
    naoao = data['NAOAO']
    pionao = data['PIONAO']
    pimonao = data['PIMONAO']
    pbio = data['PBIO']
    # Rows are orbitals: (orb x NAO) . (NAO x AO) -> (orb x AO).
    pioao = pionao.dot(naoao)
    pimoao = pimonao.dot(naoao)
    quicksave(ffchk, pioao, pbio, suffix='_pio', overwrite=True)
    if not silent:
        print('PIO saved to fchk files.')
    quicksave(ffchk, pimoao, pbio, suffix='_pimo', overwrite=True)
    if not silent:
        print('PIMO saved to fchk files.')
def savetxt(data, ofn, silent=False):
    """Write the human-readable PIO report (per-spin orbital tables) to *ofn*.

    For each spin channel a fixed-width table is emitted pairing fragment-A
    and fragment-B orbitals with populations, energies, interaction strength
    (PBI), cumulative contribution, and the PIMO-basis counterparts.
    """
    atoms1, atoms2 = data['fragments']
    dim_spin, dim_pio, dim_bs = data['dim']
    oids1, oids2 = data['oids']
    naoao = data['NAOAO']
    pbi = data['PBI']
    pionao = data['PIONAO']
    pimonao = data['PIMONAO']
    dmnao = data['DNAO']
    # Fock matrix is optional; fall back to zeros so energy columns print as 0.
    fmnao = data.get('FNAO', np.zeros_like(dmnao))
    npio = data['#PIO']
    # Density/Fock matrices transformed into the PIO and PIMO bases, per spin.
    dmpio = np.array([pionao[_].dot(dmnao[_]).dot(pionao[_].T) for _ in range(dim_spin)])
    fmpio = np.array([pionao[_].dot(fmnao[_]).dot(pionao[_].T) for _ in range(dim_spin)])
    dmpimo = np.array([pimonao[_].dot(dmnao[_]).dot(pimonao[_].T) for _ in range(dim_spin)])
    fmpimo = np.array([pimonao[_].dot(fmnao[_]).dot(pimonao[_].T) for _ in range(dim_spin)])
    total = pbi.sum()
    # Row format for data lines; the header format is derived from it by
    # turning every numeric conversion into a same-width string conversion.
    mod = '%5d%1s %10.5f %10.5f %5d%1s %10.5f %10.5f %10.5f %10.5f %8.2f %10.5f %10.5f %10.5f %10.5f %10.5f %10.5f'
    # Raw string: '\.' and '\d' are regex escapes, not string escapes.
    modt = re.sub(r'(\.\d+)?[fd]', 's', mod)
    with open(ofn, 'w') as f:
        f.write('PIO %s\n' % VERSION)
        f.write('%s\n' % VERSIONTEXT)
        f.write('Fragment A: %s (Orbitals: %s)\n' % (rev_parse_index(atoms1), rev_parse_index(oids1+1)))
        f.write('Fragment B: %s (Orbitals: %s)\n' % (rev_parse_index(atoms2), rev_parse_index(oids2+1)))
        if dim_spin == 1:
            f.write('Total interaction: %f\n' % total)
        else:
            f.write('Total interaction: %f\n' % total)
            f.write('Alpha interaction: %f\n' % pbi[0].sum())
            f.write('Beta interaction: %f\n' % pbi[1].sum())
        f.write('\n')
        for spin in range(dim_spin):
            # Electron-pair count from the (rounded) density-matrix trace.
            nep = int(math.ceil(np.round(dmnao[spin].trace()) * dim_spin / 2))
            pbispin = pbi[spin]
            # Per-orbital populations/energies in the PIO basis ...
            pop1 = np.diag(dmpio[spin])[oids1]
            pop2 = np.diag(dmpio[spin])[oids2]
            energy1 = np.diag(fmpio[spin])[oids1]
            energy2 = np.diag(fmpio[spin])[oids2]
            # ... and in the PIMO basis (bonding/antibonding combinations).
            popb = np.diag(dmpimo[spin])[oids1]
            popa = np.diag(dmpimo[spin])[oids2]
            energyb = np.diag(fmpimo[spin])[oids1]
            energya = np.diag(fmpimo[spin])[oids2]
            contrib = pbispin / total * 100
            cum = np.cumsum(contrib)
            # Cross-fragment interaction elements for each PIO/PIMO pair.
            ie = [fmpio[spin][i][j] for i, j in zip(oids1, oids2)]
            rie = [fmpimo[spin][i][j] for i, j in zip(oids1, oids2)]
            if dim_spin == 2:
                spincode = 'ALPHA' if spin == 0 else 'BETA'
                f.write('---%s---\n' % spincode)
            f.write('Fragment A'.center(26))
            f.write('Fragment B'.center(26))
            f.write('\n')
            f.write(modt % ('Or', 'b', 'Pop ', 'E ', 'Or', 'b', 'Pop ', 'E ',
                            'Ixn ', 'Contrib%', 'Cum%', 'IE ',
                            'EB ', 'EA ', 'PopB ', 'PopA ', 'RIE '))
            f.write('\n')
            if dim_spin == 2:
                orbspin = 'a' if spin == 0 else 'b'
            else:
                orbspin = ''
            rows = list(zip(oids1 + 1, repeat(orbspin), pop1, energy1,
                            oids2 + 1, repeat(orbspin), pop2, energy2,
                            pbispin[:min(nep,npio)], contrib, cum, ie,
                            energyb, energya, popb, popa, rie))
            # Plain iteration instead of the previous O(n^2) while/pop(0) loop.
            for row in rows:
                f.write(mod % row)
                f.write('\n')
            f.write('\n\n')
        # (The explicit f.close() was redundant inside the 'with' block.)
    if not silent:
        print('PIO result saved to file.')
def saveawc(data, ofn, silent=False):
    """Write per-atom (and per-NAO) weight contributions of each PIO to *ofn*.

    The weight of atom *a* in PIO *p* is the sum of squared PIONAO
    coefficients of the NAOs centered on *a*; only PIOs with a non-negligible
    bond index and contributions above 0.01 are listed.
    """
    dim_spin, dim_pio, dim_bas = data['dim']
    naolabels = data['NAOAOlabel']
    pionao = data['PIONAO']
    pbio = data['PBIO']
    assert pbio.shape == (dim_spin, dim_pio)
    assert len(naolabels) == dim_pio
    assert pionao.shape == (dim_spin, dim_pio, dim_pio)
    atoms = sorted(set([nao.center for nao in naolabels]))
    # awc[s, p, a]: weight of atom a in PIO p for spin channel s.
    awc = np.zeros((dim_spin, dim_pio, len(atoms)))
    for spin in range(dim_spin):
        for pio in range(dim_pio):
            for nao in range(dim_pio):
                awc[spin, pio, naolabels[nao].center-1] += pionao[spin,pio,nao] ** 2
    with open(ofn, 'w') as f:
        for spin in range(dim_spin):
            if dim_spin == 2:
                spincode = 'ALPHA' if spin == 0 else 'BETA'
                f.write('---%s---\n' % spincode)
            for pio in range(dim_pio):
                # Skip essentially non-interacting PIOs.
                if pbio[spin,pio] >= 0.01 / dim_spin:
                    f.write('PIO %-4d\n' % (pio + 1))
                    for at in range(len(atoms)):
                        if awc[spin, pio, at] > 0.01:
                            f.write(' %-4d %8.4f\n' % (at + 1, awc[spin, pio, at]))
                            # Break the atom weight down into individual NAOs.
                            for nao in range(dim_pio):
                                if naolabels[nao].center == at + 1 and pionao[spin,pio,nao] ** 2 >= 0.01:
                                    f.write(' %-8s %8.4f\n' % (naolabels[nao].label(), pionao[spin,pio,nao] ** 2))
            f.write('\n\n')
        f.close()
    if not silent:
        print('Atom&NAO contributions saved to file.')
def main():
    """CLI entry point: ``pio.py <fchk> [<file49> [<fragA> <fragB>]]``."""
    ffchk = sys.argv[1]
    fn49 = sys.argv[2] if sys.argv[2:] else ''
    # PIO() expects fragmentation as a single whitespace-separated STRING
    # (it calls .strip().lower().split() on it), so join the two CLI tokens;
    # passing the raw argv slice (a list) would crash inside PIO().
    fragmentation = ' '.join(sys.argv[3:5]) if sys.argv[3:] else None
    PIO(ffchk, fn49, fragmentation)
if __name__ == '__main__':
    main()
|
from .base import loader
|
from net_conf import *
# Decoder sizes are derived from the (star-imported) encoder sizes above.
RULES_DECODER_STATE_SIZE = RULES_QUERY_ENCODER_STATE_SIZE * 2
WORDS_DECODER_STATE_SIZE = RULES_ENCODER_STATE_SIZE * 2
# Supported dataset flavors; DATA_SET_TYPE selects the active one.
DJANGO_DATA_SET_TYPE = 'DJANGO'
HS_DATA_SET_TYPE = 'HS'
DATA_SET_TYPE = DJANGO_DATA_SET_TYPE
# Dataset locations.
DATA_SET_BASE_DIR = '../django_data_set/data/'
DATA_SET_NAME = 'django_data_set_v2'
FULL_DATA_SET_NAME = 'django.cleaned.dataset.freq3.par_info.refact.space_only.order_by_ulink_len.bin'
# Model checkpoint naming: base names plus the '-N' suffixes of the best
# checkpoints selected so far.
MODEL_SAVE_PATH = 'trained/'
RULES_MODEL_BASE_NAME = 'model_rules'
WORDS_MODEL_BASE_NAME = 'model_words'
SELECTOR_MODEL_BASE_NAME = RULES_MODEL_BASE_NAME + '_selector'
BEST_RULES_MODEL_BASE_NAME = RULES_MODEL_BASE_NAME + '-6'
BEST_WORDS_MODEL_BASE_NAME = WORDS_MODEL_BASE_NAME + '-3'
BEST_SELECTOR_MODEL_BASE_NAME = SELECTOR_MODEL_BASE_NAME + '-2'
# Pretraining artifacts.
PRETRAIN_SAVE_PATH = 'pretrained/'
PRETRAIN_BASE_NAME = 'pretrain'
# Beam-search widths used during decoding.
RULES_BEAM_SIZE = 15
RULE_TREE_BEAM_SIZE = 3
|
# -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
import collections
import collections.abc
import os
from copy import deepcopy
from importlib import import_module

import six
def deep_update(from_dict, to_dict):
    """Recursively merge *from_dict* into *to_dict* in place.

    Keys whose values are mappings on BOTH sides are merged recursively;
    every other value from *from_dict* overwrites the one in *to_dict*.
    """
    for (key, value) in from_dict.items():
        # collections.Mapping was removed in Python 3.10; the ABC lives in
        # collections.abc.
        if (key in to_dict and
                isinstance(to_dict[key], collections.abc.Mapping) and
                isinstance(value, collections.abc.Mapping)):
            deep_update(value, to_dict[key])
        else:
            to_dict[key] = value
def import_tests(dir, module):
    """Recursively discover strategy test cases under *dir*.

    Walks the directory tree; every ``test*.py`` module is imported (relative
    to the ``tests.api_tests`` package) and every callable in it whose name
    starts with ``test`` is invoked to obtain the strategy's locals.  A
    module-level ``__config__`` dict acts as a default that per-strategy
    ``__config__`` entries are deep-merged on top of.

    Returns:
        dict mapping dotted '<module>.<file>.<test-name>' keys to dicts of
        strategy locals (augmented with 'config' and 'name').
    """
    strategies = {}
    for file in os.listdir(dir):
        file_path = os.path.join(dir, file)
        if os.path.isdir(file_path):
            # Recurse into sub-packages, skipping __pycache__ and friends.
            if not file.startswith("__"):
                strategies.update(import_tests(file_path, ".".join((module, file))))
        else:
            if file.endswith(".py") and file.startswith("test"):
                # file[:-3] strips the '.py' suffix.
                m = import_module(".".join((module, file[:-3])), "tests.api_tests")
                default_config = m.__dict__.pop("__config__", {})
                for obj in six.itervalues(m.__dict__):
                    if hasattr(obj, "__call__") and getattr(obj, "__name__", "").startswith("test"):
                        # Calling the test function yields its locals dict.
                        strategy_locals = obj()
                        custom_config = strategy_locals.pop("__config__", {})
                        config = deepcopy(default_config)
                        if custom_config:
                            # Per-strategy config overrides the module default.
                            deep_update(custom_config, config)
                        strategy_locals["config"] = config
                        strategy_locals["name"] = obj.__name__
                        strategies[".".join((module, file[:-3], obj.__name__))] = strategy_locals
    return strategies
# Eagerly collect every strategy found next to this file at import time.
strategies = list(six.itervalues(import_tests(os.path.dirname(__file__), "")))
|
from rest_framework import status
from rest_framework import viewsets, parsers
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from .models import Billing
from .models import DropBox
from .serializers import BillingSerializer
from .serializers import DropBoxSerializer
class BillingViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for Billing records; open to any client (AllowAny)."""
    queryset = Billing.objects.all()
    serializer_class = BillingSerializer
    permission_classes = (AllowAny,)
class DropBoxViewset(viewsets.ModelViewSet):
    """Token-authenticated endpoints for the user's DropBox files.

    Only ``list`` (plus the inherited create/partial_update/destroy) is
    functional; ``retrieve`` and ``update`` are explicitly disabled and
    answer with HTTP 400.
    """
    queryset = DropBox.objects.all()
    serializer_class = DropBoxSerializer
    parser_classes = [parsers.MultiPartParser, parsers.FormParser]
    http_method_names = ['get', 'post', 'patch', 'delete']
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def list(self, request, *args, **kwargs):
        """Return every DropBox row belonging to the requesting user."""
        user = request.user
        list_files = DropBox.objects.filter(user=user.id)
        serializer = DropBoxSerializer(list_files, many=True)
        # Dropped the stray debug print of the serializer; the message
        # previously read 'Rating updated' -- a copy-paste error for a
        # file-listing endpoint.
        response = {'message': 'Files retrieved', 'result': serializer.data}
        return Response(response, status=status.HTTP_200_OK)

    def retrieve(self, request, *args, **kwargs):
        """Single-object retrieval is not supported by this API."""
        response = {'message': 'Not allowed'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, *args, **kwargs):
        """Full update is not supported by this API (PATCH remains available)."""
        response = {'message': 'Not allowed'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)
|
# -*- coding: utf-8 -*-
import json
from typing import Dict
from .block_hash import BlockHash
from .consensus_info import ConsensusInfo
from ..utils.encoding import encode_b58
class BlockchainStatus:
    """Read-only convenience wrapper around a node's blockchain status object."""

    def __init__(self, status):
        self._status = status
        self._bbh = BlockHash(status.best_block_hash)
        self._consensus_info = ConsensusInfo(status.consensus_info)

    @property
    def best_block_hash(self) -> BlockHash:
        """Hash of the current best block."""
        return self._bbh

    @property
    def best_block_height(self) -> int:
        """Height of the current best block."""
        return self._status.best_height

    @property
    def best_chain_id_hash(self) -> bytes:
        """Raw chain-id hash of the best chain."""
        return self._status.best_chain_id_hash

    @property
    def best_chain_id_hash_b58(self) -> str:
        """Base58-encoded chain-id hash (never empty)."""
        encoded = encode_b58(self._status.best_chain_id_hash)
        assert encoded
        return encoded

    @property
    def consensus_info(self) -> ConsensusInfo:
        """Parsed consensus information."""
        return self._consensus_info

    def json(self) -> Dict:
        """Serialize the status into plain JSON-compatible types."""
        payload = {}
        payload["best_block_hash"] = str(self.best_block_hash)
        payload["best_block_height"] = self.best_block_height
        payload["best_chain_id_hash"] = self.best_chain_id_hash_b58
        payload["consensus_info"] = self.consensus_info.json()
        return payload

    def __str__(self) -> str:
        return json.dumps(self.json(), indent=2)
|
from django.contrib import admin
from .models import Profile,Post,Health,Business,Police,Neighbourhood
# Register your models here.
# Register each community-watch model with the Django admin (same order as before).
for _model in (Profile, Post, Business, Health, Police, Neighbourhood):
    admin.site.register(_model)
|
from SinGAN_models import *
from options import *
from utility_functions import *
import torch.nn.functional as F
import torch
import os
import imageio
import argparse
from typing import Union, Tuple
from matplotlib.pyplot import cm
from math import log
import matplotlib.pyplot as plt
from skimage.transform.pyramids import pyramid_reduce
from scipy import signal
import tidynamics
from numpy import sqrt, zeros, conj, pi, arange, ones, convolve
def get_ks(x, y, z, xmax, ymax, zmax, device):
    """Build the integer radial-wavenumber grid for an (x, y, z) FFT box.

    Each axis index is mapped to a signed frequency (indices above the
    Nyquist bound are wrapped to negative frequencies), then the rounded
    Euclidean magnitude sqrt(kx^2 + ky^2 + kz^2) is returned for use as a
    radial-bin index.

    Args:
        x, y, z: grid extent along each axis.
        xmax, ymax, zmax: Nyquist bounds (typically n/2 per axis).
        device: torch device to build the tensor on.  The previous
            implementation forced ``torch.cuda.FloatTensor`` and ignored this
            argument, crashing on CPU-only machines.

    Returns:
        int64 tensor of shape (x, y, z), kept on *device* so callers can
        mask/index same-device spectra with it.
    """
    kx = torch.arange(x, device=device, dtype=torch.float32).view(-1, 1, 1).repeat(1, y, z)
    ky = torch.arange(y, device=device, dtype=torch.float32).view(1, -1, 1).repeat(x, 1, z)
    kz = torch.arange(z, device=device, dtype=torch.float32).view(1, 1, -1).repeat(x, y, 1)
    # Wrap indices above the Nyquist bound to their negative frequencies.
    kx[kx > xmax] -= xmax * 2
    ky[ky > ymax] -= ymax * 2
    kz[kz > zmax] -= zmax * 2
    ks = (kx * kx + ky * ky + kz * kz) ** 0.5
    return torch.round(ks).long()
def movingaverage(interval, window_size):
    """Smooth *interval* with a box filter of width *window_size*.

    Returns an array the same length as the input ('same'-mode convolution).
    """
    kernel = ones(int(window_size)) / float(window_size)
    return convolve(interval, kernel, 'same')
def generate_patchwise(generator, LR, mode):
    """Run *generator* over *LR* in overlapping 64^3 patches and stitch the result.

    Patches overlap by the generator's receptive-field radius so that border
    artifacts from each patch are discarded: for every patch after the first
    along an axis, the first ``rf`` voxels of the output are skipped when
    writing into the stitched volume.

    Args:
        generator: model exposing ``receptive_field()`` and a
            ``(LR_patch, noise)`` call signature.
        LR: 5-D input tensor, indexed as (batch, channel, z, y, x) below.
        mode: 'reconstruct' (zero noise) or 'random' (Gaussian noise).

    NOTE(review): relies on the module-global ``opt`` for the device, and the
    'reconstruct' branch hard-codes device="cuda" and a 3-channel noise shape
    -- confirm these match opt['device'] / LR's channel count.
    """
    #print("Gen " + str(i))
    patch_size = 64
    # Half the receptive field: the per-side margin that is untrustworthy.
    rf = int(generator.receptive_field() / 2)
    generated_image = torch.zeros(LR.shape).to(opt['device'])
    # Step by patch_size - 2*rf so consecutive patches overlap by 2*rf;
    # the min(...) clamps pin the final patch flush against the volume edge.
    for z in range(0,generated_image.shape[2], patch_size-2*rf):
        z = min(z, max(0, generated_image.shape[2] - patch_size))
        z_stop = min(generated_image.shape[2], z + patch_size)
        for y in range(0,generated_image.shape[3], patch_size-2*rf):
            y = min(y, max(0, generated_image.shape[3] - patch_size))
            y_stop = min(generated_image.shape[3], y + patch_size)
            for x in range(0,generated_image.shape[4], patch_size-2*rf):
                x = min(x, max(0, generated_image.shape[4] - patch_size))
                x_stop = min(generated_image.shape[4], x + patch_size)
                if(mode == "reconstruct"):
                    #noise = generator.optimal_noise[:,:,z:z_stop,y:y_stop,x:x_stop]
                    noise = torch.zeros([1, 3,z_stop-z,y_stop-y,x_stop-x], device="cuda")
                elif(mode == "random"):
                    noise = torch.randn([generated_image.shape[0], generated_image.shape[1],
                                         z_stop-z,y_stop-y,x_stop-x], device=opt['device'])
                #print("[%i:%i, %i:%i, %i:%i]" % (z, z_stop, y, y_stop, x, x_stop))
                result = generator(LR[:,:,z:z_stop,y:y_stop,x:x_stop],
                                   noise)
                # Skip the leading rf voxels of interior patches (their values
                # are influenced by the artificial patch border).
                x_offset = rf if x > 0 else 0
                y_offset = rf if y > 0 else 0
                z_offset = rf if z > 0 else 0
                generated_image[:,:,
                                z+z_offset:z+noise.shape[2],
                                y+y_offset:y+noise.shape[3],
                                x+x_offset:x+noise.shape[4]] = result[:,:,z_offset:,y_offset:,x_offset:]
    return generated_image
def compute_tke_spectrum_pytorch(u,v,w,lx,ly,lz,smooth):
    """Torch variant of compute_tke_spectrum(): radial kinetic-energy spectrum.

    See compute_tke_spectrum() below for the algorithm description; this
    version keeps the data in torch tensors and bins with get_ks().

    NOTE(review): this uses ``torch.fft.fft`` (1-D transform over the last
    dimension) while the NumPy twin uses ``np.fft.fftn`` (full 3-D
    transform) -- confirm whether the 1-D transform here is intentional.
    NOTE(review): get_ks() is invoked with a hard-coded "cuda:0" device.
    """
    import torch.fft
    nx = len(u[:,0,0])
    ny = len(v[0,:,0])
    nz = len(w[0,0,:])
    nt= nx*ny*nz
    n = nx #int(np.round(np.power(nt,1.0/3.0)))
    # Normalized spectral velocity components.
    uh = torch.fft.fft(u)/nt
    vh = torch.fft.fft(v)/nt
    wh = torch.fft.fft(w)/nt
    tkeh = torch.zeros((nx,ny,nz))
    # Point-wise spectral kinetic energy: 0.5 * sum_i |u_i|^2.
    tkeh = 0.5*(uh*torch.conj(uh) + vh*torch.conj(vh) + wh*torch.conj(wh)).real
    # Smallest resolvable wavenumber per axis, averaged into one scale.
    k0x = 2.0*pi/lx
    k0y = 2.0*pi/ly
    k0z = 2.0*pi/lz
    knorm = (k0x + k0y + k0z)/3.0
    kxmax = nx/2
    kymax = ny/2
    kzmax = nz/2
    wave_numbers = knorm*torch.arange(0,n)
    tke_spectrum = torch.zeros([len(wave_numbers)])
    ks = get_ks(nx, ny, nz, kxmax, kymax, kzmax, "cuda:0")
    # Radial binning: sum the energy of every mode whose |k| rounds to k.
    for k in range(0, min(len(tke_spectrum), ks.max())):
        tke_spectrum[k] = torch.sum(tkeh[ks == k]).item()
    #tkeh = tkeh.cpu().numpy()
    tke_spectrum = tke_spectrum/knorm
    # tke_spectrum = tke_spectrum[1:]
    # wave_numbers = wave_numbers[1:]
    if smooth:
        tkespecsmooth = movingaverage(tke_spectrum, 5) #smooth the spectrum
        tkespecsmooth[0:4] = tke_spectrum[0:4] # get the first 4 values from the original data
        tke_spectrum = tkespecsmooth
    knyquist = knorm*min(nx,ny,nz)/2
    return knyquist, wave_numbers, tke_spectrum
def compute_tke_spectrum(u,v,w,lx,ly,lz,smooth):
    """
    Given a velocity field u, v, w, this function computes the kinetic energy
    spectrum of that velocity field in spectral space. This procedure consists of the
    following steps:
    1. Compute the spectral representation of u, v, and w using a fast Fourier transform.
    This returns uf, vf, and wf (the f stands for Fourier)
    2. Compute the point-wise kinetic energy Ef (kx, ky, kz) = 1/2 * (uf, vf, wf)* conjugate(uf, vf, wf)
    3. For every wave number triplet (kx, ky, kz) we have a corresponding spectral kinetic energy
    Ef(kx, ky, kz). To extract a one dimensional spectrum, E(k), we integrate Ef(kx,ky,kz) over
    the surface of a sphere of radius k = sqrt(kx^2 + ky^2 + kz^2). In other words
    E(k) = sum( E(kx,ky,kz), for all (kx,ky,kz) such that k = sqrt(kx^2 + ky^2 + kz^2) ).
    Parameters:
    -----------
    u: 3D array
    The x-velocity component.
    v: 3D array
    The y-velocity component.
    w: 3D array
    The z-velocity component.
    lx: float
    The domain size in the x-direction.
    ly: float
    The domain size in the y-direction.
    lz: float
    The domain size in the z-direction.
    smooth: boolean
    A boolean to smooth the computed spectrum for nice visualization.
    """
    nx = len(u[:,0,0])
    ny = len(v[0,:,0])
    nz = len(w[0,0,:])
    nt= nx*ny*nz
    n = nx #int(np.round(np.power(nt,1.0/3.0)))
    # Full 3-D FFT of each velocity component, normalized by the point count.
    uh = np.fft.fftn(u)/nt
    vh = np.fft.fftn(v)/nt
    wh = np.fft.fftn(w)/nt
    tkeh = zeros((nx,ny,nz))
    # Point-wise spectral kinetic energy (step 2 of the docstring).
    tkeh = 0.5*(uh*conj(uh) + vh*conj(vh) + wh*conj(wh)).real
    # Smallest resolvable wavenumber per axis, averaged into one scale.
    k0x = 2.0*pi/lx
    k0y = 2.0*pi/ly
    k0z = 2.0*pi/lz
    knorm = (k0x + k0y + k0z)/3.0
    kxmax = nx/2
    kymax = ny/2
    kzmax = nz/2
    wave_numbers = knorm*arange(0,n)
    tke_spectrum = zeros(len(wave_numbers))
    # Radial binning on the GPU: this replaces the O(n^3) Python triple loop
    # kept below for reference.  NOTE(review): hard-codes "cuda:0" and relies
    # on the project helper np2torch -- this path requires a CUDA machine.
    ks = get_ks(nx, ny, nz, kxmax, kymax, kzmax, "cuda:0")
    tkeh = np2torch(tkeh, "cuda:0")
    for k in range(0, min(len(tke_spectrum), ks.max())):
        tke_spectrum[k] = torch.sum(tkeh[ks == k]).item()
    tkeh = tkeh.cpu().numpy()
    '''
    for kx in range(nx):
        rkx = kx
        if (kx > kxmax):
            rkx = rkx - (nx)
        for ky in range(ny):
            rky = ky
            if (ky>kymax):
                rky=rky - (ny)
            for kz in range(nz):
                rkz = kz
                if (kz>kzmax):
                    rkz = rkz - (nz)
                rk = sqrt(rkx*rkx + rky*rky + rkz*rkz)
                k = int(np.round(rk))
                tke_spectrum[k] = tke_spectrum[k] + tkeh[kx,ky,kz]
    '''
    tke_spectrum = tke_spectrum/knorm
    # tke_spectrum = tke_spectrum[1:]
    # wave_numbers = wave_numbers[1:]
    if smooth:
        tkespecsmooth = movingaverage(tke_spectrum, 5) #smooth the spectrum
        tkespecsmooth[0:4] = tke_spectrum[0:4] # get the first 4 values from the original data
        tke_spectrum = tkespecsmooth
    knyquist = knorm*min(nx,ny,nz)/2
    return knyquist, wave_numbers, tke_spectrum
if __name__ == '__main__':
    # Evaluation script: load a trained 3-D SinGAN, super-resolve a turbulence
    # volume, and compare energy spectra of ground truth, trilinear upsampling
    # and the SinGAN outputs against the Kolmogorov -5/3 reference slope.
    MVTVSSR_folder_path = os.path.dirname(os.path.abspath(__file__))
    input_folder = os.path.join(MVTVSSR_folder_path, "InputData")
    output_folder = os.path.join(MVTVSSR_folder_path, "Output")
    save_folder = os.path.join(MVTVSSR_folder_path, "SavedModels")
    parser = argparse.ArgumentParser(description='Test a trained model')
    parser.add_argument('--load_from',default="128_GP_0.5")
    parser.add_argument('--data_folder',default="JHUturbulence/isotropic128_3D",type=str,help='File to test on')
    parser.add_argument('--data_folder_diffsize',default="JHUturbulence/isotropic512_3D",type=str,help='File to test on')
    parser.add_argument('--device',default="cuda:0",type=str,help='Frames to use from training file')
    args = vars(parser.parse_args())
    # Saved options override the defaults; device/save_name come from the CLI.
    opt = Options.get_default()
    opt = load_options(os.path.join(save_folder, args["load_from"]))
    opt["device"] = args["device"]
    opt["save_name"] = args["load_from"]
    generators, discriminators = load_models(opt,args["device"])
    gen_to_use = 0
    # Put every model on the target device in eval (inference) mode.
    for i in range(len(generators)):
        generators[i] = generators[i].to(args["device"])
        generators[i] = generators[i].eval()
    for i in range(len(discriminators)):
        discriminators[i].to(args["device"])
        discriminators[i].eval()
    dataset2 = Dataset(os.path.join(input_folder, args["data_folder"]), opt)
    dataset = Dataset(os.path.join(input_folder, args["data_folder_diffsize"]), opt)
    # Ground-truth high-resolution volume and its spectrum (b=Nyquist,
    # c=wavenumbers, d=spectrum).
    f = dataset[0].cuda()
    a = dataset.unscale(f).cpu().numpy()[0]
    b, c, d = compute_tke_spectrum(a[0], a[1], a[2],
                                   2 * pi * (a.shape[1] / 1024.0), 2 * pi * (a.shape[2] / 1024),
                                   2 * pi * (a.shape[3] / 1024.0), True)
    # Kolmogorov reference curve 1.6 * eps^(2/3) * k^(-5/3); dis is the
    # dissipation rate (presumably the JHU isotropic-turbulence value --
    # TODO confirm).
    ks = []
    dis=0.0928
    for i in c:
        if(i == 0):
            ks.append(1.6 * (dis**(2.0/3.0)))
        else:
            ks.append(1.6 * (dis**(2.0/3.0)) * ((i) **(-5.0/3.0)))
    ks = np.array(ks)
    c[0] += 1  # avoid k=0 on the upcoming log-log axes
    plt.plot(c[c < b], ks[c < b], color='black')
    plt.plot(c[c < b], d[c < b], color='red')
    # Downscale the ground truth to the generator's input resolution, then
    # compare plain trilinear upsampling back to 512^3.
    f_lr = laplace_pyramid_downscale3D(f, opt['n']-gen_to_use-1,
                                       opt['spatial_downscale_ratio'],
                                       opt['device'])
    f_trilin = dataset.unscale(F.interpolate(f_lr, mode='trilinear', size=[512, 512, 512])).cpu()[0].numpy()
    b, c, d = compute_tke_spectrum(f_trilin[0], f_trilin[1], f_trilin[2],
                                   2 * pi * (f_trilin.shape[1] / 1024.0), 2 * pi * (f_trilin.shape[2] / 1024),
                                   2 * pi * (f_trilin.shape[3] / 1024.0), True)
    plt.plot(c[c < b], d[c < b], color='blue')
    # SinGAN intermediate scale: upsample to 256^3 and reconstruct patchwise.
    # NOTE(review): singan_output is indexed like a 4-D array below, unlike
    # the earlier `.cpu()[0].numpy()` unwrapping -- confirm intended.
    with torch.no_grad():
        singan_output = f_lr.clone()
        singan_output = F.interpolate(singan_output, size=[256, 256, 256], mode='trilinear', align_corners=True)
        singan_output = generate_patchwise(generators[1], singan_output,
                                           "reconstruct")
    b, c, d = compute_tke_spectrum(singan_output[0], singan_output[1], singan_output[2],
                                   2 * pi * (singan_output.shape[1] / 1024.0), 2 * pi * (singan_output.shape[2] / 1024),
                                   2 * pi * (singan_output.shape[3] / 1024.0), True)
    plt.plot(c[c < b], d[c < b], color='green')
    # SinGAN final scale: continue to 512^3 with the last generator.
    with torch.no_grad():
        singan_output = F.interpolate(singan_output, size=[512, 512, 512], mode='trilinear', align_corners=True)
        singan_output = generate_patchwise(generators[2], singan_output,
                                           "reconstruct")
    b, c, d = compute_tke_spectrum(singan_output[0], singan_output[1], singan_output[2],
                                   2 * pi * (singan_output.shape[1] / 1024.0), 2 * pi * (singan_output.shape[2] / 1024),
                                   2 * pi * (singan_output.shape[3] / 1024.0), True)
    plt.plot(c[c < b], d[c < b], color='orange')
    # Log-log axes, clipping non-positive values.
    plt.yscale("log", nonpositive="clip")
    plt.xscale("log", nonpositive="clip")
    plt.xlabel("wavenumber")
    plt.title("Energy Spectra")
    plt.legend(["1.6*epsilon^(2/3)*k^(-5/3)", "Ground truth data", "Trilinear",
                "SinGAN - intermediate", "SinGAN - final"])
    plt.show()
|
# coding=utf-8
from time import sleep
from gpio_wrapper import gpio_commands
from utils import DbSession
import pickledb
import settings
class DeviceWrongAction(Exception):
    """Raised when a Device is constructed with an unknown action type."""
    pass
class Device:
    """GPIO-controlled device whose last commanded state is persisted via pickledb."""

    ACTION_SWITCH = 1  # stable on/off output
    ACTION_PULSE = 2   # momentary pulse (toggles a physical latch)

    def __init__(self, name, pin, action, reset_state=True):
        if action not in (Device.ACTION_PULSE, Device.ACTION_SWITCH):
            raise DeviceWrongAction("Wrong action selected")
        self.name = name
        self.pin = pin
        self.action = action
        if reset_state:
            gpio_commands.mode(pin=self.pin, set_mode='out')
            gpio_commands.write(pin=self.pin, value=0)
            # The real hardware state cannot be read back and may also be
            # changed by physical buttons (PULSE switches), so "off" is
            # assumed after a reset. To be dealt with later. (FIXME)
            self.write_state(state=0)

    def switch(self):
        """Toggle the device and persist its new state."""
        if self.action == Device.ACTION_SWITCH:
            new_state = 0 if self.read_state() else 1
            gpio_commands.write(pin=self.pin, value=new_state)
            self.write_state(new_state)
        elif self.action == Device.ACTION_PULSE:
            # Drive a short high pulse; the latch flips on the pulse edge.
            gpio_commands.write(pin=self.pin, value=1)
            sleep(.1)
            gpio_commands.write(pin=self.pin, value=0)
            self.write_state(0 if self.read_state() else 1)

    def read_state(self):
        """Return the last persisted state recorded for this pin."""
        db = pickledb.load(settings.state_db_file, False)
        return db.get("state" + str(self.pin))

    def write_state(self, state):
        """Persist *state* for this pin and flush the database to disk."""
        db = pickledb.load(settings.state_db_file, False)
        db.set("state" + str(self.pin), state)
        db.dump()
        return True
class Session:
    """Interface sketch for visitor sessions backed by an SQLite table.

    Every behavior method is a NotImplementedError stub; only the static
    schema initializer init_db_file() is functional.
    """
    def __init__(self):
        raise NotImplementedError

    def send_email(self):
        # sends email with session information
        raise NotImplementedError

    def delete(self):
        # removes self
        raise NotImplementedError

    @staticmethod
    def new(session_id, email='', comment=''):
        # creates new session
        raise NotImplementedError

    @staticmethod
    def fetch_all():
        # returns iterable
        raise NotImplementedError

    @staticmethod
    def get(session_id):
        # returns session object of given id or None
        raise NotImplementedError

    @staticmethod
    def init_db_file(filename):
        # Create the sessions table (idempotent) in the given SQLite file.
        with DbSession(filename) as c:
            c.execute("CREATE TABLE IF NOT EXISTS [sessions] ([session_id] VARCHAR PRIMARY KEY NOT NULL UNIQUE, [comment] VARCHAR, [email] VARCHAR);")
|
from bot import dp
from filters import IsOwner
from functions import admin_menu
@dp.message_handler(IsOwner(), commands="admin")
async def admin(message):
    # /admin command, restricted to the bot owner by the IsOwner filter:
    # delegate straight to the admin menu renderer.
    await admin_menu(message)
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.common import utils
from neutron.db.models import external_net
from neutron_lib import constants
from neutron_lib.plugins.ml2 import api
from nuage_neutron.plugins.common import base_plugin
from nuage_neutron.plugins.common import constants as nuage_constants
from nuage_neutron.plugins.common import nuagedb
from sqlalchemy.orm import exc
LOG = logging.getLogger(__name__)
class NuageSubnetExtensionDriver(api.ExtensionDriver,
                                 base_plugin.RootNuagePlugin):
    """ML2 extension driver for the Nuage-specific subnet attributes.

    Copies Nuage attributes (net_partition, nuagenet, underlay,
    nuage_uplink, nuage_underlay) between API request data and subnet
    result dicts, and enriches subnet dicts with VSD-derived data in
    extend_subnet_dict.
    """
    _supported_extension_alias = 'nuage-subnet'
    def initialize(self):
        """Initialize the driver and connect to the VSD client."""
        super(NuageSubnetExtensionDriver, self).__init__()
        self.init_vsd_client()
        # keep track of values requested via the API, keyed by
        # (subnet id, field name), so extend_subnet_dict can see them
        self.val_by_id = {}
    @property
    def extension_alias(self):
        # Alias under which this extension is registered with ml2.
        return self._supported_extension_alias
    def _is_network_external(self, session, net_id):
        """Return True when the network with *net_id* is an external network."""
        try:
            session.query(external_net.ExternalNetwork)\
                .filter_by(network_id=net_id).one()
            return True
        except exc.NoResultFound:
            return False
    def _store_change(self, result, data, field):
        """Copy *field* from request *data* into *result* when it was specified."""
        # Due to ml2 plugin result does not get passed to our plugin
        if field in data and data[field] != constants.ATTR_NOT_SPECIFIED:
            if field == nuage_constants.NUAGE_UNDERLAY:
                # Remember the requested value for extend_subnet_dict.
                self.val_by_id[(result['id'], field)] = data[field]
            result[field] = data[field]
    def process_create_subnet(self, plugin_context, data, result):
        """Copy Nuage attributes into the create result (except nuage_underlay)."""
        self._copy_nuage_attributes(data, result)
        # Make sure nuage_underlay is not processed as part of create
        result.pop(nuage_constants.NUAGE_UNDERLAY, None)
    def process_update_subnet(self, plugin_context, data, result):
        """Copy Nuage attributes into the update result."""
        if nuage_constants.NUAGE_UNDERLAY not in data:
            # Normalize a missing nuage_underlay to an explicit None.
            data[nuage_constants.NUAGE_UNDERLAY] = None
        self._copy_nuage_attributes(data, result)
    def _copy_nuage_attributes(self, data, result):
        """Copy every supported Nuage attribute from *data* to *result*."""
        nuage_attributes = ('net_partition', 'nuagenet', 'underlay',
                            'nuage_uplink', nuage_constants.NUAGE_UNDERLAY)
        for attribute in nuage_attributes:
            self._store_change(result, data, attribute)
    @utils.exception_logger()
    def extend_subnet_dict(self, session, db_data, result):
        """Extend the subnet dict *result* with Nuage/VSD-derived fields."""
        subnet_mapping = nuagedb.get_subnet_l2dom_by_id(session, result['id'])
        if subnet_mapping:
            result['net_partition'] = subnet_mapping['net_partition_id']
            result['vsd_managed'] = subnet_mapping['nuage_managed_subnet']
            if result['vsd_managed']:
                result['nuagenet'] = subnet_mapping['nuage_subnet_id']
        else:
            # No mapping: this subnet is not managed by VSD.
            result['vsd_managed'] = False
        result['nuage_l2bridge'] = nuagedb.get_nuage_l2bridge_id_for_network(
            session, result['network_id'])
        if self._is_network_external(session, db_data['network_id']):
            # Add nuage underlay parameter and set the nuage_uplink for
            # subnets in external network.
            # Normally external subnet is always l3, but in process of updating
            # internal to external network, update network postcommit process
            # is looping over current subnets and at that time these are still
            # l2 in VSD; hence checking for l3 (if not, skip this block).
            if subnet_mapping and self._is_l3(subnet_mapping):
                nuage_underlay_db = nuagedb.get_subnet_parameter(
                    session, result['id'], nuage_constants.NUAGE_UNDERLAY)
                nuage_subnet = self.vsdclient.get_nuage_subnet_by_id(
                    subnet_mapping['nuage_subnet_id'],
                    subnet_type=nuage_constants.L3SUBNET)
                result['underlay'] = bool(nuage_underlay_db)
                if nuage_subnet:
                    result['nuage_uplink'] = nuage_subnet['parentID']
        else:
            # Add nuage_underlay parameter
            update = self.val_by_id.pop(
                (result['id'], nuage_constants.NUAGE_UNDERLAY),
                constants.ATTR_NOT_SPECIFIED)
            nuage_underlay_db = nuagedb.get_subnet_parameter(
                session, result['id'], nuage_constants.NUAGE_UNDERLAY)
            if (update is constants.ATTR_NOT_SPECIFIED and
                    not result['vsd_managed'] and
                    not self._is_ipv6(result) and
                    subnet_mapping and
                    self._is_l3(subnet_mapping)):
                # No update, db value
                result['nuage_underlay'] = (
                    nuage_underlay_db['parameter_value']
                    if nuage_underlay_db else
                    nuage_constants.NUAGE_UNDERLAY_INHERITED)
            elif (update is not constants.ATTR_NOT_SPECIFIED and
                  update != nuage_underlay_db):
                # update + change
                # NOTE(review): this compares the requested value with the db
                # row object (not its 'parameter_value') — presumably always
                # unequal for non-None rows; confirm intended semantics.
                result['nuage_underlay'] = update
        return result
|
from cyberbrain import Binding, Symbol
def test_hello(tracer, check_golden_file):
    """Trace a few simple assignments and compare events against the golden file.

    The inline comments name the bytecode instructions each statement compiles
    to; do not reformat these statements, the trace depends on them exactly.
    """
    tracer.start()
    x = "hello world"  # LOAD_CONST, STORE_FAST
    y = x  # LOAD_FAST, STORE_FAST
    x, y = y, x  # ROT_TWO, STORE_FAST
    tracer.stop()
|
__author__ = 'Robert Meyer'
import multiprocessing as mp
import numpy as np
import os # For path names working under Windows and Linux
from pypet import Environment, cartesian_product
def multiply(traj, result_list):
    """Compute ``traj.x * traj.y`` and place it in the shared result list.

    Instead of returning the product, the value is written into
    ``result_list`` at the slot for the current run (``traj.v_idx``),
    so all worker processes share one result container.

    :param traj:
        Trajectory holding one particular parameter combination.
    :param result_list:
        Shared (multiprocessing) list indexed by run index.
    """
    result_list[traj.v_idx] = traj.x * traj.y
def main():
    """Run the multiplication exploration with results shared via a manager list.

    Sets up a multiprocessing pypet Environment, explores a cartesian
    product of x and y, runs ``multiply`` on two cores, and finally stores
    the shared result list as a numpy array in the trajectory.
    """
    # Create an environment that handles running
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_12_Sharing_Data',
                      overwrite_file=True,
                      comment='The first example!',
                      continuable=False, # We have shared data in terms of a multiprocessing list,
                      # so we CANNOT use the continue feature.
                      multiproc=True,
                      ncores=2)
    # The environment has created a trajectory container for us
    traj = env.trajectory
    # Add both parameters
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')
    # Explore the parameters with a cartesian product
    traj.f_explore(cartesian_product({'x':[1,2,3,4], 'y':[6,7,8]}))
    # We want a shared list where we can put all out results in. We use a manager for this:
    result_list = mp.Manager().list()
    # Let's make some space for potential results
    result_list[:] =[0 for _dummy in range(len(traj))]
    # Run the simulation
    env.run(multiply, result_list)
    # Now we want to store the final list as numpy array
    traj.f_add_result('z', np.array(result_list))
    # Finally let's print the result to see that it worked
    print(traj.z)
    #Disable logging and close all log-files
    env.disable_logging()
# Run the example only when executed as a script (not on import).
if __name__ == '__main__':
    main()
"""Functions for dealing with SOAP in oseoserver."""
from __future__ import absolute_import
from lxml import etree
from .constants import NAMESPACES
from .errors import InvalidSoapVersionError
from .auth import usernametoken
def get_soap_version(request_element):
    """Detect the SOAP version of a request, if any.

    Supported SOAP versions are 1.1 and 1.2.

    Parameters
    ----------
    request_element: etree.Element
        OSEO request to be processed

    Returns
    -------
    str or None
        The detected SOAP version, or None when the request is not
        wrapped in a SOAP Envelope at all.

    Raises
    ------
    oseoserver.errors.InvalidSoapVersionError
        If an invalid SOAP version has been requested

    """
    qname = etree.QName(request_element.tag)
    if qname.localname != "Envelope":
        return None
    version_for_namespace = {
        NAMESPACES["soap"]: "1.2",
        NAMESPACES["soap1.1"]: "1.1",
    }
    try:
        return version_for_namespace[qname.namespace]
    except KeyError:
        raise InvalidSoapVersionError("Could not detect SOAP version")
def get_soap_fault_code(response_text):
    """Map an OSEO exception code to its SOAP fault code.

    All currently known exception codes map to the "Sender" fault;
    unknown codes yield None.
    """
    sender_faults = (
        "AuthorizationFailed",
        "AuthenticationFailed",
        "InvalidOrderIdentifier",
        "NoApplicableCode",
        "UnsupportedCollection",
        "InvalidParameterValue",
        "SubscriptionNotSupported",
        "ProductOrderingNotSupported",
        "FutureProductNotSupported",
    )
    return "Sender" if response_text in sender_faults else None
def get_response_content_type(soap_version):
    """Return the correct response content-type for a SOAP version.

    Parameters
    ----------
    soap_version: str
        The SOAP version in use ("1.1" or "1.2")

    Returns
    -------
    content_type: str
        HTTP Content-Type header to use for the response.

    Raises
    ------
    oseoserver.errors.InvalidSoapVersionError
        For any version other than "1.1" or "1.2".
    """
    if soap_version == "1.1":
        return "text/xml"
    if soap_version == "1.2":
        return "application/soap+xml"
    raise InvalidSoapVersionError(
        "Unsupported SOAP version {!r}".format(soap_version))
def unwrap_request(request_element):
    """Remove a request's SOAP envelope.

    Parameters
    ----------
    request_element: etree.Element
        The request element to unwrap

    Returns
    -------
    etree.Element
        The unwrapped request element
    user: str, optional
        The username that made the request
    password: str, optional
        The password of the detected username
    password_attributes: dict, optional
        Any attributes present on the password element

    """
    soap_version = get_soap_version(request_element)
    if soap_version is None:
        # Not SOAP-wrapped: return the element untouched, no credentials.
        return request_element, None, None, None
    soap_ns_prefix = "soap" if soap_version == "1.2" else "soap1.1"
    # BUGFIX: body_path was previously run through str.format() a second
    # time with soap_version — a confusing no-op, since the path has no
    # remaining placeholders after the prefix is substituted.
    body_path = "{}:Body/*".format(soap_ns_prefix)
    request_data = request_element.xpath(body_path,
                                         namespaces=NAMESPACES)[0]
    user, password, password_attributes = usernametoken.get_details(
        request_element, soap_version)
    return request_data, user, password, password_attributes
def wrap_response(response_element, soap_version):
    """Wrap the OSEO operation response in a SOAP envelope.

    Parameters
    ----------
    response_element: etree.Element
        The generated response
    soap_version: str
        The version of SOAP to use

    Returns
    -------
    etree.Element
        The SOAP-wrapped response

    """
    soap_env_ns = {
        "ows": NAMESPACES["ows"],
    }
    if soap_version == "1.2":
        soap_env_ns["soap"] = NAMESPACES["soap"]
    else:
        soap_env_ns["soap"] = NAMESPACES["soap1.1"]
    soap_ns = soap_env_ns["soap"]
    # Build tags with "{{{}}}".format(...) for consistency with
    # wrap_soap_fault (previously this function used %-formatting).
    soap_env = etree.Element("{{{}}}Envelope".format(soap_ns),
                             nsmap=soap_env_ns)
    soap_body = etree.SubElement(soap_env, "{{{}}}Body".format(soap_ns))
    soap_body.append(response_element)
    return soap_env
def wrap_soap_fault(exception_element, soap_code, soap_version):
    """Wrap the ExceptionReport in a SOAP envelope.
    Parameters
    ----------
    exception_element: etree.Element
        The generated exception report to wrap
    soap_code: str
        The soap code to use
    soap_version: str
        The version of SOAP to use
    Returns
    -------
    etree.Element
        The SOAP-wrapped response
    """
    code_msg = "soap:{}".format(soap_code.capitalize())
    reason_msg = "{} exception was encountered".format(
        soap_code.capitalize())
    soap_env_ns = {
        "ows": NAMESPACES["ows"],
        "xml": NAMESPACES["xml"],
    }
    # Pick the namespace for the requested SOAP version (1.2 vs 1.1).
    if soap_version == "1.2":
        soap_env_ns["soap"] = NAMESPACES["soap"]
    else:
        soap_env_ns["soap"] = NAMESPACES["soap1.1"]
    soap_env = etree.Element("{{{}}}Envelope".format(soap_env_ns["soap"]),
                             nsmap=soap_env_ns)
    soap_body = etree.SubElement(soap_env, "{{{}}}Body".format(
        soap_env_ns["soap"]))
    soap_fault = etree.SubElement(soap_body, "{{{}}}Fault".format(
        soap_env_ns["soap"]))
    if soap_version == "1.2":
        # SOAP 1.2 fault structure: Code/Value, Reason/Text (with xml:lang)
        # and Detail child elements.
        fault_code = etree.SubElement(soap_fault, "{{{}}}Code".format(
            soap_env_ns["soap"]))
        code_value = etree.SubElement(fault_code, "{{{}}}Value".format(
            soap_env_ns["soap"]))
        code_value.text = code_msg
        fault_reason = etree.SubElement(soap_fault, "{{{}}}Reason".format(
            soap_env_ns["soap"]))
        reason_text = etree.SubElement(fault_reason, "{{{}}}Text".format(
            soap_env_ns["soap"]))
        reason_text.set("{{{}}}lang".format(soap_env_ns["xml"]), "en")
        reason_text.text = reason_msg
        fault_detail = etree.SubElement(soap_fault, "{{{}}}Detail".format(
            soap_env_ns["soap"]))
        fault_detail.append(exception_element)
    else:
        # SOAP 1.1 fault structure: unqualified faultcode / faultstring /
        # faultactor / detail elements.
        fault_code = etree.SubElement(soap_fault, "faultcode")
        fault_code.text = code_msg
        fault_string = etree.SubElement(soap_fault, "faultstring")
        fault_string.text = reason_msg
        fault_actor = etree.SubElement(soap_fault, "faultactor")
        fault_actor.text = ""
        detail = etree.SubElement(soap_fault, "detail")
        detail.append(exception_element)
    return soap_env
|
import numpy as np
import pandas as pd
import random
import sys
import warnings
from sklearn.model_selection import train_test_split
from collections import Counter
from matplotlib import style
import matplotlib.pyplot as plt
class k_nearest_neighbors():
    """A from-scratch k-nearest-neighbors classifier.

    ``fit`` relabels every training point with the majority label of its
    k nearest training points (including itself) and stores the resulting
    (point, label) pairs; ``predict`` then takes a majority vote among the
    k stored pairs nearest to each query point.
    """
    def __init__(self):
        # List of [feature_vector, majority_label] pairs built by fit().
        self.weights = []
        # Number of neighbors used for voting; set by fit().
        self.k = 0
        self.accuracy = None
    def fit(self, data, label, k=3):
        """Build the smoothed training set used by predict().

        :param data: list of feature vectors (list of lists of numbers)
        :param label: list of class labels, parallel to ``data``
        :param k: number of nearest neighbors to vote with
        """
        self.k = k
        # Warn when k is smaller than the number of distinct classes:
        # a k-vote can then end without a clear majority.
        # BUGFIX: was ``>=``, which also warned when the counts were equal,
        # contradicting the "less than" message.
        if len(Counter(label)) > k:
            warnings.warn('K is set to a value less than total voting groups!')
        for i in range(len(data)):
            distances = []
            # BUGFIX: iterate with the index instead of data.index(features);
            # index() returns the FIRST occurrence, so duplicate points were
            # paired with the wrong label.
            for j, features in enumerate(data):
                euclidean_distance = np.linalg.norm(
                    np.array(features) - np.array(data[i]))
                distances.append([euclidean_distance, label[j]])
            votes = [d[1] for d in sorted(distances)[:k]]
            self.weights.append([data[i], Counter(votes).most_common(1)[0][0]])
    def score(self, data, labels):
        """Return the percentage (0-100) of ``data`` predicted as ``labels``."""
        check = self.predict(data)
        tot, rit = 0, 0
        for i in range(len(labels)):
            if labels[i] == check[i][0]:
                rit += 1
            tot += 1
        return (rit / tot) * 100
    def predict(self, test):
        """Return ``[[label, confidence], ...]`` for each point in ``test``.

        Confidence is the fraction of the k nearest stored points that
        voted for the winning label.
        """
        k = self.k
        weights = self.weights
        result = []
        for point in test:
            distances = []
            for features in weights:
                euclidean_distance = np.linalg.norm(
                    np.array(features[0]) - np.array(point))
                distances.append([euclidean_distance, features[1]])
            votes = [m[1] for m in sorted(distances)[:k]]
            top_label, top_count = Counter(votes).most_common(1)[0]
            result.append([top_label, top_count / k])
        return result
# Demo: train on six 2-D points from two classes, classify three new points
# and plot both the training data and the colored predictions.
if __name__=="__main__":
    style.use('fivethirtyeight')
    # Each row is [x, y, class_label]; 'k' and 'r' double as plot colors.
    data =[[1,2,'k'],[2,3,'k'],[3,1,'k'],[6,5,'r'],[7,7,'r'],[8,6,'r']]
    random.shuffle(data)
    X=[i[:-1]for i in data]
    y=[i[-1]for i in data]
    new_features = [[5,7],[0,4],[10,10]]
    new_labels=['r','k','r']
    clf=k_nearest_neighbors()
    clf.fit(X,y,k=3)
    print("Accuracy:",clf.score(new_features,new_labels))
    result=clf.predict(new_features)
    print(new_features)
    #result contains the prediction and the confidence of the prediction
    print(result)
    # Plot each query point colored by its predicted class, larger marker.
    for i in new_features:
        plt.scatter(i[0], i[1], s=100, color = result[new_features.index(i)][0])
    # Plot the training points in their true class colors.
    for i in data:
        plt.scatter(i[0],i[1],color=i[2])
    plt.show()
|
import json
from chatapp import path_util
def load_config():
    """Load and return the server configuration dict from server_config.json."""
    config_path = path_util.get_file_path('server', 'server_config.json')
    with open(config_path, 'r') as config_file:
        return json.load(config_file)
|
"""A functions module, includes all the standard functions.
Combinatorial - factorial, fibonacci, harmonic, bernoulli...
Elementary - hyperbolic, trigonometric, exponential, floor and ceiling, sqrt...
Special - gamma, zeta, spherical harmonics...
"""
from sympy.core.basic import Basic
import combinatorial
import elementary
import special
from special.polynomials import (legendre, assoc_legendre, hermite, chebyshevt,
chebyshevu, chebyshevu_root, chebyshevt_root, laguerre_l)
# see #391
from combinatorial.factorials import factorial, factorial2, rf, ff, binomial
from combinatorial.factorials import factorial, RisingFactorial, FallingFactorial
from combinatorial.factorials import binomial, factorial2
from combinatorial.numbers import fibonacci, lucas, harmonic, bernoulli, bell, euler, catalan
from elementary.miscellaneous import sqrt, root, Min, Max, Id
from elementary.complexes import re, im, sign, Abs, conjugate, arg
from elementary.trigonometric import acot, cot, tan, cos, sin, asin, acos, atan, atan2
from elementary.exponential import exp, log, LambertW
from elementary.hyperbolic import sinh, cosh, tanh, coth, asinh, acosh, atanh, acoth
from elementary.integers import floor, ceiling
from elementary.piecewise import Piecewise, piecewise_fold
from special.error_functions import erf
from special.gamma_functions import gamma, lowergamma, uppergamma, polygamma, \
loggamma, digamma, trigamma, beta
from special.zeta_functions import dirichlet_eta, zeta
from special.spherical_harmonics import Ylm, Zlm
from special.tensor_functions import Eijk, LeviCivita, KroneckerDelta
from special.delta_functions import DiracDelta, Heaviside
from special.bsplines import bspline_basis, bspline_basis_set
from special.bessel import besselj, bessely, besseli, besselk, hankel1, \
hankel2, jn, yn, jn_zeros
from special.hyper import hyper, meijerg
ln = log
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from deprecation import deprecated as Deprecated
@Deprecated(deprecated_in="0.0.3", details="Interactive mode is to be removed in 0.0.4", removed_in="0.0.4")
def getNum(prompt, valType):
    """
    Obtain a validated numeric input from the keyboard, re-prompting until
    the user enters a value of the requested type.

    :param prompt: Text prompt shown to the user
    :param valType: 'f' to expect a float, 'i' to expect an int
    :return: The validated float or int entered by the user
    """
    # SECURITY NOTE(review): eval() on raw keyboard input executes arbitrary
    # code. It is kept here for backward compatibility (callers may rely on
    # expression input such as "2+3"), but ast.literal_eval would be safer.
    if valType=='f':
        while True:
            try:
                return float(eval(input(prompt)))
            # BUGFIX: was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit and made Ctrl-C impossible.
            except Exception:
                print("Invalid input \n")
    elif valType=='i':
        while True:
            try:
                x = eval(input(prompt))
                if isinstance(x, int):
                    return(int(x))
                print("Invalid input \n")
            except Exception:
                print("Invalid input \n")
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import io
import logging
import numpy as np
from typing import List
import onnx
import torch
from caffe2.python.onnx.backend import Caffe2Backend
from torch.onnx import OperatorExportTypes
from .shared import (
ScopedWS,
construct_init_net_from_params,
fuse_alias_placeholder,
get_params_from_init_net,
group_norm_replace_aten_with_caffe2,
remove_reshape_for_fc,
save_graph,
)
logger = logging.getLogger(__name__)
def _export_via_onnx(model, inputs):
    """Export *model* through ONNX and convert it to Caffe2 nets.

    Returns a ``(predict_net, init_net)`` pair of Caffe2 protobufs.
    """
    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training
    model.apply(_check_eval)
    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True, # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())
    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)
    # Convert ONNX model to Caffe2 protobuf
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
    return predict_net, init_net
def _op_stats(net_def):
type_count = {}
for t in [op.type for op in net_def.op]:
type_count[t] = type_count.get(t, 0) + 1
type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet
type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count
return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
    """
    Export a Detectron2 model via ONNX.
    Arg:
        model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
        tensor_inputs: a list of tensors that caffe2 model takes as input.
    Returns:
        (predict_net, init_net): the exported Caffe2 protobufs.
    """
    model = copy.deepcopy(model)
    assert isinstance(model, torch.nn.Module)
    # The model must know how to embed its own metadata into the nets.
    assert hasattr(model, "encode_additional_info")
    # Export via ONNX
    logger.info("Exporting a {} model via ONNX ...".format(type(model).__name__))
    predict_net, init_net = _export_via_onnx(model, (tensor_inputs,))
    logger.info("ONNX export Done.")
    # Apply protobuf optimization
    fuse_alias_placeholder(predict_net, init_net)
    params = get_params_from_init_net(init_net)
    predict_net, params = remove_reshape_for_fc(predict_net, params)
    group_norm_replace_aten_with_caffe2(predict_net)
    # Rebuild init_net from the (possibly modified) parameter dict.
    init_net = construct_init_net_from_params(params)
    # Record necessary information for running the pb model in Detectron2 system.
    model.encode_additional_info(predict_net, init_net)
    logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
    logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
    return predict_net, init_net
def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
    """
    Run the caffe2 model on given inputs, recording the shape and draw the graph.
    predict_net/init_net: caffe2 model.
    tensor_inputs: a list of tensors that caffe2 model takes as input.
    graph_save_path: path for saving graph of exported model.
    Returns a dict mapping blob name -> fetched blob value.
    """
    logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
    save_graph(predict_net, graph_save_path, op_only=False)
    # Run the exported Caffe2 net
    logger.info("Running ONNX exported model ...")
    with ScopedWS("__ws_tmp__", True) as ws:
        ws.RunNetOnce(init_net)
        initialized_blobs = set(ws.Blobs())
        # Feed the external inputs that init_net did not already create,
        # pairing them positionally with tensor_inputs.
        uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
        for name, blob in zip(uninitialized, tensor_inputs):
            ws.FeedBlob(name, blob)
        try:
            ws.RunNetOnce(predict_net)
        except RuntimeError as e:
            # Best effort: still save whatever blob shapes were produced.
            logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
        ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
        blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
        logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
        save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
        return ws_blobs
|
"""
Woopra template tags and filters.
"""
from __future__ import absolute_import
import json
import re
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError
from analytical.utils import (
disable_html,
get_identity,
get_required_setting,
get_user_from_context,
get_user_is_authenticated,
is_internal_ip,
)
DOMAIN_RE = re.compile(r'^\S+$')
TRACKING_CODE = """
<script type="text/javascript">
var woo_settings = %(settings)s;
var woo_visitor = %(visitor)s;
!function(){var a,b,c,d=window,e=document,f=arguments,g="script",h=["config","track","trackForm","trackClick","identify","visit","push","call"],i=function(){var a,b=this,c=function(a){b[a]=function(){return b._e.push([a].concat(Array.prototype.slice.call(arguments,0))),b}};for(b._e=[],a=0;a<h.length;a++)c(h[a])};for(d.__woo=d.__woo||{},a=0;a<f.length;a++)d.__woo[f[a]]=d[f[a]]=d[f[a]]||new i;b=e.createElement(g),b.async=1,b.src="//static.woopra.com/js/w.js",c=e.getElementsByTagName(g)[0],c.parentNode.insertBefore(b,c)}("woopra");
woopra.config(woo_settings);
woopra.identify(woo_visitor);
woopra.track();
</script>
""" # noqa
register = Library()
@register.tag
def woopra(parser, token):
    """
    Woopra tracking template tag.
    Renders Javascript code to track page visits. You must supply
    your Woopra domain in the ``WOOPRA_DOMAIN`` setting. The tag
    accepts no arguments.
    """
    contents = token.split_contents()
    if len(contents) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % contents[0])
    return WoopraNode()
class WoopraNode(Node):
    """Template node that renders the Woopra tracking snippet."""
    def __init__(self):
        # Fails loudly at construction time when WOOPRA_DOMAIN is missing
        # or is not a plausible domain name.
        self.domain = get_required_setting(
            'WOOPRA_DOMAIN', DOMAIN_RE,
            "must be a domain name")
    def render(self, context):
        """Render the tracking <script>, disabled for internal IPs."""
        settings = self._get_settings(context)
        visitor = self._get_visitor(context)
        html = TRACKING_CODE % {
            'settings': json.dumps(settings, sort_keys=True),
            'visitor': json.dumps(visitor, sort_keys=True),
        }
        if is_internal_ip(context, 'WOOPRA'):
            html = disable_html(html, 'Woopra')
        return html
    def _get_settings(self, context):
        """Build the woopra.config() dict (domain plus optional idle timeout)."""
        variables = {'domain': self.domain}
        try:
            variables['idle_timeout'] = str(settings.WOOPRA_IDLE_TIMEOUT)
        except AttributeError:
            # Setting is optional; omit when not configured.
            pass
        return variables
    def _get_visitor(self, context):
        """Collect woopra_* context variables and fall back to the request user."""
        params = {}
        # Template context variables prefixed with "woopra_" become
        # visitor properties (prefix stripped).
        for dict_ in context:
            for var, val in dict_.items():
                if var.startswith('woopra_'):
                    params[var[7:]] = val
        if 'name' not in params and 'email' not in params:
            user = get_user_from_context(context)
            if user is not None and get_user_is_authenticated(user):
                params['name'] = get_identity(
                    context, 'woopra', self._identify, user)
                if user.email:
                    params['email'] = user.email
        return params
    def _identify(self, user):
        """Return a display name for *user* (full name, else username)."""
        name = user.get_full_name()
        if not name:
            name = user.username
        return name
def contribute_to_analytical(add_node):
    """Register the Woopra node with django-analytical's generic tags."""
    WoopraNode()  # ensure properly configured (raises early if WOOPRA_DOMAIN is bad)
    add_node('head_bottom', WoopraNode)
|
import aiohttp_jinja2
from aiohttp.web import Request
from roll_witch.rolling import command
@aiohttp_jinja2.template("roller.jinja2")
async def roll(request: Request):
    """Handle the dice-roller form POST and render the result.

    Returns a template context dict with an "output" key describing the
    roll result or the error encountered.
    """
    data = await request.post()
    try:
        bot_operation = command.get_command(
            message_content=data["roll_operation"]
        )
        if bot_operation:
            operation_output = bot_operation.execute()
            return {"output": operation_output}
        # NOTE(review): when no operation matches, this falls through and
        # implicitly returns None (empty template context) — confirm intended.
    except ValueError:
        return {"output": " Invalid Command"}
    except Exception as e:
        # Some exception types carry a .message attribute; prefer it.
        if hasattr(e, "message"):
            msg = e.message
        else:
            msg = str(e)
        return {"output": f"I ain't Dead \n {msg}"}
|
#!/usr/bin/python3
#Importing all needed libraries / modules
import os
import sys
import time
from time import sleep, strftime
import cv2
import mysql.connector
#Class for database connection and manipulation
class DB_Connection:
    """Wraps the MySQL connection used to record attendance readings."""
    def __init__(self):
        """Open a connection to the local attendID database.

        :raises Exception: if the connection cannot be established
        """
        try:
            self.db = mysql.connector.connect(
                host="localhost",
                user="simon",
                # NOTE(review): credentials hard-coded in source; move them
                # to a config file or environment variables.
                passwd="P@ssw0rd$!-278",
                database="attendID"
            )
            self.mycursor = self.db.cursor()
            debugMsg("Initiated db connection successfully")
        except Exception as x:
            # BUGFIX: '"..." + x' concatenated str with an Exception object,
            # which itself raised TypeError and masked the real error.
            # Convert explicitly and chain the original cause.
            raise Exception("Error occured:" + str(x)) from x
    def insert(self,timeValue,classValue="4AHITN",studentsValue=0):
        """Insert one attendance reading (time, class, student count).

        :param timeValue: reading timestamp string
        :param classValue: class identifier
        :param studentsValue: detected number of students
        :raises Exception: if the insert fails
        """
        #Query for creating students
        debugMsg("Trying to insert to db")
        try:
            sql_query=("INSERT INTO AttendingStudents (ReadingTime,Class,AttendingStudents) VALUE (%s,%s,%s)")
            sql_values=(timeValue,classValue,studentsValue) #Value needs to be in format time,class,students
            #Executes the query and writes into the database
            self.mycursor.execute(sql_query,sql_values)
            #Commit changes to db
            self.db.commit()
            debugMsg("Successfully inserted")
        except Exception as x:
            # BUGFIX: same str + Exception concatenation as in __init__.
            raise Exception("Error occured:" + str(x)) from x
def debugMsg(message):
    """Print *message* only when the module-level ``debug`` flag is set."""
    if debug:
        print(message)
#Path of the Haar cascade face identifier
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
webcam_capture = None
numFaces = [] #Stores the per-frame face counts used to compute the average
avgFace = 0 #Average amount of faces
#Evaluation time
evTime = strftime("%Y-%m-%d %H:%M:%S")
#Classes --> will be given from the website later
classValue = "noClass"
maxStudents = 0
debug = False
#Websites tells the script the needed params: classes, maxStudents, (debug)
if len(sys.argv) > 1:
    if sys.argv[1] != "":
        try:
            classValue = sys.argv[1]
        except:
            print("Error reading param: classValue (arg[1])")
            sys.exit()
if len(sys.argv) > 2:
    if sys.argv[2] != "":
        try:
            # NOTE(review): a negative value is silently ignored here,
            # leaving maxStudents at 0 and triggering the parameter check
            # below — confirm that is the intended rejection path.
            if int(sys.argv[2]) >= 0:
                maxStudents = int(sys.argv[2])
        except:
            print("Error reading param: maxStudents (arg[2])")
            sys.exit()
if len(sys.argv) > 3:
    # NOTE(review): 'sys.argv[3] != None' is always True for any provided
    # argument string — presumably a truthiness check was intended; verify.
    debug = sys.argv[3] != None #Debug is used to print to console
if maxStudents == 0 or classValue == "noClass":
    print("Invalid parameters")
    sys.exit()
#Creates db obj
db = None
try:
    db = DB_Connection()
except Exception as e:
    print(e)
    if webcam_capture != None:
        webcam_capture.release()
    print("Error while creating db connection, exiting...")
    sys.exit()
webcam_capture = cv2.VideoCapture(0)
#Webcam capture is in this case the webcam
debugMsg("Starting to read from webcam")
#How often webcam could not be read
errorCam = 0
while True:
    #If no webcam is detected wait for 5 seconds
    if not webcam_capture.isOpened():
        errorCam+=1
        print("Unable to load camera. {0}. try".format(errorCam))
        sleep(5)
        if errorCam < 3:
            continue #Jumps back to beginning of loop
        else:
            print("Could not access camera. Exiting...")
            sys.exit()
    #Sleep to manage performance on cpu
    sleep(0.1)
    #Read a frame
    res, frame = webcam_capture.read()
    #Convert image to gray img (the cascade classifier works on grayscale)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    debugMsg("Running face detector")
    #This function returns a list of rectangles, where faces appear
    faces = faceCascade.detectMultiScale(
        gray,
        #Change scale factor if too many invalid detections
        scaleFactor=1.2,
        #3-6 is a good value
        minNeighbors=5,
        #Min size of obj
        minSize=(30, 30)
        #,maxSize=(x,x)
    )
    #Only keep readings with at most maxStudents faces in the frame
    if len(faces) <= maxStudents:
        #Print number of faces
        if debug:
            if len(faces) > 1 or len(faces)==0: #0 or more then 1 faces found
                print("{0} faces found!".format(len(faces)))
            else: #Only 1 face found
                print("{0} face found!".format(len(faces)))
        numFaces.append(len(faces))
        if len(numFaces) >= 50: #5 s worth of readings at 0.1 s per frame
            #Saving a image for debugging purpose
            debugMsg("Saving last computed img for debugging")
            for (x,y,w,h) in faces: #Drawing rectangle
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            path = "/var/www/html/pictures/reading.png"
            if os.path.exists(path): #If file already exists delete and rewrite
                os.remove(path)
            status = cv2.imwrite(path,frame)
            debugMsg("Saved img with code: {0}".format(status))
            #Calculating average
            debugMsg("Calculating average")
            for x in numFaces:
                avgFace+=x #Adds all the numbers of faces
            avgFace = int(round(avgFace / len(numFaces))) #Average of faces displayed as an int
            debugMsg("Average faces:" + str(avgFace))
            try:
                db.insert(evTime,classValue,avgFace)
            except Exception as e:
                print(e)
            #Done with the script
            break
    #Press q to exit programm
    if cv2.waitKey(1) & 0xFF == ord('q'):
        debugMsg("Force quit initiated")
        break
# When everything is done, release the capture
webcam_capture.release()
debugMsg("Webcam released, completely done now")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.