#!/usr/bin/env python3
import sys
def start():
from poshc2.client.command_handlers.ImplantHandler import main
args = sys.argv
args.remove("--client")
args.remove("start.py")
main(args)
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from .models import Experiment
class EditExperimentForm(forms.ModelForm):
"""From to edit an experiment. Differs from the creation form, as it
does not allow editing of the folder_name, but does allow changing the
state of the experiment.
"""
class Meta:
model = Experiment
fields = ('title', 'state', 'users',)
        widgets = {
            'title': forms.TextInput,
        }
class CreateExperimentForm(forms.ModelForm):
"""Form to create experiments. Differs from the edit form, as it allows
changing of the folder_name, but does not have the state field.
Changing the state at creation is not desired, as we want experiments to
be explicitly opened by the user when needed.
"""
class Meta:
model = Experiment
fields = ('title', 'folder_name', 'users',)
widgets = {
'title': forms.TextInput,
'folder_name': forms.TextInput,
}
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
# Adds a new label and help text to folder_name
self.fields['folder_name'].label = _("experiments:forms:folder_name")
self.fields['folder_name'].help_text = _(
"experiments:forms:folder_name:help"
).format(settings.WEBEXPERIMENT_HOST)
    def clean_folder_name(self):
        """Ensures that no two experiments will have the same folder name.
        The name is lowercased first, so the uniqueness check is
        case-insensitive and matches the value that gets stored.
        """
        data = self.cleaned_data['folder_name'].lower()
        if Experiment.objects.filter(folder_name=data).exists():
            self.add_error(
                'folder_name',
                _('experiments:forms:create:duplicate_folder')
            )
        return data
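# Minimal usage sketch (standard Django form handling; the surrounding view code
# is assumed, not part of this module):
#   form = CreateExperimentForm(data=request.POST)
#   if form.is_valid():
#       form.save()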
|
from .camelsplit import camelsplit # noqa
|
#!/usr/bin/env python3
bind = '0.0.0.0:5000'
pid = '/tmp/chat_websocket.pid'
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Numpy中实现基本数学计算脚本
Created on 2017-11-16
author: denglelai
"""
import numpy as np
def main():
"""
run basic operations of numpy
"""
    # absolute value, 1
    a_variable = np.abs(-1)
    print("absolute value of -1: " + str(a_variable))
    # sine, 1.0
    a_variable = np.sin(np.pi / 2)
    print("sine of pi/2: " + str(a_variable))
    # inverse hyperbolic tangent, 0.500001071578
    a_variable = np.arctanh(0.462118)
    print("arctanh(0.462118): " + str(a_variable))
    # exponential with base e, 20.0855369232
    a_variable = np.exp(3)
    print("e to the power of 3: " + str(a_variable))
    # 2 to the power of 3, 8
    a_variable = np.power(2, 3)
    print("2 to the power of 3: " + str(a_variable))
    # dot product, 1*3 + 2*4 = 11
    a_variable = np.dot([1, 2], [3, 4])
    print("dot product of [1, 2] and [3, 4]: " + str(a_variable))
    # square root, 5
    a_variable = np.sqrt(25)
    print("square root of 25: " + str(a_variable))
    # sum, 10
    a_variable = np.sum([1, 2, 3, 4])
    print("sum of the elements of [1, 2, 3, 4]: " + str(a_variable))
    # mean, 5.5
    a_variable = np.mean([4, 5, 6, 7])
    print("mean of the elements of [4, 5, 6, 7]: " + str(a_variable))
    # standard deviation, 0.968245836552
    a_variable = np.std([1, 2, 3, 2, 1, 3, 2, 0])
    print("standard deviation of [1, 2, 3, 2, 1, 3, 2, 0]: " + str(a_variable))
if __name__ == '__main__':
main()
|
# -*- coding:utf-8 -*-
# @time :2020/8/28
# @IDE : pycharm
# @author :lxztju
# @github : https://github.com/lxztju
# @Email : lxztju@163.com
import torch
def load_checkpoint(filepath):
checkpoint = torch.load(filepath, map_location='cpu')
    model = checkpoint['model']  # extract the network architecture
    model.load_state_dict(checkpoint['model_state_dict'])  # load the network weights
for parameter in model.parameters():
parameter.requires_grad = False
model.eval()
return model
model = load_checkpoint('./resnext101_32x8.pth')
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save('./trace_resnext101_32x8.pt')
output = traced_script_module(torch.ones(1, 3, 224, 224))
print(output)
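# The traced module can later be reloaded with torch.jit.load('./trace_resnext101_32x8.pt')
# in Python, or from C++ via torch::jit::load, which is the usual reason for tracing.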
|
#PPO-LSTM
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
import time
import numpy as np
import sys
sys.path.append(".")
from args.config import PPO_lstm_params as params
class PPO(nn.Module):
def __init__(self, learning_rate, K_epoch, gamma, lmbda, eps_clip):
super(PPO, self).__init__()
self.learning_rate = learning_rate
self.K_epoch = K_epoch
self.gamma = gamma
self.eps_clip = eps_clip
self.lmbda = lmbda
self.data = []
self.fc1 = nn.Linear(4, 64)
self.lstm = nn.LSTM(64, 32)
self.fc_pi = nn.Linear(32, 2)
self.fc_v = nn.Linear(32, 1)
self.optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)
def pi(self, x, hidden):
x = F.relu(self.fc1(x))
x = x.view(-1, 1, 64)
x, lstm_hidden = self.lstm(x, hidden)
x = self.fc_pi(x)
prob = F.softmax(x, dim=2)
return prob, lstm_hidden
def v(self, x, hidden):
x = F.relu(self.fc1(x))
x = x.view(-1, 1, 64)
x, lstm_hidden = self.lstm(x, hidden)
v = self.fc_v(x)
return v
def put_data(self, transition):
self.data.append(transition)
def make_batch(self):
s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, h_in_lst, h_out_lst, done_lst = [], [], [], [], [], [], [], []
for transition in self.data:
s, a, r, s_prime, prob_a, h_in, h_out, done = transition
s_lst.append(s)
a_lst.append([a])
r_lst.append([r])
s_prime_lst.append(s_prime)
prob_a_lst.append([prob_a])
h_in_lst.append(h_in)
h_out_lst.append(h_out)
done_mask = 0 if done else 1
done_lst.append([done_mask])
s,a,r,s_prime,done_mask,prob_a = torch.tensor(s_lst, dtype=torch.float), torch.tensor(a_lst), \
torch.tensor(r_lst), torch.tensor(s_prime_lst, dtype=torch.float), \
torch.tensor(done_lst, dtype=torch.float), torch.tensor(prob_a_lst)
self.data = []
return s, a, r, s_prime, done_mask, prob_a, h_in_lst[0], h_out_lst[0]
def train_net(self):
s, a, r, s_prime, done_mask, prob_a, (h1_in, h2_in), (
h1_out, h2_out) = self.make_batch()
first_hidden = (h1_in.detach(), h2_in.detach())
second_hidden = (h1_out.detach(), h2_out.detach())
for i in range(self.K_epoch):
v_prime = self.v(s_prime, second_hidden).squeeze(1)
td_target = r + self.gamma * v_prime * done_mask
v_s = self.v(s, first_hidden).squeeze(1)
delta = td_target - v_s
delta = delta.detach().numpy()
advantage_lst = []
advantage = 0.0
for item in delta[::-1]:
advantage = self.gamma * self.lmbda * advantage + item[0]
advantage_lst.append([advantage])
advantage_lst.reverse()
advantage = torch.tensor(advantage_lst, dtype=torch.float)
pi, _ = self.pi(s, first_hidden)
pi_a = pi.squeeze(1).gather(1, a)
            ratio = torch.exp(torch.log(pi_a) -
                              torch.log(prob_a))  # a/b == exp(log(a) - log(b))
surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1 - self.eps_clip,
1 + self.eps_clip) * advantage
loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(
v_s, td_target.detach())
self.optimizer.zero_grad()
loss.mean().backward(retain_graph=True)
self.optimizer.step()
class PPO_lstm_algo():
def __init__(self):
super(PPO_lstm_algo, self).__init__()
self.env = gym.make(params['gym_env'])
self.T_horizon = params['T_horizon']
self.epoch = params['epoch']
self.learning_rate = params['learning_rate']
self.K_epoch = params['K_epoch']
self.gamma = params['gamma']
self.eps_clip = params['eps_clip']
self.lmbda = params['lmbda']
self.print_interval = params["print_interval"]
self.model = PPO(self.learning_rate, self.K_epoch, self.gamma,
self.lmbda, self.eps_clip)
self.init_write()
def init_write(self):
with open("./result/PPO_lstm.csv", "w+", encoding="utf-8") as f:
f.write("epoch_number,average reward\n")
def train(self):
score = 0.0
for n_epi in range(self.epoch):
h_out = (torch.zeros([1, 1, 32], dtype=torch.float),
torch.zeros([1, 1, 32], dtype=torch.float))
s = self.env.reset()
done = False
while not done:
for t in range(self.T_horizon):
h_in = h_out
prob, h_out = self.model.pi(
torch.from_numpy(s).float(), h_in)
prob = prob.view(-1)
m = Categorical(prob)
a = m.sample().item()
s_prime, r, done, info = self.env.step(a)
self.model.put_data(
(s, a, r, s_prime, prob[a].item(), h_in, h_out, done))
s = s_prime
score += r
if done:
break
self.model.train_net()
if n_epi % self.print_interval == 0:
with open("./result/PPO_lstm.csv", "a+",
encoding="utf-8") as f:
f.write("{},{:.1f} \n".format(n_epi,
score / self.print_interval))
print("episode :{}, avg score : {:.1f}".format(
n_epi, score / self.print_interval))
score = 0.0
self.env.close()
if __name__ == '__main__':
algo = PPO_lstm_algo()
algo.train()
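# Note: fc1's input size (4) and fc_pi's output size (2) match CartPole's
# observation and action spaces, so params['gym_env'] is assumed to be a
# CartPole variant; other environments would need those layer sizes changed.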
|
# -*- coding: utf-8 -*-
"""
Author: Michael Markus Ackermann
================================
"""
from typing import List
import numpy as np
def absorption_coefficient(zc: List[complex], kc: List[complex],
thickness: float, z_air: float):
"""
Returns the Sound Absorption Coefficient.
NOTE 1: The values for 'zc' and 'kc' are already divided by the porosity.
NOTE 2: This function only considers the normal incidence angle.
Args:
        zc (List[complex]): Material Characteristic Impedance.
kc (List[complex]): Material Wave Number.
thickness (float): Material Thickness.
z_air (float): Air Characteristic Impedance.
Returns:
        absorption (np.ndarray): Sound Absorption Coefficient [no units].
"""
zs = -1j * (zc / np.tan(kc * thickness)) # Surface impedance (zs)
vp = (zs - z_air) / (zs + z_air) # Reflection coefficient (vp)
return 1 - np.abs(vp) ** 2 # Sound Absorption Coefficient
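# A minimal usage sketch with assumed values: real 'zc' and 'kc' would come from a
# porous-material model, and z_air is roughly rho * c for air (~413.3 rayl at 20 C).
if __name__ == "__main__":
    zc_demo = np.array([900.0 + 400.0j, 850.0 + 350.0j])  # assumed characteristic impedances
    kc_demo = np.array([40.0 + 8.0j, 45.0 + 9.0j])        # assumed wave numbers
    print(absorption_coefficient(zc_demo, kc_demo, thickness=0.05, z_air=413.3))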
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import Mock
from attrdict import AttrDict
from ariadne.actions import Visit, Action, FillForm
@pytest.fixture
def ctx_browser():
""" :return: Context with mocked browser. """
mock = Mock()
context = AttrDict({
'browser': mock
})
return context
def test_action_run_not_implemented():
""" Run method isn't implemented in base class. """
with pytest.raises(NotImplementedError):
context = {}
Action().run(context)
class TestVisit:
@pytest.fixture
def action(self):
return Visit('/login')
def test_get_url(self, action):
""" get_url should return target URL. """
assert action.get_url() == '/login'
def test_get_url_with_server(self, action):
""" get_url should return target URL. """
assert action.get_url('http://localhost/') == 'http://localhost/login'
def test_run(self, action, ctx_browser):
""" Browser should visit given URL. """
action.run(ctx_browser)
ctx_browser.browser.visit.assert_called_with('/login')
def test_run_with_server(self, action, ctx_browser):
""" Browser should visit given URL. """
ctx_browser.update({
'server_url': 'http://localhost/'
})
action.run(ctx_browser)
ctx_browser.browser.visit.assert_called_with('http://localhost/login')
class TestFillForm:
@property
def params(self):
return {
'username': 'admin',
'password': 'secret',
}
def test_run(self, ctx_browser):
""" Fill form with data. """
action = FillForm(data=self.params)
action.run(context=ctx_browser)
ctx_browser.browser.fill_form.assert_called_with(self.params)
assert not ctx_browser.browser.find_by_css.called
def test_run_empty(self, ctx_browser):
""" Data might be empty. """
action = FillForm()
action.run(context=ctx_browser)
ctx_browser.browser.fill_form.assert_called_with({})
assert not ctx_browser.browser.find_by_css.called
def test_run_submit(self, ctx_browser):
""" Submit button by CSS id. """
action = FillForm(data=self.params, submit='#btn-submit')
action.run(context=ctx_browser)
# Assert mock
ctx_browser.browser.fill_form.assert_called_with(self.params)
ctx_browser.browser.find_by_css.assert_called_with('#btn-submit')
def test_run_submit_true(self, ctx_browser):
""" Submit button by CSS type. """
action = FillForm(data=self.params, submit=True)
action.run(context=ctx_browser)
# Assert mock
ctx_browser.browser.fill_form.assert_called_with(self.params)
ctx_browser.browser.find_by_css.assert_called_with('[type="submit"]')
|
#!/usr/bin/env python
"""
cTimer: A high resolution, high precision timer.
"""
import os
import sys
from distutils.core import setup, Extension
if sys.platform == 'darwin':
module1 = Extension('cTimer',
sources = ['cTimer.c'])
else:
module1 = Extension('cTimer',
sources = ['cTimer.c'],
extra_link_args = ['-lrt'])
f = open(os.path.join(os.path.dirname(__file__), 'ReadMe.md'))
long_description = f.read()
f.close()
version = '0.1.1'
setup (
name = 'cTimer',
version = version,
description = 'A high precision timer.',
long_description = long_description,
author = 'Chaordix (Russ Porosky)',
author_email = 'russ@chaordix.com',
url = 'https://github.com/chaordix/cTimer/',
download_url = ('https://github.com/downloads/Chaordix/cTimer/cTimer-%s.tar.gz' % version),
maintainer = 'Chaordix (Russ Porosky)',
maintainer_email = 'russ@chaordix.com',
keywords = ['timer', 'precision'],
license = 'MIT',
platforms = 'ALL',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
],
ext_modules = [module1]
)
|
#! /usr/bin/env python
####################################################################################
# calcSlopeDegrees.py
#
# A Python script to calculate slope from a DEM, where the horizontal spacing is in degrees
# latitude and longitude.
#
# Requires RIOS (https://bitbucket.org/chchrsc/rios/) to read image
#
# The base slope calculation is in Python. If Numba (http://numba.pydata.org)
# is available this is used to improve speed.
# For the best speed a Fortran function (slope.f) is available to perform the slope calculation.
# This must be compiled using:
#
# f2py -llapack -c slope.f90 -m slope
#
# Dan Clewley (daniel.clewley@gmail.com) - 26/06/2013
#
# Adapted from EASI code by Jane Whitcomb
#
#
# Copyright 2014 Daniel Clewley & Jane Whitcomb.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
####################################################################################
import sys,os
sys.path.append(os.sys.path[0])
from rios import imagereader
from rios.imagewriter import ImageWriter
import argparse
import numpy as np
from math import sqrt
useFortranSlope=True
haveNumba = False
try:
import slope
except ImportError:
useFortranSlope=False
try:
from numba import autojit
if useFortranSlope:
print('Numba is available - using Fortran module instead')
else:
print('Fortran module not available - using Numba instead')
haveNumba = True
except ImportError:
if not useFortranSlope:
print('Warning: Could not import Numba or Fortran slope module - will be about 50 x slower!')
else:
print('Fortran module is available')
# have to define our own autojit so Python doesn't complain
def autojit(func):
return func
@autojit
def slopePython(inBlock, outBlock, inXSize, inYSize, zScale=1):
""" Calculate slope using Python.
If Numba is available will make use of autojit function
to run at ~ 1/2 the speed of the Fortran module.
If not will fall back to pure Python - which will be slow!
"""
for x in range(1,inBlock.shape[2]-1):
for y in range(1, inBlock.shape[1]-1):
# Get window size
dx = 2 * inXSize[y,x]
dy = 2 * inYSize[y,x]
# Calculate difference in elevation
dzx = (inBlock[0,y,x-1] - inBlock[0,y,x+1])*zScale
dzy = (inBlock[0,y-1,x] - inBlock[0,y+1,x])*zScale
# Find normal vector to the plane
nx = -1 * dy * dzx
ny = -1 * dx * dzy
nz = dx * dy
slopeRad = np.arccos(nz / sqrt(nx**2 + ny**2 + nz**2))
slopeDeg = (180. / np.pi) * slopeRad
outBlock[0,y,x] = slopeDeg
return outBlock
def slopePythonPlane(inBlock, outBlock, inXSize, inYSize, A_mat, z_vec, winSize=3, zScale=1):
""" Calculate slope using Python.
Algorithm fits plane to a window of data and calculated the slope
from this - slope than the standard algorithm but can deal with
noisy data batter.
The matrix A_mat (winSize**2,3) and vector zScale (winSize**2) are allocated
outside the function and passed in.
"""
winOffset = int(winSize/2)
    # Stay winOffset pixels in from each edge so the window never indexes out of bounds
    for x in range(winOffset, inBlock.shape[2]-winOffset):
        for y in range(winOffset, inBlock.shape[1]-winOffset):
# Get window size
dx = winSize * inXSize[y,x]
dy = winSize * inYSize[y,x]
# Calculate difference in elevation
"""
Solve A b = x to give x
Where A is a matrix of:
x_pos | y_pos | 1
and b is elevation
and x are the coefficents
"""
# Form matrix
index = 0
for i in range(-1*winOffset, winOffset+1):
for j in range(-1*winOffset, winOffset+1):
A_mat[index,0] = 0+(i*inXSize[y,x])
A_mat[index,1] = 0+(j*inYSize[y,x])
A_mat[index,2] = 1
# Elevation
z_vec[index] = inBlock[0,y+j,x+i]*zScale
index+=1
# Linear fit
coeff_vec = np.linalg.lstsq(A_mat, z_vec)[0]
# Calculate dzx and dzy
dzx = coeff_vec[0] * dx
dzy = coeff_vec[1] * dy
# Find normal vector to the plane
nx = -1 * dy * dzx
ny = -1 * dx * dzy
nz = dx * dy
slopeRad = np.arccos(nz / sqrt(nx**2 + ny**2 + nz**2))
slopeDeg = (180. / np.pi) * slopeRad
outBlock[0,y,x] = slopeDeg
return outBlock
def calcSlope(inBlock, inXSize, inYSize, fitPlane=False, zScale=1, winSize=3, minSlope=None):
""" Calculates slope for a block of data
Arrays are provided giving the size for each pixel.
* inBlock - In elevation
* inXSize - Array of pixel sizes (x)
* inYSize - Array of pixel sizes (y)
* fitPlane - Calculate slope by fitting a plane to elevation
data using least squares fitting.
* zScale - Scaling factor between horizontal and vertical
* winSize - Window size to fit plane over.
"""
# If fortran class could be imported use this
if useFortranSlope:
if fitPlane:
outBlock = slope.slopeplane(inBlock[0], inXSize, inYSize, zScale, winSize)
else:
outBlock = slope.slope(inBlock[0], inXSize, inYSize, zScale)
# Add third dimension (required by rios)
outBlock = outBlock.reshape(1, outBlock.shape[0], outBlock.shape[1])
# Cast to 32 bit float (rather than 64 bit numpy default)
outBlock = outBlock.astype(np.float32)
else:
# Otherwise run through loop in python (which will be slower)
# Setup output block
outBlock = np.zeros_like(inBlock, dtype=np.float32)
if fitPlane:
# Setup matrix and vector required for least squares fitting.
winOffset = int(winSize/2)
A_mat = np.zeros((winSize**2,3))
z_vec = np.zeros(winSize**2)
            slopePythonPlane(inBlock, outBlock, inXSize, inYSize, A_mat, z_vec,
                             winSize=winSize, zScale=zScale)
else:
slopePython(inBlock, outBlock, inXSize, inYSize, zScale)
if minSlope is not None:
# Set very low values to constant
outBlock[0] = np.where(np.logical_and(outBlock[0] > 0,outBlock[0] < minSlope),minSlope,outBlock[0])
return(outBlock)
def getPixelSize(lat, latsize, lonsize):
""" Get the pixel size (in m) based on latitude and
pixel size in degrees
"""
    # Set up parameters for the ellipse
# Semi-major and semi-minor for WGS-84 ellipse
ellipse = [6378137.0, 6356752.314245]
radlat = np.deg2rad(lat)
Rsq = (ellipse[0]*np.cos(radlat))**2+(ellipse[1]*np.sin(radlat))**2
Mlat = (ellipse[0]*ellipse[1])**2/(Rsq**1.5)
Nlon = ellipse[0]**2/np.sqrt(Rsq)
xsize = np.pi/180*np.cos(radlat)*Nlon*lonsize
ysize = np.pi/180*Mlat*latsize
return xsize, ysize
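# Quick sanity check (assumed inputs): for a 1 arc-second DEM at 45 degrees latitude,
# getPixelSize(45.0, 1/3600., 1/3600.) gives roughly (21.9, 30.9) metres for (x, y).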
# Set up options
parser = argparse.ArgumentParser()
parser.add_argument("inimage", nargs=1,type=str, help="Input DEM")
parser.add_argument("outimage", nargs=1,type=str, help="Output Slope image")
parser.add_argument("--nostats", action='store_true', default=False, help="Don't calculate stats for output slope image.")
parser.add_argument("--plane_ls", action='store_true', default=False, help="Calculate slope by fitting a plane to a window of elevation data using least squares fitting.")
parser.add_argument("--min_slope", type=float, default=None, help="Set minimum value for slope (values smaller than this but greater than 0 will be fixed to this value)")
parser.add_argument("--window_size", type=int, default=3, help="Window size to calculate slope over when using least squares fitting (default 3)")
parser.add_argument("--z_scale", type=int, default=1, help="Scaling factor between horizontal (m) and vertical. Assumed 1 (vertical units are metres).")
parser.add_argument("--spacing_degrees", action='store_true', default=False, help="Pixel size is in degrees - converted to metres based on latitude.")
args = parser.parse_args()
inImage = args.inimage[0]
outImage = args.outimage[0]
zScale = args.z_scale
winSize = args.window_size
fitPlane = args.plane_ls
if not fitPlane and winSize != 3:
print("ERROR: Setting window size is only supported with '--plane_ls'")
sys.exit()
if fitPlane and not useFortranSlope:
print("WARNING: Couldn't import Fortran module, Numba isn't supported for plane fitting")
calcStats = True
if args.nostats:
calcStats = False
minSlope = args.min_slope
hDegree = args.spacing_degrees
# Set up RIOS image reader
reader = imagereader.ImageReader(inImage, overlap=int(winSize/2))
writer = None
print("Starting...")
for (info, inBlock) in reader:
# Get percent complete
sys.stdout.write("\r %i Percent Complete"%(int(info.getPercent())))
# Get coordinates for block
xCoords, yCoords = info.getBlockCoordArrays()
# Convert pixel sizes to m (if in degrees).
xres, yres = info.getPixelSize()
if hDegree:
xSize, ySize = getPixelSize(yCoords, xres, yres)
else:
xSize = np.zeros_like(xCoords)
ySize = np.zeros_like(yCoords)
xSize[...] = xres
ySize[...] = yres
outBlock = calcSlope(inBlock, xSize, ySize, fitPlane, zScale, winSize, minSlope)
# Check if writer exists, create one if not.
if writer is None:
writer = ImageWriter(outImage, info=info, firstblock=outBlock)
else:
writer.write(outBlock)
sys.stdout.write("\r 100 Percent Complete\n")
if calcStats:
# Close and calculate stats (for faster display)
print("Writing stats...")
writer.close(calcStats=True)
else:
writer.close(calcStats=False)
print("Done")
|
from django.db import models
# these classes are needed to modify the default Django user/admin login
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError("User must have an email address")
        # normalize_email (a BaseUserManager method) lowercases the domain part,
        # making the address effectively case-insensitive. self.model creates the
        # user object without the password; see
        # https://stackoverflow.com/questions/51163088/self-model-in-django-custom-usermanager
        # and https://docs.djangoproject.com/en/3.0/topics/auth/customizing/
        user = self.model(email=self.normalize_email(email), name=name)
        # store the password as a salted hash rather than as a plain string
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Crate and save a new superuser with given details"""
user = self.create_user(email, name, password)
        # is_superuser is not declared on the model because it is provided by PermissionsMixin
user.is_superuser = True
# this method is specified in the UserProfile model
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users authentication in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
# Creates a user profile manager
objects = UserProfileManager()
    # Override the default Django username: USERNAME_FIELD makes the user's
    # email the login identifier instead of a username
    USERNAME_FIELD = 'email'
    # make name a required field
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of the user"""
return f'{self.email} is {self.name}'
#return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add = True)
    def __str__(self):
        """Return the model as a string"""
        return f'created on {self.created_on}. Content: {self.status_text}'
|
import numpy as np
import skimage.color as color
import skimage.transform as transform
rgb2gray = color.rgb2gray
gray2rgb = color.gray2rgb
imresize = transform.resize
imrescale = transform.rescale
def immerge(images, n_rows=None, n_cols=None, padding=0, pad_value=0):
"""Merge images to an image with (n_rows * h) * (n_cols * w).
Parameters
----------
images : numpy.array or object which can be converted to numpy.array
Images in shape of N * H * W(* C=1 or 3).
"""
images = np.array(images)
n = images.shape[0]
if n_rows:
n_rows = max(min(n_rows, n), 1)
n_cols = int(n - 0.5) // n_rows + 1
elif n_cols:
n_cols = max(min(n_cols, n), 1)
n_rows = int(n - 0.5) // n_cols + 1
else:
n_rows = int(n ** 0.5)
n_cols = int(n - 0.5) // n_rows + 1
h, w = images.shape[1], images.shape[2]
shape = (h * n_rows + padding * (n_rows - 1),
w * n_cols + padding * (n_cols - 1))
if images.ndim == 4:
shape += (images.shape[3],)
img = np.full(shape, pad_value, dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % n_cols
j = idx // n_cols
img[j * (h + padding):j * (h + padding) + h,
i * (w + padding):i * (w + padding) + w, ...] = image
return img
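# Example with assumed shapes: immerge(np.zeros((16, 64, 64, 3)), n_rows=4, padding=2)
# returns a (262, 262, 3) grid: 4 rows/columns of 64px images plus 3 gaps of 2px per axis.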
def immerge_(images, n_rows=None, n_cols=None, padding=0, pad_value=0):
    """Variant of immerge that forces a 1 x 1 grid with no padding, so it
    effectively returns a single-image canvas.
    Parameters
    ----------
    images : numpy.array or object which can be converted to numpy.array
        Images in shape of N * H * W(* C=1 or 3).
    """
images = np.array(images)
n = images.shape[0]
if n_rows:
n_rows = max(min(n_rows, n), 1)
n_cols = int(n - 0.5) // n_rows + 1
elif n_cols:
n_cols = max(min(n_cols, n), 1)
n_rows = int(n - 0.5) // n_cols + 1
else:
n_rows = int(n ** 0.5)
n_cols = int(n - 0.5) // n_rows + 1
h, w = images.shape[1], images.shape[2]
    n_rows = 1
    n_cols = 1
    padding = 0
#print(h);
#print(w);
#shape = (h * n_rows + padding * (n_rows - 1),
# w * n_cols + padding * (n_cols - 1))
shape = (h * n_rows, w * n_cols )
if images.ndim == 4:
shape += (images.shape[3],)
img = np.full(shape, pad_value, dtype=images.dtype)
for idx, image in enumerate(images):
i = idx % n_cols
j = idx // n_cols
img[j * (h + padding):j * (h + padding) + h,
i * (w + padding):i * (w + padding) + w, ...] = image
#print(np.shape(img))
return img
|
"""Finetuning BERT hugging gace (pytorch) model.
Author: Mohit Mayank
- Inspired from Masked Label modelling with BERT article
- Link: https://towardsdatascience.com/masked-language-modelling-with-bert-7d49793e5d2c
"""
# IMPORT =========
import pandas as pd
from tqdm import tqdm
import numpy as np
# for deep learning
import torch
import torch.nn as nn
import torch.optim as optim
# load BERT model
from transformers import AdamW
from transformers import BertTokenizer, BertForMaskedLM
# MODEL LOAD =========
#model_path = "../input/roberta-base" # if local copy is present
model_path = "bert-base-uncased" # if local copy is not present
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForMaskedLM.from_pretrained(model_path)
# DATA PREP 1 =========
data = pd.read_csv("file_with_text.csv")
# tokenize
inputs = tokenizer(data, return_tensors='pt', max_length=250, truncation=True, padding='max_length')
inputs['labels'] = inputs.input_ids.detach().clone()
# create random array of floats with equal dimensions to input_ids tensor
rand = torch.rand(inputs.input_ids.shape)
# create mask array
# mask ~30% of tokens at random, but never the special tokens:
# 101 = [CLS], 102 = [SEP], 0 = [PAD] for bert-base-uncased
mask_ratio = 0.3
mask_arr = (rand < mask_ratio) * (inputs.input_ids != 101) * \
    (inputs.input_ids != 102) * (inputs.input_ids != 0)
# get the indices where to add mask
selection = []
for i in range(inputs.input_ids.shape[0]):
selection.append(
torch.flatten(mask_arr[i].nonzero()).tolist()
)
# add the mask token (103 = [MASK] for bert-base-uncased)
for i in range(inputs.input_ids.shape[0]):
    inputs.input_ids[i, selection[i]] = 103
# DATA PREP 2 - DATALOADER =========
# define dataset class
class MLMDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __getitem__(self, idx):
return {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
def __len__(self):
return len(self.encodings.input_ids)
# create instance
dataset = MLMDataset(inputs)
loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
# PRE_TRAIN =============
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# and move our model over to the selected device
model.to(device)
# activate training mode
model.train()
# initialize optimizer
optimizer = AdamW(model.parameters(), lr=5e-5)
# TRAIN =====================
epochs = 20
for epoch in range(epochs):
# setup loop with TQDM and dataloader
loop = tqdm(loader, leave=True)
for batch in loop:
# initialize calculated gradients (from prev step)
optimizer.zero_grad()
# pull all tensor batches required for training
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
labels = batch['labels'].to(device)
# process
outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
# extract loss
loss = outputs.loss
# calculate loss for every parameter that needs grad update
loss.backward()
# update parameters
optimizer.step()
# print relevant info to progress bar
loop.set_description(f'Epoch {epoch}')
loop.set_postfix(loss=loss.item())
# SAVE MODEL =====================
model.save_pretrained("bert_finetuned_on_text/")
tokenizer.save_pretrained("bert_finetuned_on_text/")
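# A minimal reload sketch to sanity-check the finetuned model (example sentence assumed):
#   from transformers import pipeline
#   fill = pipeline("fill-mask", model="bert_finetuned_on_text/", tokenizer="bert_finetuned_on_text/")
#   print(fill("The weather today is [MASK]."))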
|
class OpcoesArquivos:
def __init__(self, diretorio):
self.diretorio = diretorio
def add_zeros(self, valor):
valor = str(valor).replace(',', '.')
valor = str(round(float(valor), 2))
if float(valor) == 0:
valor = '0.00'
else:
cont = -1
for i in valor:
if i == '.':
cont += 1
if cont >= 0:
cont += 1
if cont == 2:
valor += '0'
return valor
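    # Example: add_zeros('3,5') -> '3.50' and add_zeros(0) -> '0.00'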
def criar_arquivo(self, nome, conteudo=''):
try:
arquivo = open(f'{self.diretorio}{nome}.txt', 'r')
if not arquivo.read():
OpcoesArquivos(self.diretorio).escrever_arquivo(nome, conteudo)
arquivo.close()
        except FileNotFoundError:
OpcoesArquivos(self.diretorio).escrever_arquivo(nome, conteudo)
def escrever_arquivo(self, nome, conteudo=''):
arquivo = open(f'{self.diretorio}{nome}.txt', 'a')
arquivo.write(f'{conteudo}')
arquivo.close()
def apagar_arquivo(self, nome):
arquivo = open(f'{self.diretorio}{nome}.txt', 'w')
arquivo.write('')
arquivo.close()
def listar_dados(self, nome):
arquivo = open(f'{self.diretorio}{nome}.txt', 'r')
lista = arquivo.read().split(', ')
lista.pop(-1)
arquivo.close()
aux = []
for i in lista:
x = i.split(' - ')
aux.append(x)
for i in aux:
i[2] = OpcoesArquivos(self.diretorio).add_zeros(i[2])
return aux
def atualizar_total(self, nome):
aux = OpcoesArquivos(self.diretorio).listar_dados(nome)
aux.pop(-1)
ganho = 0.00
if nome == 'receitas':
for x, y, z in aux:
if int(x) < 6:
ganho += float(z.replace(',', '.'))
else:
ganho -= float(z.replace(',', '.'))
else:
for i in aux:
ganho += float(i[2].replace(',', '.'))
texto = ''
if nome == 'cartao':
for x, y, z, a, b in aux:
texto += f'{x} - {y} - {z} - {a} - {b}, '
else:
for x, y, z in aux:
texto += f'{x} - {y} - {z}, '
texto += f'{len(aux) + 1} - TOTAL - {OpcoesArquivos(self.diretorio).add_zeros(ganho)}, '
arquivo = open(f'{self.diretorio}{nome}.txt', 'w')
arquivo.write(texto)
arquivo.close()
if __name__ == '__main__':  ## Test area
    a = str(__file__).replace('arquivos.py', '')
print(a)
|
import pathlib
from setuptools import setup
setup(
name="pytest-level",
version='0.1.1',
py_modules=["pytest_level"],
description='Select tests of a given level or lower',
long_description=pathlib.Path('README.md').read_text(),
long_description_content_type="text/markdown",
author='Petr Viktorin',
author_email='encukou@gmail.com',
license='MIT',
url='https://github.com/encukou/pytest-level',
entry_points={"pytest11": ["name_of_plugin = pytest_level"]},
classifiers=[
'Framework :: Pytest',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
setup_requires=['pytest'],
zip_safe=False,
)
|
from interactable import Interactable, Button, TextBox
from textProcessor import TextProcessor
from imageProcessor import ImageProcessor
import pygame as pg
import config as cfg
class GameNode(Interactable):
def __init__(self, row_id=None, option_id=None, data=None):
self.row_id = row_id
self.option_id = option_id
self.data = data
self.children = []
self.rendered_models = []
self.text_box = None
self.continue_button = None
self.button = None
self.last_text_box = None
self.box_dimension = None
self.button_dimension = None
self.last_text_box_bool = False
def get_text_box(self, next_nodes, color):
        if self.text_box is None:
self.reset_text_box(next_nodes, color)
else:
self.text_box.text_obj.reset_scroll()
def reset_text_box(self, next_nodes, color):
continue_button = Button(TextProcessor("Continue", "center", cfg.CONTINUE_WIDTH * 0.9, cfg.CONTINUE_HEIGHT * 0.85, cfg.CONTINUE_WIDTH * 0.72, cfg.CONTINUE_HEIGHT * 0.425, box= cfg.CONTINUE_BOX_RECT, opacity = 200, box_color = color), next_nodes, args=self, trigger=[pg.MOUSEBUTTONDOWN, pg.K_SPACE, pg.K_RETURN])
self.text_box = TextBox(TextProcessor(self.data.text, "left", cfg.DEFAULT_TEXT_BOX[2] / 1.015, cfg.DEFAULT_TEXT_BOX[3] / 1.02, cfg.DEFAULT_TEXT_BOX[2] * 0.7, cfg.DEFAULT_TEXT_BOX[3] * 0.7, adjust=True, scroll=True, opacity = 100, box_color=color), continue_button)
def get_button(self, next_nodes, color):
self.button = Button(TextProcessor(self.data.text, "center", self.button_dimension[0], self.button_dimension[1], self.button_dimension[2], self.button_dimension[3], box=self.box_dimension, opacity = 200, box_color = color), next_nodes, args=self, trigger=self.trigger)
def get_last_text_box(self, color):
return TextProcessor(self.data.text, "center", cfg.LAST_TEXT_BOX[2] / 1.025, cfg.LAST_TEXT_BOX[3] / 1.05, cfg.LAST_TEXT_BOX[2] * 0.7, cfg.LAST_TEXT_BOX[3] * 0.7, box = cfg.LAST_TEXT_BOX, opacity = 100, box_color = color)
def add_child(self, child_node):
self.children.append(child_node)
def add_parent(self, parent_node):
if self not in parent_node.children:
parent_node.add_child(self)
def remove_child(self, node):
if node in self.children:
self.children.remove(node)
def get_children(self):
return self.children
def render_background(self):
return ImageProcessor(self.data.bg, h=cfg.WINDOW_HEIGHT, base="bgs")
def render_models(self):
        return [] if self.data.models is None else [ImageProcessor(model_img, h=cfg.WINDOW_HEIGHT, base="models") for model_img in self.data.models.split()]
def blit(self, target):
        if self.text_box is not None:
            self.text_box.blit(target)
        if self.button is not None:
            self.button.blit(target)
        if self.last_text_box is not None:
            self.last_text_box.blit(target)
def event(self, event, observer):
        if self.button is not None:
            self.button.event(event, observer)
        if self.text_box is not None:
            self.text_box.event(event, observer)
class CellData:
def __init__(self, conditional=None, destination=None, text = None, incoming = None, leaving = None, ending = None, noise = "None", models = None, bg = None, music = None):
        if conditional is None:
conditional = []
else:
conditional = conditional.replace("(", "").replace(")", "").split()
for index, condition in enumerate(conditional):
conditional[index] = condition.split(",")
self.conditionals = conditional
self.destination = destination
self.text = text
self.incoming = incoming
self.leaving = leaving
self.ending = ending
self.noise = noise
self.bg = bg
self.music = music
self.models = models
class EndNode(Interactable):
BUTTON_WIDTH = round(cfg.WINDOW_WIDTH / 3)
BUTTON_MARGIN = round((cfg.WINDOW_WIDTH - BUTTON_WIDTH * 2) / 3)
BUTTON_DIMENSION = BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425
button1_box_dimension = (BUTTON_MARGIN, cfg.BUTTON_Y, BUTTON_WIDTH, cfg.BUTTON_BASE_HEIGHT)
button2_box_dimension = (BUTTON_WIDTH + BUTTON_MARGIN * 2, cfg.BUTTON_Y, BUTTON_WIDTH, cfg.BUTTON_BASE_HEIGHT)
button3_box_dimension = (cfg.WINDOW_WIDTH/2 - BUTTON_WIDTH/2, cfg.BUTTON_Y, BUTTON_WIDTH, cfg.BUTTON_BASE_HEIGHT)
    def __init__(self, get_option_objs, update_active_objs, update_sounds, main_menu_node, data, gear=None, win_node=""):
self.get_option_objs, self.update_active_objs, self.update_sounds = get_option_objs, update_active_objs, update_sounds
self.main_menu_node = main_menu_node
        self.gear = gear if gear is not None else []
self.win_node = win_node
self.game_data = data
self.data=CellData(music="end")
self.buttons = [
Button(TextProcessor("Restart", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button1_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.restart, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1]),
Button(TextProcessor("Main Menu", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button2_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.main_menu, trigger = [pg.MOUSEBUTTONDOWN, pg.K_2]),
]
self.win_button = Button(TextProcessor("Continue", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button3_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.assign_win_node, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1])
def reset_buttons(self):
self.buttons = [
Button(TextProcessor("Restart", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button1_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.restart, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1]),
Button(TextProcessor("Main Menu", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button2_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.main_menu, trigger = [pg.MOUSEBUTTONDOWN, pg.K_2]),
]
def event(self, event, observer):
if len(self.game_data.data_dict["endings"]) != 8:
for button in self.buttons:
button.event(event, observer)
def blit(self, target):
self.end_text_box.blit(target)
if len(self.game_data.data_dict["endings"]) != 8:
for button in self.buttons:
button.blit(target)
else:
self.win_button.blit(target)
def restart(self, arg):
next_nodes = self.get_option_objs("0")
self.update_sounds(next_nodes[0].data.music, next_nodes[0].data.noise)
self.update_active_objs("nodes", next_nodes)
self.update_active_objs("gear", [self.gear])
self.update_active_objs("background", [next_nodes[0].render_background()])
self.update_active_objs("models", next_nodes[0].render_models())
self.game_data.data_dict["backpack"] = False
self.game_data.data_dict["inventory"] = []
self.game_data.data_dict["traversed_rows"] = []
def main_menu(self, arg):
self.game_data.data_dict["option"] = "0"
self.game_data.data_dict["backpack"] = False
self.game_data.data_dict["inventory"] = []
self.main_menu_node.reset_buttons()
self.update_active_objs("nodes", [self.main_menu_node])
self.update_active_objs("background", [self.main_menu_node.render_background()])
def render_background(self):
return ImageProcessor(self.game_data.data_dict["endings"][-1][7], h=cfg.WINDOW_HEIGHT, base="bgs")
def assign_win_node(self, arg):
self.update_active_objs("nodes", [self.win_node])
def get_end_text_box(self, end):
self.end_text_box =TextProcessor(end, "center", cfg.LAST_TEXT_BOX[2] / 1.2, cfg.LAST_TEXT_BOX[3] / 1.2, cfg.LAST_TEXT_BOX[2] * 0.6, cfg.LAST_TEXT_BOX[3] * 0.6, box = cfg.LAST_TEXT_BOX, opacity = 100, box_color = self.game_data.data_dict["box_color"], font_size = 10)
class HellEndNode(EndNode):
def __init__(self, get_option_objs, update_active_objs, update_sounds, main_menu_node, data, backpack, gear, win_node = ""):
super().__init__(get_option_objs, update_active_objs, update_sounds, main_menu_node, data, win_node = win_node)
self.buttons[0] = Button(TextProcessor("Try Again", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button1_box_dimension,opacity=200), self.try_again, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1])
self.backpack = backpack
self.gear = gear
def reset_buttons(self):
self.buttons = [
Button(TextProcessor("Try Again", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button1_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.try_again, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1]),
Button(TextProcessor("Main Menu", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button2_box_dimension,opacity=200,box_color = self.game_data.data_dict["box_color"]), self.main_menu, trigger = [pg.MOUSEBUTTONDOWN, pg.K_2]),
]
def try_again(self, arg):
next_nodes = self.get_option_objs("706")
self.update_sounds(next_nodes[0].data.music, next_nodes[0].data.noise)
self.update_active_objs("nodes", next_nodes)
self.update_active_objs("background", [next_nodes[0].render_background()])
self.update_active_objs("backpack", [self.backpack])
self.update_active_objs("gear", [self.gear])
self.update_active_objs("models", next_nodes[0].render_models())
class StartNode(Interactable):
BUTTON_WIDTH = round(cfg.WINDOW_WIDTH / 3)
BUTTON_MARGIN = round((cfg.WINDOW_WIDTH - BUTTON_WIDTH * 2) / 3)
BUTTON_DIMENSION = BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425
button1_box_dimension = (BUTTON_MARGIN, cfg.BUTTON_Y, BUTTON_WIDTH, cfg.BUTTON_BASE_HEIGHT)
button2_box_dimension = (BUTTON_WIDTH + BUTTON_MARGIN * 2, cfg.BUTTON_Y, BUTTON_WIDTH, cfg.BUTTON_BASE_HEIGHT)
def __init__(self, get_option_objs, update_active_objs, update_sounds, data, backpack, gear):
self.get_option_objs, self.update_active_objs, self.update_sounds = get_option_objs, update_active_objs, update_sounds
self.game_data = data
self.backpack = backpack
self.gear = gear
self.buttons = [
Button(TextProcessor("Continue", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button1_box_dimension, opacity=200, box_color = self.game_data.data_dict["box_color"]), self.continue_game, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1]),
Button(TextProcessor("New Game", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button2_box_dimension, opacity=200, box_color = self.game_data.data_dict["box_color"]), self.new_game, trigger = [pg.MOUSEBUTTONDOWN, pg.K_2]),
]
self.update_active_objs("background", [self.render_background()])
def render_background(self):
return ImageProcessor("mainmenu", h=cfg.WINDOW_HEIGHT, base="bgs")
def reset_buttons(self):
self.buttons = [
Button(TextProcessor("Continue", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button1_box_dimension, opacity=200, box_color = self.game_data.data_dict["box_color"]), self.continue_game, trigger = [pg.MOUSEBUTTONDOWN, pg.K_1]),
Button(TextProcessor("New Game", "center", self.BUTTON_WIDTH * 0.9, cfg.BUTTON_BASE_HEIGHT * 0.85, self.BUTTON_WIDTH * 0.72, cfg.BUTTON_BASE_HEIGHT * 0.425, box=self.button2_box_dimension, opacity=200, box_color = self.game_data.data_dict["box_color"]), self.new_game, trigger = [pg.MOUSEBUTTONDOWN, pg.K_2]),
]
def event(self, event, observer):
for button in self.buttons:
button.event(event, observer)
def blit(self, target):
self.update_sounds("intro", "None")
for button in self.buttons:
button.blit(target)
def continue_game(self, arg):
last_text_box = TextProcessor(self.game_data.data_dict["last_text"], "center", cfg.LAST_TEXT_BOX[2] / 1.025, cfg.LAST_TEXT_BOX[3] / 1.05, cfg.LAST_TEXT_BOX[2] * 0.7, cfg.LAST_TEXT_BOX[3] * 0.7, box = cfg.LAST_TEXT_BOX, box_color=self.game_data.data_dict["box_color"],opacity = 100)
next_nodes = self.get_option_objs(self.game_data.data_dict["option"])
if len(next_nodes) != 1:
next_nodes[0].last_text_box = last_text_box
self.update_sounds(next_nodes[0].data.music, next_nodes[0].data.noise)
self.update_active_objs("nodes", next_nodes)
self.update_active_objs("background", [next_nodes[0].render_background()])
self.update_active_objs("models", next_nodes[0].render_models())
self.update_active_objs("gear", [self.gear])
if self.game_data.data_dict["backpack"] == True:
self.update_active_objs("backpack", [self.backpack])
def new_game(self, arg):
self.game_data.data_dict["option"] = "0"
self.game_data.data_dict["traversed_rows"] = []
self.game_data.data_dict["inventory"] = []
self.game_data.data_dict["endings"] = []
next_nodes = self.get_option_objs(self.game_data.data_dict["option"])
self.update_sounds(next_nodes[0].data.music, "None")
self.update_active_objs("background", [next_nodes[0].render_background()])
self.update_active_objs("gear", [self.gear])
self.update_active_objs("nodes", next_nodes)
class WinNode(EndNode):
def __init__(self, get_option_objs, update_active_objs, update_sounds, main_menu_node, data):
super().__init__(get_option_objs, update_active_objs, update_sounds, main_menu_node, data)
self.get_end_text_box("Congratulations, you found all the endings!")
|
import heterocl as hcl
def test_tensor_slice_shape():
A = hcl.compute((2,10), lambda i,j: 0)
assert A[0].shape == (10,)
|
BASE_MAPPER = {
'uuid': 'Id',
'name': 'Name',
'state': 'Status'
}
L3_NETWORK_MAPPER = {
'uuid': 'Id',
'name': 'Name',
'l2NetworkUuid': 'NetworkId'
}
PowerState_Mapper = {
'Running': 'POWERED_ON',
'Stopped': 'POWERED_OFF',
'Unknown': 'UNKNOWN',
'other': 'SUSPENDED'
}
|
from dataclasses import asdict, dataclass
from typing import Iterable, Optional, Union
import numpy as np
import pandas as pd
from .dist_funs import DEFAULT_DIST_PAIR_DICT, DistanceNormalizer
@dataclass
class PairBase:
df1: pd.DataFrame
df2: pd.DataFrame
name: Optional[str] = None
def __getitem__(self, item):
return [self.df1, self.df2][item]
def __iter__(self):
yield self.df1
yield self.df2
def __repr__(self):
return (
f"{type(self).__name__}: {self.name} - "
f"{self.df1.shape}, {self.df2.shape}"
)
def sample(
self,
init_sample_left: Union[None, int, list, np.ndarray],
init_sample_right: Union[None, int, list, np.ndarray],
dropleft: Iterable = None,
dropright: Iterable = None,
):
df1, df2 = [
_df.pipe(_dodrop, todrop).pipe(_dosample, sample_size)
for _df, sample_size, todrop in zip(
self,
[init_sample_left, init_sample_right],
[dropleft, dropright],
)
]
kls = type(self)
return kls(**{**asdict(self), "df1": df1, "df2": df2})
@classmethod
def concat(cls, pairs, ignore_index=False):
df1, df2 = [
pd.concat([p[i] for p in pairs], ignore_index=ignore_index)
for i in range(2)
]
return cls(**{**asdict(pairs[0]), "df1": df1, "df2": df2})
@property
def one_empty(self):
return self.min_size == 0
@property
def min_size(self):
return min(self.df1.shape[0], self.df2.shape[0])
@property
def n_cols(self):
return self.df1.shape[1]
@property
def total_size(self):
return self.df1.shape[0] + self.df2.shape[0]
@dataclass(repr=False)
class EntitySetPair(PairBase):
distance_metrics: Optional[list] = None
distance_normalizer: Optional[DistanceNormalizer] = None
sample_count_for_dist_normalizer: int = 5000
def __post_init__(self):
if self.name is None:
self.name = self.df1.index.name
if self.distance_metrics is None:
self.distance_metrics = [
DEFAULT_DIST_PAIR_DICT[t] for t in self.df1.dtypes
]
if self.distance_normalizer is None:
self.distance_normalizer = DistanceNormalizer.from_raw_array(
self._get_raw_distance_values(
pd.MultiIndex.from_arrays(
[
_df.sample(
self.sample_count_for_dist_normalizer,
replace=True,
).index
for _df in self
]
).drop_duplicates()
)
)
    def get_distance_series(self, left_indices, right_indices) -> pd.Series:
        key_pairs = pd.MultiIndex.from_arrays(
            [left_indices, right_indices]
        )
return pd.Series(
self.distance_normalizer.transform(
self._get_raw_distance_values(key_pairs)
),
index=key_pairs,
dtype=np.float64,
)
def _get_raw_distance_values(self, key_pairs) -> np.ndarray:
# if relation points to nonexistent ind
# keyerror here
side_values = [
self[side].loc[key_pairs.get_level_values(side), :].values
for side in range(2)
]
return np.concatenate(
[
dist_fun(
side_values[0][:, idx],
side_values[1][:, idx],
)[:, np.newaxis]
for idx, dist_fun in enumerate(self.distance_metrics)
],
axis=1,
)
@dataclass(repr=False)
class MotifPair(PairBase):
entity_types_of_columns: Optional[list] = None
def extend(
self, relation_pair: "RelationPair", source_col=None, inverse=False
) -> "MotifPair":
        return relation_pair.merge(
self,
source_col if source_col is not None else self.n_cols - 1,
inverse,
)
@classmethod
def root_from_indices(
cls,
left_indices,
right_indices,
entity_type,
) -> "MotifPair":
return cls(
df1=pd.DataFrame({0: left_indices}),
df2=pd.DataFrame({0: right_indices}),
name=entity_type,
entity_types_of_columns=[entity_type],
)
@property
def leaf_entity_type(self):
return self.entity_types_of_columns[-1]
@dataclass(repr=False)
class RelationPair(PairBase):
entity_types_of_columns: Optional[list] = None
def __post_init__(self):
if self.name is None:
if self.df1.index.name is None:
self.name = "-".join(map(str, self.df1.columns))
else:
self.name = self.df1.index.name
if self.entity_types_of_columns is None:
self.entity_types_of_columns = self.df1.columns.tolist()
    def merge(
self,
nh_pair: MotifPair,
source_col: int,
inverse=False,
) -> MotifPair:
_cs = -1 if inverse else 1
new_name = f"{nh_pair.name}--{self.name}"
new_type = self.entity_types_of_columns[int(not inverse)]
etypes = nh_pair.entity_types_of_columns + [new_type]
neigh_dfs = [
neighdf.merge(
reldf.rename(
columns=dict(
zip(reldf.columns[::_cs], [source_col, nh_pair.n_cols])
)
),
how="inner",
)
for reldf, neighdf in zip(self, nh_pair)
]
return MotifPair(*neigh_dfs, new_name, etypes)
def _dosample(df, size):
if isinstance(size, (list, np.ndarray, pd.Index)):
return df.loc[size, :]
return df if (size is None or size > df.shape[0]) else df.sample(size)
def _dodrop(df, todrop):
if todrop is None:
return df
return df.drop(todrop)
|
def models(app):
return 'model 1', 'model 2', 'model 3'
__drops__ = models
|
import os, numpy, random, time, math
import torch
import torch.nn.functional as F
from ssl_lib.utils import Bar, AverageMeter
def supervised_train(epoch, train_loader, model, optimizer, lr_scheduler, cfg, device):
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
labeled_acc = AverageMeter()
n_iter = cfg.n_imgs_per_epoch // cfg.l_batch_size
bar = Bar('Supervised Training', max=n_iter)
end = time.time()
for batch_idx, (data_l, _) in enumerate(train_loader):
inputs_l, labels = data_l
inputs_l, labels = inputs_l.to(device), labels.to(device)
data_time.update(time.time() - end)
bs = inputs_l.size(0)
cur_iteration = epoch*cfg.per_epoch_steps+batch_idx
logits_l = model(inputs_l)
loss = F.cross_entropy(logits_l, labels)
# update parameters
cur_lr = optimizer.param_groups[0]["lr"]
optimizer.zero_grad()
loss.backward()
optimizer.step()
lr_scheduler.step()
acc_l = (logits_l.max(1)[1] == labels).float().mean()
losses.update(loss.item())
labeled_acc.update(acc_l.item())
batch_time.update(time.time() - end)
end = time.time()
if (batch_idx+1) % 10==0:
bar.suffix = ("{batch:4}/{iter:4}. LR:{lr:.6f}. Data:{dt:.3f}s. Batch:{bt:.3f}s. Loss:{loss:.4f}. Acc_L:{acc_l:.4f}.".format(
batch=batch_idx+1,
iter=n_iter,
lr=cur_lr,
dt=data_time.avg,
bt=batch_time.avg,
loss=losses.avg,
acc_l=labeled_acc.avg))
bar.next()
bar.finish()
return losses.avg, labeled_acc.avg
|
import os
import base64
import discord
# HTTP headers
headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "GET",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Max-Age": "3600",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0"
# "Authorization": base64.b64encode(bytes("Basic username:password", "utf-8"))
}
# Login for YouTube to download age-restricted videos (this alone still doesn't solve the issue)
YT_MAIL = os.getenv("YT_MAIL")
YT_PASS = os.getenv("YT_PASS")
# youtube-dl wants cookies as a text file (this also doesn't solve the age-restriction issue)
with open("cookies.txt", "w") as text_file:
print(os.getenv('COOKIE_DATA'), file=text_file)
ytdl_format_options = {
"cookies": "cookies.txt",
"user_agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
"username": YT_MAIL,
"password": YT_PASS,
"format": "bestaudio/best",
"outtmpl": "%(extractor)s-%(id)s-%(title)s.%(ext)s",
"restrictfilenames": True,
"noplaylist": True,
"nocheckcertificate": True,
"ignoreerrors": False,
"logtostderr": False,
"quiet": True,
"no_warnings": True,
"default_search": "auto",
"source_address": "0.0.0.0" # Bind to IPv4 since IPv6 addresses cause issues sometimes
}
# -vn discards video stream
ffmpeg_options = {
"options": "-vn"
}
# Bot's discord activities
activities = [
discord.Game(name="with křemík."),
discord.Activity(type=discord.ActivityType.listening, name="frequencies."),
discord.Activity(type=discord.ActivityType.watching, name="you.")
]
geckodriver_path = os.getenv("GECKODRIVER_PATH")
firefox_bin = os.getenv("FIREFOX_BIN")
|
"""Run distributed tasks using the Celery distributed task queue.
http://celeryproject.org/
"""
import os
import sys
import time
import contextlib
import multiprocessing
from mako.template import Template
try:
import joblib
except ImportError:
    joblib = None
from bcbio import utils
from bcbio.distributed import ipython
from bcbio.log import logger
from bcbio.provenance import diagnostics, system
def parallel_runner(parallel, dirs, config, config_file=None):
"""Process a supplied function: single, multi-processor or distributed.
"""
def run_parallel(fn_name, items, metadata=None):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
imodule = parallel.get("module", "bcbio.distributed")
sysinfo = system.get_info(dirs, parallel)
if parallel["type"].startswith("messaging"):
task_module = "{base}.tasks".format(base=imodule)
runner_fn = runner(task_module, dirs, config, config_file)
return runner_fn(fn_name, items)
elif parallel["type"] == "ipython":
return ipython.runner(parallel, fn_name, items, dirs["work"], sysinfo, config)
else:
logger.info("multiprocessing: %s" % fn_name)
fn = getattr(__import__("{base}.multitasks".format(base=imodule),
fromlist=["multitasks"]),
fn_name)
jobr = ipython.find_job_resources([fn], parallel, items, sysinfo, config)
items = [ipython.add_cores_to_config(x, jobr.cores_per_job) for x in items]
if joblib is None:
raise ImportError("Need joblib for multiprocessing parallelization")
out = []
for data in joblib.Parallel(jobr.num_jobs)(joblib.delayed(fn)(x) for x in items):
if data:
out.extend(data)
return out
return run_parallel
def runner(task_module, dirs, config, config_file, wait=True):
"""Run a set of tasks using Celery, waiting for results or asynchronously.
Initialize with the configuration and directory information,
used to prepare a Celery configuration file and imports. It
returns a function which acts like standard map; provide the function
name instead of the function itself when calling.
After name lookup, Celery runs the function in parallel; Celery servers
can be remote or local but must have access to a shared filesystem. The
function polls if wait is True, returning when all results are available.
"""
with create_celeryconfig(task_module, dirs, config, config_file):
sys.path.append(dirs["work"])
__import__(task_module)
tasks = sys.modules[task_module]
from celery.task.sets import TaskSet
def _run(fn_name, xs):
fn = getattr(tasks, fn_name)
            job = TaskSet(tasks=[fn.subtask(x) for x in xs])
result = job.apply_async()
out = []
if wait:
with _close_taskset(result):
while not result.ready():
time.sleep(5)
if result.failed():
raise ValueError("Failed distributed task; cleaning up")
for x in result.join():
if x:
out.extend(x)
return out
return _run
@contextlib.contextmanager
def _close_taskset(ts):
"""Revoke existing jobs if a taskset fails; raise original error.
"""
    try:
        yield None
    except BaseException:
        try:
            ts.revoke()
        except BaseException:
            pass
        raise
# ## Utility functions
_celeryconfig_tmpl = """
CELERY_IMPORTS = ("${task_import}", )
BROKER_URL = "amqp://${userid}:${password}@${host}:${port}/${rabbitmq_vhost}"
CELERY_RESULT_BACKEND = "amqp"
CELERY_TASK_SERIALIZER = "json"
CELERYD_CONCURRENCY = ${cores}
CELERY_ACKS_LATE = False
CELERYD_PREFETCH_MULTIPLIER = 1
BCBIO_CONFIG_FILE = "${config_file}"
"""
@contextlib.contextmanager
def create_celeryconfig(task_module, dirs, config, config_file):
amqp_config = utils.read_galaxy_amqp_config(config["galaxy_config"], dirs["config"])
    if "host" not in amqp_config or "userid" not in amqp_config:
        raise ValueError("universe_wsgi.ini does not have RabbitMQ messaging details set")
out_file = os.path.join(dirs["work"], "celeryconfig.py")
amqp_config["rabbitmq_vhost"] = config["distributed"]["rabbitmq_vhost"]
cores = config["distributed"].get("cores_per_host", 0)
if cores < 1:
cores = multiprocessing.cpu_count()
amqp_config["cores"] = cores
amqp_config["task_import"] = task_module
amqp_config["config_file"] = config_file
with open(out_file, "w") as out_handle:
out_handle.write(Template(_celeryconfig_tmpl).render(**amqp_config))
try:
yield out_file
finally:
pyc_file = "%s.pyc" % os.path.splitext(out_file)[0]
for fname in [pyc_file, out_file]:
if os.path.exists(fname):
os.remove(fname)
|
#!/usr/bin/env python3
"""
Tests for the Poisson TensorFlow custom operation.
.. moduleauthor:: David Stutz
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from matplotlib import pyplot as plt
Poisson_module = tf.load_op_library('build/libPoissonOp.so')
PoissonGrad_module = tf.load_op_library('build/libPoissonGradOp.so')
@ops.RegisterGradient("PoissonOp")
def poisson_grad_cc(op, grad):
return PoissonGrad_module.poisson_grad(grad, op.inputs[0], op.inputs[1], op.inputs[2])
sess = tf.Session()
len_z = 200
len_x = 200
h = 1/(2**1)
nz = int(len_z/h + 1)
nx = int(len_x/h + 1)
rho = 1000.0
G = 9.8
g_np = np.zeros((nz, nx))
coef_np = np.zeros((nz, nx))
p_true = np.zeros((nz, nx))
# for i in range(nz):
# for j in range(nx):
# g_np[i,j] = np.sin(2*np.pi/len_z*i*h)*np.sin(2*np.pi/len_x*j*h)
# coef_np[i,j] = 1.0-np.cos(2*np.pi/len_z*i*h)*np.sin(2*np.pi/len_x*j*h)*len_z/(2*np.pi*rho*G)
# p_true[i,j] = rho*G*i*h
for i in range(nz):
for j in range(nx):
g_np[i,j] = 2*i*h*np.exp(-(i*h)**2-(j*h)**2) * rho * G
coef_np[i,j] = 1 + np.exp(-(i*h)**2-(j*h)**2)
p_true[i,j] = rho*G*i*h
p_true = p_true - np.mean(p_true)
# coef_np = np.ones((nz, nx))
# g_np = np.zeros((nz, nx))
# g_np[5,5] = -1.0
# g_np[15,15] = 1.0
print(np.mean(g_np))
coef = tf.constant(coef_np, dtype=tf.float64)
# g_np = g_np - np.mean(g_np)
g = tf.constant(g_np, dtype=tf.float64)
p = Poisson_module.poisson_op(coef, g, h, G*rho, 2)
# g = tf.gradients(tf.reduce_sum(p), coef, g, h)
# print(sess.run(p))
# print(sess.run(g))
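# Hedged sketch: with the "PoissonOp" gradient registered above, tf.gradients
# can differentiate through the custom op; the names below are illustrative.
# dp_dcoef = tf.gradients(tf.reduce_sum(p), coef)[0]
# print(sess.run(dp_dcoef))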
p_inv_np = sess.run(p)
p_inv_np = p_inv_np - np.mean(p_inv_np)
plt.subplot(1,2,1)
plt.imshow(p_inv_np)
plt.colorbar()
plt.title('inv')
plt.subplot(1,2,2)
plt.imshow(p_true)
plt.colorbar()
plt.show()
# sess = tf.Session()
# h = 0.2
# nz = 5
# nx = 5
# rho = 1000.0
# G = 0.0
# g_np = np.zeros((nz, nx))
# g_np[0,0]=25.0
# g_np[4,4]=-25.0
# coef_np = np.ones((nz, nx))
# print(np.mean(g_np))
# coef = tf.constant(coef_np, dtype=tf.float64)
# # g_np = g_np - np.mean(g_np)
# g = tf.constant(g_np, dtype=tf.float64)
# p = Poisson_module.poisson_op(coef, g, h, G*rho, 1)
# # g = tf.gradients(tf.reduce_sum(p), coef, g, h)
# # print(sess.run(p))
# # print(sess.run(g))
# p_inv_np = sess.run(p)
# # p_inv_np = p_inv_np - np.mean(p_inv_np)
# plt.imshow(p_inv_np)
# plt.colorbar()
# plt.title('inv')
# plt.show()
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import six
from monty.io import zopen
from monty.re import regrep
from collections import defaultdict
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.io_utils import clean_lines
"""
This module implements input and output processing from PWSCF.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "3/27/15"
class PWInput(object):
"""
Base input file class. Right now, only supports no symmetry and is
very basic.
"""
def __init__(self, structure, pseudo=None, control=None, system=None,
electrons=None, ions=None, cell=None, kpoints_mode="automatic",
                 kpoints_grid=(1, 1, 1), kpoints_shift=(0, 0, 0)):
"""
Initializes a PWSCF input file.
Args:
structure (Structure): Input structure. For spin-polarized calculation,
properties (e.g. {"starting_magnetization": -0.5,
"pseudo": "Mn.pbe-sp-van.UPF"}) on each site is needed instead of
pseudo (dict).
pseudo (dict): A dict of the pseudopotentials to use. Default to None.
control (dict): Control parameters. Refer to official PWSCF doc
on supported parameters. Default to {"calculation": "scf"}
system (dict): System parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
electrons (dict): Electron parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
ions (dict): Ions parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
cell (dict): Cell parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
kpoints_mode (str): Kpoints generation mode. Default to automatic.
kpoints_grid (sequence): The kpoint grid. Default to (1, 1, 1).
kpoints_shift (sequence): The shift for the kpoints. Defaults to
(0, 0, 0).
"""
self.structure = structure
sections = {}
sections["control"] = control or {"calculation": "scf"}
sections["system"] = system or {}
sections["electrons"] = electrons or {}
sections["ions"] = ions or {}
sections["cell"] = cell or {}
        if pseudo is None:
for site in structure:
try:
site.properties['pseudo']
except KeyError:
raise PWInputError("Missing %s in pseudo specification!"
% site)
else:
for species in self.structure.composition.keys():
if species.symbol not in pseudo:
raise PWInputError("Missing %s in pseudo specification!"
% species.symbol)
self.pseudo = pseudo
self.sections = sections
self.kpoints_mode = kpoints_mode
self.kpoints_grid = kpoints_grid
self.kpoints_shift = kpoints_shift
def __str__(self):
out = []
site_descriptions = {}
        if self.pseudo is not None:
site_descriptions = self.pseudo
else:
c = 1
for site in self.structure:
name = None
for k, v in site_descriptions.items():
if site.properties == v:
name = k
                if name is None:
name = site.specie.symbol+str(c)
site_descriptions[name] = site.properties
c += 1
def to_str(v):
if isinstance(v, six.string_types):
return "'%s'" % v
elif isinstance(v, float):
return "%s" % str(v).replace("e", "d")
elif isinstance(v, bool):
if v:
return ".TRUE."
else:
return ".FALSE."
return v
for k1 in ["control", "system", "electrons", "ions", "cell"]:
v1 = self.sections[k1]
out.append("&%s" % k1.upper())
sub = []
for k2 in sorted(v1.keys()):
                if isinstance(v1[k2], list):
                    for n, val in enumerate(v1[k2][:len(site_descriptions)], start=1):
                        sub.append(" %s(%d) = %s" % (k2, n, to_str(val)))
else:
sub.append(" %s = %s" % (k2, to_str(v1[k2])))
if k1 == "system":
if 'ibrav' not in self.sections[k1]:
sub.append(" ibrav = 0")
if 'nat' not in self.sections[k1]:
sub.append(" nat = %d" % len(self.structure))
if 'ntyp' not in self.sections[k1]:
sub.append(" ntyp = %d" % len(site_descriptions))
sub.append("/")
out.append(",\n".join(sub))
out.append("ATOMIC_SPECIES")
for k, v in sorted(site_descriptions.items(), key=lambda i: i[0]):
e = re.match(r"[A-Z][a-z]?", k).group(0)
if self.pseudo is not None:
p = v
else:
p = v['pseudo']
out.append(" %s %.4f %s" % (k, Element(e).atomic_mass, p))
out.append("ATOMIC_POSITIONS crystal")
if self.pseudo is not None:
for site in self.structure:
out.append(" %s %.6f %.6f %.6f" % (site.specie.symbol, site.a,
site.b, site.c))
else:
for site in self.structure:
name = None
for k, v in sorted(site_descriptions.items(),
key=lambda i: i[0]):
if v == site.properties:
name = k
out.append(" %s %.6f %.6f %.6f" % (name, site.a, site.b, site.c))
out.append("K_POINTS %s" % self.kpoints_mode)
kpt_str = ["%s" % i for i in self.kpoints_grid]
kpt_str.extend(["%s" % i for i in self.kpoints_shift])
out.append(" %s" % " ".join(kpt_str))
out.append("CELL_PARAMETERS angstrom")
for vec in self.structure.lattice.matrix:
out.append(" %f %f %f" % (vec[0], vec[1], vec[2]))
return "\n".join(out)
def write_file(self, filename):
"""
Write the PWSCF input file.
Args:
filename (str): The string filename to output to.
"""
with open(filename, "w") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
        Reads a PWInput object from a file.
Args:
filename (str): Filename for file
Returns:
PWInput object
"""
with zopen(filename, "rt") as f:
return PWInput.from_string(f.read())
@staticmethod
def from_string(string):
"""
        Reads a PWInput object from a string.
Args:
string (str): PWInput string
Returns:
PWInput object
"""
lines = list(clean_lines(string.splitlines()))
def input_mode(line):
if line[0] == "&":
return ("sections", line[1:].lower())
elif "ATOMIC_SPECIES" in line:
return ("pseudo", )
elif "K_POINTS" in line:
return ("kpoints", line.split("{")[1][:-1])
elif "CELL_PARAMETERS" in line or "ATOMIC_POSITIONS" in line:
return ("structure", line.split("{")[1][:-1])
elif line == "/":
return None
else:
return mode
sections = {"control": {}, "system": {}, "electrons": {},
"ions": {}, "cell":{}}
pseudo = {}
pseudo_index = 0
lattice = []
species = []
coords = []
structure = None
site_properties = {"pseudo":[]}
mode = None
for line in lines:
mode = input_mode(line)
            if mode is None:
pass
elif mode[0] == "sections":
section = mode[1]
m = re.match(r'(\w+)\(?(\d*?)\)?\s*=\s*(.*)', line)
if m:
key = m.group(1).strip()
key_ = m.group(2).strip()
val = m.group(3).strip()
if key_ != "":
                        if sections[section].get(key) is None:
val_ = [0.0]*20 # MAX NTYP DEFINITION
val_[int(key_)-1] = PWInput.proc_val(key, val)
sections[section][key] = val_
site_properties[key] = []
else:
sections[section][key][int(key_)-1] = PWInput.proc_val(key, val)
else:
sections[section][key] = PWInput.proc_val(key, val)
elif mode[0] == "pseudo":
                m = re.match(r'(\w+)\s+(\d*\.\d*)\s+(.*)', line)
if m:
pseudo[m.group(1).strip()] = {}
pseudo[m.group(1).strip()]["index"] = pseudo_index
pseudo[m.group(1).strip()]["pseudopot"] = m.group(3).strip()
pseudo_index += 1
elif mode[0] == "kpoints":
m = re.match(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)', line)
if m:
kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))
else:
kpoints_mode = mode[1]
elif mode[0] == "structure":
m_l = re.match(r'(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
m_p = re.match(r'(\w+)\s+(-?\d+\.\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
if m_l:
lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]
elif m_p:
site_properties["pseudo"].append(pseudo[m_p.group(1)]["pseudopot"])
species += [pseudo[m_p.group(1)]["pseudopot"].split(".")[0]]
coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]
for k, v in site_properties.items():
if k != "pseudo":
site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)]["index"]])
if mode[1] == "angstrom":
coords_are_cartesian = True
elif mode[1] == "crystal":
coords_are_cartesian = False
structure = Structure(Lattice(lattice), species, coords,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
return PWInput(structure=structure, control=sections["control"],
system=sections["system"], electrons=sections["electrons"],
ions=sections["ions"], cell=sections["cell"], kpoints_mode=kpoints_mode,
kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift)
    @staticmethod
    def proc_val(key, val):
"""
Static helper method to convert PWINPUT parameters to proper type, e.g.,
integers, floats, etc.
Args:
key: PWINPUT parameter key
val: Actual value of PWINPUT parameter.
"""
        float_keys = ('etot_conv_thr','forc_conv_thr','conv_thr','Hubbard_U','Hubbard_J0','degauss',
            'starting_magnetization',)
int_keys = ('nstep','iprint','nberrycyc','gdir','nppstr','ibrav','nat','ntyp','nbnd','nr1',
'nr2','nr3','nr1s','nr2s','nr3s','nspin','nqx1','nqx2','nqx3','lda_plus_u_kind',
'edir','report','esm_nfit','space_group','origin_choice','electron_maxstep',
'mixing_ndim','mixing_fixed_ns','ortho_para','diago_cg_maxiter','diago_david_ndim',
'nraise','bfgs_ndim','if_pos','nks','nk1','nk2','nk3','sk1','sk2','sk3','nconstr')
bool_keys = ('wf_collect','tstress','tprnfor','lkpoint_dir','tefield','dipfield','lelfield',
'lorbm','lberry','lfcpopt','monopole','nosym','nosym_evc','noinv','no_t_rev',
'force_symmorphic','use_all_frac','one_atom_occupations','starting_spin_angle',
'noncolin','x_gamma_extrapolation','lda_plus_u','lspinorb','london',
'ts_vdw_isolated','xdm','uniqueb','rhombohedral','realxz','block',
'scf_must_converge','adaptive_thr','diago_full_acc','tqr','remove_rigid_rot',
'refold_pos')
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in bool_keys:
if val.lower() == ".true.":
return True
elif val.lower() == ".false.":
return False
else:
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*d?-?\d*", val.lower()).group(0).replace("d", "e"))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
try:
val = val.replace("d","e")
return smart_int_or_float(val)
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
m = re.match(r"^[\"|'](.+)[\"|']$", val)
if m:
return m.group(1)
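    # Hedged examples of proc_val conversions, inferred from the rules above:
    #   PWInput.proc_val("nat", "2")             -> 2
    #   PWInput.proc_val("conv_thr", "1.0d-6")   -> 1e-06 (Fortran d -> e)
    #   PWInput.proc_val("tstress", ".TRUE.")    -> True
    #   PWInput.proc_val("calculation", "'scf'") -> 'scf' (quotes stripped)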
class PWInputError(Exception):
pass
class PWOutput(object):
patterns = {
"energies": r'total energy\s+=\s+([\d\.\-]+)\sRy',
"ecut": r'kinetic\-energy cutoff\s+=\s+([\d\.\-]+)\s+Ry',
"lattice_type": r'bravais\-lattice index\s+=\s+(\d+)',
"celldm1": r"celldm\(1\)=\s+([\d\.]+)\s",
"celldm2": r"celldm\(2\)=\s+([\d\.]+)\s",
"celldm3": r"celldm\(3\)=\s+([\d\.]+)\s",
"celldm4": r"celldm\(4\)=\s+([\d\.]+)\s",
"celldm5": r"celldm\(5\)=\s+([\d\.]+)\s",
"celldm6": r"celldm\(6\)=\s+([\d\.]+)\s",
"nkpts": r"number of k points=\s+([\d]+)"
}
def __init__(self, filename):
self.filename = filename
self.data = defaultdict(list)
self.read_pattern(PWOutput.patterns)
for k, v in self.data.items():
if k == "energies":
self.data[k] = [float(i[0][0]) for i in v]
elif k in ["lattice_type", "nkpts"]:
self.data[k] = int(v[0][0][0])
else:
self.data[k] = float(v[0][0][0])
def read_pattern(self, patterns, reverse=False,
terminate_on_match=False, postprocess=str):
"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned
values are lists of lists, because you can grep multiple
items on one line.
"""
matches = regrep(self.filename, patterns, reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess)
self.data.update(matches)
def get_celldm(self, i):
return self.data["celldm%d" % i]
@property
def final_energy(self):
return self.data["energies"][-1]
@property
def lattice_type(self):
return self.data["lattice_type"]
|
from datetime import date
ano = date.today().year
nome = input("Enter your name: ")
idade = int(input("Enter your age: "))
altura = float(input("Enter your height in meters: "))
peso = float(input("Enter your weight in kg: "))
ano_nascimento = ano - idade
imc = peso/(altura*altura)
print(f'name: {nome}, age: {idade}, height: {altura}, weight: {peso}, birth year: {ano_nascimento}, BMI: {imc}')
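# Worked example: 72 kg at 1.80 m gives imc = 72 / (1.80 * 1.80) ≈ 22.2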
|
from enum import Enum
from typing import Any
from app.schemas.debit import DebitCreate, DebitUpdate
from fastapi import APIRouter, Depends, HTTPException
from fastapi import status as sts
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.api import deps
from app.core.celery_app import celery_app
router = APIRouter()
class StatusRequest(str, Enum):
canceled = "canceled"
approved = "approved"
rejected = "rejected"
@router.get("/request", response_model=schemas.Debit)
async def get_automatic_debit_request(
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_user)
) -> Any:
"""
Request Automatic Debit.
"""
debit = crud.debit.get_by_owner(db, owner_id=current_user.id)
if debit:
raise HTTPException(status_code=sts.HTTP_400_BAD_REQUEST,
detail="Automatic debit request already made.")
obj_in = DebitCreate()
return crud.debit.create_with_owner(db, obj_in=obj_in,
owner_id=current_user.id)
@router.put("/{owner_id}", response_model=schemas.Debit)
async def update_status(
owner_id: int,
status: StatusRequest,
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Update Automatic Debit Status.
"""
debit = crud.debit.get_by_owner(db, owner_id=owner_id)
if not debit:
raise HTTPException(status_code=sts.HTTP_404_NOT_FOUND,
detail="Not Found automatic debit request by id")
obj_in = DebitUpdate(status=status)
debit_out = crud.debit.update_status(db, db_obj=debit, obj_in=obj_in)
if status in (StatusRequest.canceled, StatusRequest.approved):
user = crud.user.get(db, id=debit.owner_id)
celery_app.send_task("app.tasks.send_email.email_task",
args=[status, user.email])
return debit_out
@router.get("/{owner_id}", response_model=schemas.Debit)
async def get_automatic_debit_by_owner_id(
owner_id: int,
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_user)
) -> Any:
"""
Get Automatic Debit By Owner Id.
"""
debit = crud.debit.get_by_owner(db, owner_id=owner_id)
if not debit:
raise HTTPException(status_code=sts.HTTP_404_NOT_FOUND,
detail="Automatic debit request not found.")
return debit
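# Hedged client sketch; the mount prefix ("/debits") and the auth header are
# assumptions, since this router is wired up elsewhere in the app.
# import httpx
# resp = httpx.get("http://localhost:8000/debits/request",
#                  headers={"Authorization": "Bearer <token>"})
# print(resp.status_code, resp.json())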
|
# %%
'''Python Program to check for Stack Permutations.
What are Stack Permutations?
If the elements of the input queue can be permuted into the order of the
elements in the output queue using a single auxiliary stack (each element
pushed and popped exactly once), the output is called a stack permutation
of the input.'''
# %%
from queue import Queue
from collections import deque
# %%
def checkStackPermutation(inp, output, length):
# Input queue
inpQ = Queue()
for i in range(length):
inpQ.put(inp[i])
# output queue
outQ = Queue()
for i in range(length):
outQ.put(output[i])
# stack to be used for permutation
tempStack = deque()
while (not inpQ.empty()):
        element = inpQ.get()
if (element == outQ.queue[0]):
outQ.get()
while (len(tempStack) != 0):
if (tempStack[-1] == outQ.queue[0]):
tempStack.pop()
outQ.get()
else:
break
else:
tempStack.append(element)
# If after processing, both Input
# queue and stack are empty then
# the Input queue is permutable
# otherwise not.
return (inpQ.empty() and
len(tempStack) == 0)
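# Worked example: [1, 2, 3] -> [2, 1, 3] is reachable
# (push 1, push 2, pop 2, pop 1, push 3, pop 3), so
# checkStackPermutation([1, 2, 3], [2, 1, 3], 3) returns True;
# [1, 2, 3] -> [3, 1, 2] is not reachable, so it returns False.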
# %%
if __name__ == '__main__':
inp = list(map(int, input().split()))
# Output Queue
output = list(map(int, input().split()))
length = len(inp)
if (checkStackPermutation(inp, output, length)):
print("Yes, the output is a possible stack permutation of the input.")
else:
print("No, the output is not a stack permutation of the input")
# %%
|
import sys
from datetime import datetime
from flask import Flask, g
from flask.json import JSONEncoder
from oslo_config import cfg
from oslo_log import log as logging
from pymsboot import config
from pymsboot.api.v1.movie import bp_movie
from pymsboot.context import make_context
from pymsboot.db.api import db_init, DbApi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
app = Flask(__name__)
def prepare_service(argv):
_DEFAULT_LOG_LEVELS = [
'eventlet.wsgi.server=WARN',
'oslo_service.periodic_task=INFO',
'oslo_service.loopingcall=INFO',
'oslo_concurrency.lockutils=WARN',
'urllib3.connectionpool=CRITICAL',
'futurist.periodics=WARN',
'flask=INFO',
'sqlalchemy=WARN',
'werkzeug=INFO'
]
    default_log_levels = logging.get_default_log_levels()
    default_log_levels.extend(_DEFAULT_LOG_LEVELS)
    logging.register_options(CONF)
    logging.set_defaults(default_log_levels=default_log_levels)
config.parse_args(args=argv)
logging.setup(CONF, 'pymsboot_api')
def create_app():
    argv = sys.argv[1:]
prepare_service(argv)
db_init()
LOG.info('setup api wsgi app')
app.config.update(
DEBUG=True,
SECRET_KEY=b'\x87T4a\x00\x8e\x12\xf8\xaa\x90\xe2\x98\xcf6Td\xaa\xf6\x8e\xf2\n\xae\x12'
)
app.json_encoder = CustomJSONEncoder
app.wsgi_app = OsloLogMiddleware(app.wsgi_app)
app.register_blueprint(bp_movie, url_prefix='/api/v1/')
return app
@app.before_request
def before_req():
g.dbapi = DbApi()
g.req_ctx = make_context()
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
try:
if isinstance(obj, datetime):
return obj.isoformat()
iterable = iter(obj)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, obj)
class OsloLogMiddleware(object):
format = ('%(REMOTE_ADDR)s %(REMOTE_USER)s %(REMOTE_STATUS)s '
'"%(REQUEST_METHOD)s %(REQUEST_URI)s" status: %(status)s'
' len: %(bytes)s')
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
LOG.info('Starting request: %s "%s %s"' %
(environ['REMOTE_ADDR'], environ['REQUEST_METHOD'],
self._get_uri(environ)))
if LOG.isEnabledFor(logging.INFO):
return self._log_app(environ, start_response)
else:
return self.application(environ, start_response)
@staticmethod
def _get_uri(environ):
req_uri = (environ.get('SCRIPT_NAME', '')
+ environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
req_uri += '?' + environ['QUERY_STRING']
return req_uri
def _log_app(self, environ, start_response):
req_uri = self._get_uri(environ)
def replacement_start_response(status, headers, exc_info=None):
"""We need to gaze at the content-length, if set, to
write log info.
"""
size = None
for name, value in headers:
if name.lower() == 'content-length':
size = value
self.write_log(environ, req_uri, status, size)
return start_response(status, headers, exc_info)
return self.application(environ, replacement_start_response)
def write_log(self, environ, req_uri, status, size):
"""Write the log info out in a formatted form to ``LOG.info``.
"""
if size is None:
size = '-'
log_format = {
'REMOTE_ADDR': environ.get('REMOTE_ADDR') or '-',
'REMOTE_USER': environ.get('HTTP_X_USER_ID', '-'),
'REMOTE_STATUS': environ.get('HTTP_X_IDENTITY_STATUS', '-'),
'REQUEST_METHOD': environ['REQUEST_METHOD'],
'REQUEST_URI': req_uri,
'status': status.split(None, 1)[0],
'bytes': size,
}
# We don't need to worry about trying to avoid the cost of
# interpolation here because we only reach this code if INFO
# is enabled.
message = self.format % log_format
LOG.info(message)
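# Hedged run sketch (host/port are illustrative); in production this app is
# typically served by a WSGI server rather than Flask's built-in one.
# if __name__ == '__main__':
#     create_app().run(host='0.0.0.0', port=8080)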
|
from django.db import models
from django.contrib.auth.models import User
import os
from django.conf import settings
import json
import logging
from datetime import datetime
import numpy as np
from django.db import transaction
from shutil import rmtree
import struct
import re
from django.contrib import admin
from django.db.models.signals import pre_delete
from django.dispatch import receiver
class Dataset(models.Model):
name = models.CharField('Name', max_length=50)
text_id = models.TextField(unique=True, null=False)
description = models.TextField('Description')
owner = models.ForeignKey(User, null=False, default=0)
terms_count = models.IntegerField(default=0)
documents_count = models.IntegerField(default=0)
modalities_count = models.IntegerField(default=0)
creation_time = models.DateTimeField(null=False, default=datetime.now)
# 0=OK, 1=processing, 2=error
status = models.IntegerField(null=False, default=0)
error_message = models.TextField(null=True)
language = models.TextField(null=False, default="english")
preprocessing_params = models.TextField(null=False, default="{}")
time_provided = models.BooleanField(null=False, default=True)
is_public = models.BooleanField(null=False, default=True)
def __str__(self):
return self.name
def reload(self):
self.prepare_log()
self.log("Loading dataset " + self.text_id + "...")
Term.objects.filter(dataset=self).delete()
Document.objects.filter(dataset=self).delete()
Modality.objects.filter(dataset=self).delete()
from models.models import ArtmModel
ArtmModel.objects.filter(dataset=self).delete()
try:
meta_file = os.path.join(self.get_folder(), "meta", "meta.json")
with open(meta_file) as f:
self.docs_info = json.load(f)
except BaseException as ex:
self.log("WARNING! Wasn't able to load meta.json")
self.log(str(ex))
self.time_provided = False
self.docs_info = {}
try:
preprocessing_params = json.loads(self.preprocessing_params)
self.log("Preprocessing params:" + str(preprocessing_params))
except BaseException:
preprocessing_params = {}
self.log("Warning! Failed to load preprocessing parameters.")
# Preprocessing
custom_vocab = False
if "parse" in preprocessing_params:
self.preprocess_parse(preprocessing_params["parse"])
if "filter" in preprocessing_params:
self.preprocess_filter(preprocessing_params["filter"])
custom_vocab = True
if "custom_vocab" in preprocessing_params and preprocessing_params[
"custom_vocab"]:
self.log("Will use custom vocab.txt")
custom_vocab = True
self.create_batches()
self.gather_dictionary(custom_vocab=custom_vocab)
self.load_documents()
self.log("Loaded " + str(self.documents_count) + " documents.")
# Creating folder for models
model_path = os.path.join(
settings.DATA_DIR, "datasets", self.text_id, "models")
if not os.path.exists(model_path):
os.makedirs(model_path)
self.log("Dataset " + self.text_id + " loaded.")
self.creation_time = datetime.now()
self.status = 0
self.save()
# Counting weights for terms.
self.log("Counting weights for terms.")
self.reset_terms_weights()
def preprocess_parse(self, params):
self.log("Parsing documents...")
from algo.preprocessing.Parser import Parser
parser = Parser(self.get_folder())
if "store_order" in params:
parser.store_order = params["store_order"]
if "hashtags" in params:
parser.hashtags = params["hashtags"]
if "bigrams" in params:
parser.bigrams = params["bigrams"]
self.log("Parsing initialized.")
parser.process()
self.log("Parsing done.")
def preprocess_filter(self, params):
from algo.preprocessing.VocabFilter import VocabFilter
self.log("Filtering words...")
filter = VocabFilter(os.path.join(self.get_folder(), "vw.txt"))
self.log("Filtering initilized.")
if "lower_bound" in params:
filter.lower_bound = int(params["lower_bound"])
if "upper_bound" in params:
filter.upper_bound = int(params["upper_bound"])
if "upper_bound_relative" in params:
filter.upper_bound_relative = int(params["upper_bound_relative"])
if "minimal_length" in params:
filter.minimal_length = int(params["minimal_length"])
filter.save_vocabulary(os.path.join(self.get_folder(), "vocab.txt"))
self.log("Filtering done.")
def create_batches(self):
import artm
self.log("Creating ARTM batches...")
batches_folder = os.path.join(self.get_folder(), "batches")
if os.path.exists(batches_folder):
rmtree(batches_folder)
os.makedirs(batches_folder)
vw_path = os.path.join(self.get_folder(), "vw.txt")
if not os.path.exists(vw_path):
raise ValueError("FATAL ERROR! vw.txt file wasn't found.")
batch_vectorizer = artm.BatchVectorizer(
data_path=vw_path,
data_format="vowpal_wabbit",
batch_size=10000,
collection_name=self.text_id,
target_folder=batches_folder
)
self.log("Batches created.")
@transaction.atomic
def gather_dictionary(self, custom_vocab=False):
import artm
self.log("Creating ARTM dictionary...")
dictionary = artm.Dictionary(name="dictionary")
batches_folder = os.path.join(self.get_folder(), "batches")
vocab_file_path = os.path.join(self.get_folder(), "vocab.txt")
if custom_vocab:
dictionary.gather(batches_folder, vocab_file_path=vocab_file_path)
else:
dictionary.gather(batches_folder)
        vocab_file = None
        if not custom_vocab:
            vocab_file = open(vocab_file_path, "w", encoding="utf-8")
dictionary_file_name = os.path.join(
self.get_folder(), "batches", "dictionary.txt")
dictionary.save_text(dictionary_file_name)
self.log("Saving terms to database...")
term_index_id = -3
self.modalities_count = 0
self.terms_index = dict()
modalities_index = dict()
with open(dictionary_file_name, "r", encoding='utf-8') as f:
for line in f:
term_index_id += 1
if term_index_id < 0:
continue
parsed = line.replace(',', ' ').split()
term = Term()
term.dataset = self
term.text = parsed[0]
term.index_id = term_index_id
term.token_value = float(parsed[2])
term.token_tf = int(parsed[3].split('.')[0])
term.token_df = int(parsed[4].split('.')[0])
modality_name = parsed[1]
if modality_name not in modalities_index:
modality = Modality()
modality.index_id = self.modalities_count
self.modalities_count += 1
modality.name = modality_name
modality.dataset = self
modality.save()
modalities_index[modality_name] = modality
modality = modalities_index[modality_name]
term.modality = modality
modality.terms_count += 1
term.save()
if not custom_vocab:
vocab_file.write("%s %s\n" % (parsed[0], parsed[1]))
self.terms_index[term.text] = term
self.terms_index[term.text + "$#" + term.modality.name] = term
self.terms_index[term.index_id] = term
if term_index_id % 10000 == 0:
self.log(str(term_index_id))
# print(term_index_id)
if not custom_vocab:
vocab_file.close()
        self.terms_count = term_index_id + 1
self.log("Saving modalities...")
max_modality_size = 0
word_modality_id = -1
for key, modality in modalities_index.items():
if modality.terms_count > max_modality_size:
word_modality_id = modality.id
max_modality_size = modality.terms_count
for key, modality in modalities_index.items():
if modality.id == word_modality_id:
modality.weight_spectrum = 1
modality.weight_naming = 1
if 'tag' in modality.name:
modality.is_tag = True
modality.save()
self.normalize_modalities_weights()
@transaction.atomic
def load_documents(self):
vw_file_name = os.path.join(self.get_folder(), "vw.txt")
self.log(
"Loading documents in Vowpal Wabbit format from " +
vw_file_name)
doc_id = 0
with open(vw_file_name, "r", encoding="utf-8") as f:
for line in f:
if len(line) <= 1:
continue
doc = Document()
doc.dataset = self
doc.index_id = doc_id
doc.fetch_vw(line)
if doc.text_id in self.docs_info:
doc.fetch_meta(self.docs_info[doc.text_id])
doc.save()
doc_id += 1
if doc_id % 1000 == 0:
self.log(str(doc_id))
self.documents_count = doc_id
self.save()
def reload_untrusted(self):
try:
self.reload()
except BaseException:
import traceback
self.error_message = traceback.format_exc()
self.status = 2
self.save()
def upload_from_archive(self, archive):
archive_name = str(archive)
parsed = archive_name.split('.')
if parsed[1] != 'zip':
raise ValueError("Must be zip archive")
self.text_id = parsed[0]
self.name = parsed[0]
self.prepare_log("Loading dataset %s from archive..." % self.text_id)
if len(Dataset.objects.filter(text_id=parsed[0])) != 0:
raise ValueError("Dataset " + parsed[0] + " already exists.")
zip_file_name = os.path.join(self.get_folder(), archive_name)
with open(os.path.join(self.get_folder(), archive_name), 'wb+') as f:
for chunk in archive.chunks():
f.write(chunk)
self.log("Archive uploaded.")
import zipfile
zip_ref = zipfile.ZipFile(zip_file_name, 'r')
zip_ref.extractall(self.get_folder())
zip_ref.close()
self.log("Archive unpacked. Dataset name: " + self.text_id)
os.remove(zip_file_name)
def get_batches(self):
import artm
dataset_path = os.path.join(
settings.DATA_DIR, "datasets", self.text_id)
batches_folder = os.path.join(dataset_path, "batches")
dictionary_file_name = os.path.join(batches_folder, "dictionary.txt")
batch_vectorizer = artm.BatchVectorizer(
data_path=batches_folder, data_format="batches")
dictionary = artm.Dictionary(name="dictionary")
dictionary.load_text(dictionary_file_name)
return batch_vectorizer, dictionary
def objects_safe(request):
if request.user.is_anonymous():
return Dataset.objects.filter(is_public=True)
else:
return Dataset.objects.filter(is_public=True) |\
Dataset.objects.filter(is_public=False, owner=request.user)
def prepare_log(self, string=""):
self.log_file_name = os.path.join(self.get_folder(), "log.txt")
with open(self.log_file_name, "w") as f:
f.write("%s<br>\n" % string)
def log(self, string):
if settings.DEBUG:
print(string)
if settings.THREADING:
with open(self.log_file_name, "a") as f:
f.write(string + "<br>\n")
def read_log(self):
try:
log_file_name = os.path.join(
settings.DATA_DIR, "datasets", self.text_id, "log.txt")
with open(log_file_name, "r") as f:
return f.read()
except BaseException:
return "Datased is reloading"
def get_folder(self):
path = os.path.join(settings.DATA_DIR, "datasets", self.text_id)
if not os.path.exists(path):
os.makedirs(path)
return path
def get_terms_index(self, modality=None):
terms_index = dict()
if modality:
query_set = Term.objects.filter(dataset=self, modality=modality)
else:
query_set = Term.objects.filter(
dataset=self).order_by("-modality__weight_naming")
for term in query_set:
terms_index[term.text] = term.index_id
# terms_index[term.text + "#$" + term.modality.name] =
# term.index_id
return terms_index
def check_terms_order(self, index, full=True):
if self.terms_count != len(index):
return False
if full:
for term in Term.objects.filter(dataset=self):
if index[term.index_id] != term.text:
return False
else:
import random
for i in range(10):
term_iid = random.randint(0, self.terms_count - 1)
if index[term_iid] != Term.objects.get(
dataset_id=self.id, index_id=term_iid).text:
return False
return True
@transaction.atomic
def normalize_modalities_weights(self):
        modalities = list(Modality.objects.filter(dataset=self))
        wn = Dataset.normalize_weights([m.weight_naming for m in modalities])
        ws = Dataset.normalize_weights([m.weight_spectrum for m in modalities])
        for i, modality in enumerate(modalities):
            modality.weight_naming = wn[i]
            modality.weight_spectrum = ws[i]
            modality.save()
def normalize_weights(w):
s = sum(w)
if s == 0:
w[0] = 1
return w
w = [int((x / s) * 1000) for x in w]
w[0] += (1000 - sum(w))
return [x / 1000 for x in w]
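    # Worked example: normalize_weights([2, 1, 1]) -> s = 4, integer
    # milli-weights [500, 250, 250] (already summing to 1000), returned
    # as [0.5, 0.25, 0.25].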
def get_dataset(request, modify=False):
if "dataset_id" in request.GET:
dataset = Dataset.objects.get(id=request.GET['dataset_id'])
elif "ds" in request.GET:
dataset = Dataset.objects.get(id=request.GET['ds'])
elif "dataset" in request.GET:
dataset = Dataset.objects.get(text_id=request.GET['dataset'])
elif "dataset" in request.POST:
dataset = Dataset.objects.get(text_id=request.POST['dataset'])
else:
return None
if ((modify or not dataset.is_public)
and not dataset.owner == request.user):
return None
return dataset
    # Returns an array of term weights.
    # Note: each modality has two weights, one used for distance computation
    # ("spectrum") and one for ranking top terms ("naming"). Several procedures
    # need these weights, so to speed them up all per-term weights are computed
    # once in reset_terms_weights and stored in files; this function just reads
    # the corresponding array from file.
def get_terms_weights(self, mode):
if not os.path.exists(os.path.join(
self.get_folder(), "terms_weights")):
self.reset_terms_weights()
if mode == "spectrum":
return np.load(os.path.join(self.get_folder(),
"terms_weights", "spectrum.npy"))
elif mode == "naming":
return np.load(os.path.join(self.get_folder(),
"terms_weights", "naming.npy"))
# Counts weights for terms and stores them in file.
def reset_terms_weights(self):
folder = os.path.join(self.get_folder(), "terms_weights")
if not os.path.exists(folder):
os.makedirs(folder)
weights_spectrum = np.zeros(self.terms_count)
weights_naming = np.zeros(self.terms_count)
for modality in Modality.objects.filter(dataset=self):
ws = modality.weight_spectrum
wn = modality.weight_naming
for term in Term.objects.filter(modality=modality):
weights_spectrum[term.index_id] = ws
weights_naming[term.index_id] = wn
np.save(os.path.join(folder, "spectrum.npy"), weights_spectrum)
np.save(os.path.join(folder, "naming.npy"), weights_naming)
def delete_cached_distances(self):
from models.models import ArtmModel
for model in ArtmModel.objects.filter(dataset=self):
model.delete_cached_distances()
def delete_unused_folders(self):
from models.models import ArtmModel
models_folder = os.path.join(self.get_folder(), "models")
legit_models = set([
model.text_id
for model
in ArtmModel.objects.filter(dataset=self)
])
for folder in os.listdir(models_folder):
if folder not in legit_models:
folder_to_remove = os.path.join(models_folder, folder)
print("Removing trash: %s" % folder_to_remove)
rmtree(folder_to_remove)
def get_modalities_mask(self):
path = os.path.join(self.get_folder(), "modalities_mask.npy")
if os.path.exists(path):
return np.load(path)
else:
ans = np.zeros(self.terms_count, dtype=np.int32)
i = 0
for term in Term.objects.filter(dataset=self):
ans[i] = term.modality.index_id
i += 1
np.save(path, ans)
return ans
@receiver(pre_delete, sender=Dataset, dispatch_uid='dataset_delete_signal')
def remove_dataset_files(sender, instance, using, **kwargs):
folder = instance.get_folder()
print("Will delete folder " + folder)
try:
rmtree(folder)
except BaseException:
pass
def on_start():
'''
for dataset in Dataset.objects.all():
try:
dataset.delete_unused_folders()
except:
pass
'''
for dataset in Dataset.objects.filter(status=1):
dataset.status = 2
dataset.error_message = "Dataset processing was interrupted."
dataset.save()
class Document(models.Model):
# Title of the document.
title = models.TextField(null=False)
# Link to the document on the Internet.
url = models.URLField(null=True)
# Short description of the document.
snippet = models.TextField(null=True)
# Time of publicaton.
time = models.DateTimeField(null=True)
# Id inside dataset.
index_id = models.IntegerField(null=False)
# Should coincide with relative path of text file, if available.
text_id = models.TextField(null=True)
# Dataset, to which this document belongs.
dataset = models.ForeignKey(Dataset, null=False)
# [4 bytes term.index_id][2 bytes count][1 byte modality.index_id]
bag_of_words = models.BinaryField(null=True)
# Number of terms.
terms_count = models.IntegerField(null=False, default=0)
# Number of unique terms.
unique_terms_count = models.IntegerField(null=False, default=0)
# Full text of document.
text = models.TextField(null=True)
# [4 bytes position][1 byte length][4 bytes term.index_id]
word_index = models.BinaryField(null=True)
class Meta:
# index_id's should be unique inside datasets.
        unique_together = (("dataset", "index_id"),)
    # Extracts metadata from the doc_info object and stores it on self.
# To be used when dataset is loaded from archive.
def fetch_meta(self, doc_info):
if 'title' in doc_info:
self.title = doc_info["title"]
if "snippet" in doc_info:
self.snippet = doc_info["snippet"]
if "url" in doc_info:
self.url = doc_info["url"]
if "time" in doc_info:
lst = doc_info["time"]
try:
self.time = datetime.fromtimestamp(lst)
except BaseException:
self.time = datetime(lst[0], lst[1], lst[2],
lst[3], lst[4], lst[5])
else:
if self.dataset.time_provided:
self.dataset.log("Warning! Time isn't provided.")
self.dataset.time_provided = False
    # Extracts the document from its Vowpal Wabbit description.
def fetch_vw(self, line_vw):
parsed_vw = line_vw.split()
self.text_id = parsed_vw[0]
self.title = self.text_id
self.word_index = bytes()
self.text = ""
# Try load text and wordpos
text_found = False
text_file = os.path.join(
self.dataset.get_folder(), "documents", self.text_id)
if os.path.exists(text_file):
text_found = True
with open(text_file, "r", encoding="utf-8") as f2:
self.text = f2.read()
wordpos_file = os.path.join(
self.dataset.get_folder(), "wordpos", self.text_id)
if os.path.exists(wordpos_file):
word_index_list = []
with open(wordpos_file, "r", encoding="utf-8") as f2:
for line in f2.readlines():
parsed = line.split()
if len(parsed) < 3:
continue
key = parsed[2]
if key in self.dataset.terms_index:
term_iid = self.dataset.terms_index[key].index_id
word_index_list.append(
(int(parsed[0]), -int(parsed[1]), term_iid))
word_index_list.sort()
self.word_index = bytes()
for pos, length, tid in word_index_list:
self.word_index += struct.pack('I', pos) + struct.pack(
'B', -length) + struct.pack('I', tid)
else:
self.dataset.log(
"WARNING! No wordpos for file " + self.text_id)
bow = BagOfWords()
current_modality = '@default_class'
for term in parsed_vw[1:]:
if term[0] == '|':
current_modality = term[1:]
else:
parsed_term = term.split(':')
key = parsed_term[0] + "$#" + current_modality
if ':' in term:
count = int(float(parsed_term[1]))
else:
count = 1
try:
term_index_id = self.dataset.terms_index[key].index_id
self.terms_count += count
bow.add_term(term_index_id, count)
if not text_found:
self.word_index += \
struct.pack('I', len(self.text)) + \
struct.pack('B', len(parsed_term[0])) + \
struct.pack('I', term_index_id)
except BaseException:
pass
if not text_found:
self.text += term + " "
self.bag_of_words = bow.to_bytes(self.dataset.terms_index)
self.unique_terms_count = len(self.bag_of_words) // 7
def objects_safe(request):
if request.user.is_anonymous():
return Document.objects.filter(dataset__is_public=True)
else:
return (Document.objects.filter(dataset__is_public=True) |
Document.objects.filter(
dataset__is_public=False, dataset__owner=request.user))
def count_term(self, iid):
bow = self.bag_of_words
left = 0
right = len(bow) // 7
if right == 0:
return 0
while True:
pos = (left + right) // 2
bow_iid = struct.unpack('I', bow[7 * pos: 7 * pos + 4])[0]
if bow_iid == iid:
return struct.unpack('H', bow[7 * pos + 4: 7 * pos + 6])[0]
elif bow_iid > iid:
right = pos
else:
left = pos + 1
if left >= right:
return 0
def fetch_tags(self):
tag_modalities = Modality.objects.filter(
dataset=self.dataset, is_tag=True)
if len(tag_modalities) == 0:
return []
tag_names = dict()
tag_strings = dict()
for modality in tag_modalities:
tag_names[modality.index_id] = modality.name
bow = self.bag_of_words
unique_terms_count = len(bow) // 7
for i in range(unique_terms_count):
bow_iid = struct.unpack('I', bow[7 * i: 7 * i + 4])[0]
modality_iid = struct.unpack('B', bow[7 * i + 6: 7 * i + 7])[0]
if modality_iid in tag_names:
term = Term.objects.filter(
dataset=self.dataset, index_id=bow_iid)[0]
if modality_iid in tag_strings:
tag_strings[modality_iid] += ', '
else:
tag_strings[modality_iid] = ''
tag_strings[modality_iid] += '<a href="/term?id=' + \
str(term.id) + '">' + term.text + '</a>'
ret = []
for tag_id, tag_string in tag_strings.items():
ret.append({"name": tag_names[tag_id], "string": tag_string})
return ret
    # Returns the set of index_id's of words in this document that belong to tag modalities.
def get_tags_ids(self):
tag_ids = set()
ret = set()
for modality in Modality.objects.filter(
dataset=self.dataset, is_tag=True):
tag_ids.add(modality.index_id)
bow = self.bag_of_words
for i in range(len(bow) // 7):
modality_iid = struct.unpack('B', bow[7 * i + 6: 7 * i + 7])[0]
if modality_iid in tag_ids:
ret.add(struct.unpack('I', bow[7 * i: 7 * i + 4])[0])
return ret
def fetch_bow(self, cut_bow):
bow = self.bag_of_words
unique_terms_count = len(bow) // 7
bow_entries = []
for i in range(unique_terms_count):
bow_iid = struct.unpack('I', bow[7 * i: 7 * i + 4])[0]
bow_count = struct.unpack('H', bow[7 * i + 4: 7 * i + 6])[0]
bow_entries.append((-bow_count, bow_iid))
bow_entries.sort()
bow_send = ""
prfx = "<a href = '/term?ds=" + str(self.dataset.id) + "&iid="
rest = unique_terms_count
for x in bow_entries:
cnt = -x[0]
iid = x[1]
if cnt <= cut_bow:
                bow_send += str(rest) + " terms, which occurred " + \
                    str(cut_bow) + " times or less, aren't shown."
break
bow_send += prfx + str(iid) + "'>" + \
Term.objects.filter(dataset=self.dataset,
index_id=iid)[0].text + \
"</a>: " + str(cnt) + "<br>"
rest -= 1
return bow_send
def get_text(self):
return self.text
# Returns positions of terms as list of triples:
# (position, length, term.index_id).
def get_word_index(self, no_overlap=True):
wi = self.word_index
if wi is None:
return None
count = len(wi) // 9
last_pos = -1
ret = []
for i in range(count):
pos = struct.unpack('I', wi[9 * i: 9 * i + 4])[0]
length = struct.unpack('B', wi[9 * i + 4: 9 * i + 5])[0]
if no_overlap:
if pos < last_pos:
continue
else:
last_pos = pos + length
ret.append((pos, length, struct.unpack(
'I', wi[9 * i + 5: 9 * i + 9])[0]))
return ret
def get_concordance(self, terms):
text = self.text
wi = self.word_index
conc = ""
cur_pos = 0
for i in range(len(wi) // 9):
term_index_id = struct.unpack('I', wi[9 * i + 5: 9 * i + 9])[0]
if term_index_id in terms:
pos = struct.unpack('I', wi[9 * i: 9 * i + 4])[0]
length = struct.unpack('B', wi[9 * i + 4: 9 * i + 5])[0]
conc += text[cur_pos: pos] + "<b>" + \
text[pos: pos + length] + "</b>"
cur_pos = pos + length
conc += text[cur_pos:]
sentences = filter(None, re.split("[!?.\n]+", conc))
conc = ""
ctr = 0
for sentence in sentences:
if "</b>" in sentence:
ctr += 1
if ctr == 10:
conc += "<i>(Not all occurences are shown)</i><br>"
break
length = len(sentence)
pref = ""
suf = "."
fi = sentence.find("<b>") - 60
li = sentence.find("</b>") + 60
if fi < 0:
fi = 0
else:
while(fi != 0 and sentence[fi] != ' '):
fi -= 1
pref = "... "
if li > length:
li = length
else:
while(li < length and sentence[li] != ' '):
li += 1
suf = " ..."
conc += pref + sentence[fi: li] + suf + "<br>"
        return conc[:-4]
def __str__(self):
return self.title
class Modality(models.Model):
name = models.TextField(null=False)
dataset = models.ForeignKey(Dataset, null=False)
terms_count = models.IntegerField(null=False, default=0)
index_id = models.IntegerField(null=False, default=0)
is_tag = models.BooleanField(null=False, default=False)
weight_naming = models.FloatField(null=False, default=0)
weight_spectrum = models.FloatField(null=False, default=0)
def __str__(self):
return self.dataset.name + "/" + self.name
class Term(models.Model):
text = models.TextField(null=False)
modality = models.ForeignKey(Modality, null=False)
dataset = models.ForeignKey(Dataset, null=False)
# id in UCI files and word_index files
index_id = models.IntegerField(null=False)
token_value = models.FloatField(default=0)
token_tf = models.IntegerField(default=0)
token_df = models.IntegerField(default=0)
documents = models.BinaryField(null=True)
documents_defined = models.BooleanField(default=False)
def __str__(self):
return self.text
def count_documents_index(self):
if self.documents_defined:
return
self.documents_defined = True
self.save()
relations = []
documents = Document.objects.filter(dataset=self.dataset)
temp_count = 0
self.documents = bytes()
for document in documents:
count = document.count_term(self.index_id)
if count != 0:
relations.append((count, document.index_id))
if temp_count < 5:
self.documents += struct.pack('I', document.index_id) + \
struct.pack('H', count)
temp_count += 1
self.save()
relations.sort(reverse=True)
self.documents = bytes()
for count, document_index_id in relations:
self.documents += struct.pack('I', document_index_id) + \
struct.pack('H', count)
self.save()
def get_documents(self):
self.count_documents_index()
for i in range(len(self.documents) // 6):
doc_iid = struct.unpack('I', self.documents[6 * i: 6 * i + 4])[0]
yield Document.objects.get(dataset_id=self.dataset_id,
index_id=doc_iid)
def objects_safe(request):
if request.user.is_anonymous():
return Term.objects.filter(dataset__is_public=True)
return (Term.objects.filter(dataset__is_public=True) |
Term.objects.filter(dataset__is_public=False,
dataset__owner=request.user))
admin.site.register(Dataset)
admin.site.register(Document)
admin.site.register(Modality)
admin.site.register(Term)
class BagOfWords():
"""Helper class to pack bags of words into binary"""
def __init__(self):
self.bow = dict()
def add_term(self, word_id, count):
if word_id not in self.bow:
self.bow[word_id] = count
else:
self.bow[word_id] += count
def to_bytes(self, terms_index):
ret = bytes()
for word_id, count in sorted(self.bow.items()):
ret += struct.pack('I', word_id) + struct.pack('H', count) + \
struct.pack('B', terms_index[word_id].modality.index_id)
return ret
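# Hedged decode helper (not part of the original API): walks the 7-byte
# records written by BagOfWords.to_bytes and stored in Document.bag_of_words.
# '=IHB' mirrors the separate native struct.pack('I'/'H'/'B') calls above.
def iter_bag_of_words(blob):
    """Yield (term_index_id, count, modality_index_id) triples."""
    for i in range(len(blob) // 7):
        yield struct.unpack('=IHB', blob[7 * i: 7 * i + 7])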
|
"""
Do a search, generate modules for the questions & answers returned.
"""
import logging
from typing import List
from so_pip.commands.vendorize import import_so_question
from so_pip.utils import guards
from so_pip.utils.user_trace import inform
LOGGER = logging.getLogger(__name__)
def import_so_search(
package_prefix: str,
query: str,
tags: List[str],
output_folder: str,
stop_after: int = -1,
minimum_loc: int = -1,
) -> List[str]:
"""Fetch questions and answers via a search"""
guards.must_be_truthy(query, "query required")
guards.must_be_truthy(output_folder, "output_folder required")
inform(f"Starting search for '{query}'...")
LOGGER.info(f"tags : {tags}")
if not package_prefix:
package_prefix = ""
tags.sort()
# import late
# pylint: disable=import-outside-toplevel
from so_pip.api_clients.stackapi_facade import get_json_by_search
possibles = get_json_by_search(query, tuple(tags))
all_results = []
found = 0
inform(f"Found {len(possibles['items'])} possible answers")
for possible in possibles["items"]:
result = import_so_question(
package_prefix,
possible["question_id"],
output_folder,
minimum_loc=minimum_loc,
)
all_results.extend(result)
found += 1
if stop_after != -1 and (found > stop_after):
break
else:
# nothing in possibles[items]
print("No search results for query")
return all_results
if __name__ == "__main__":
import_so_search("pymarc", "pymarc", ["python"], "../../output")
|
from typing import List, Dict
from cloudrail.knowledge.context.aws.kms.kms_key_manager import KeyManager
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
class EnsureCodeBuildReportGroupEncryptedWithCustomerManagedCmkRule(AwsBaseRule):
def get_id(self) -> str:
return 'not_car_codebuild_report_groups_encrypted_at_rest_with_customer_managed_cmk'
def execute(self, env_context: AwsEnvironmentContext, parameters: Dict[ParameterType, any]) -> List[Issue]:
issues: List[Issue] = []
for report_group in env_context.codebuild_report_groups:
if report_group.export_config_s3_destination_kms_data is None \
or report_group.export_config_s3_destination_kms_data.key_manager != KeyManager.CUSTOMER:
issues.append(
Issue(
f'The {report_group.get_type()} `{report_group.get_friendly_name()}` '
f'is not set to use encryption at rest '
f'with customer-managed CMK', report_group, report_group))
return issues
def should_run_rule(self, environment_context: AwsEnvironmentContext) -> bool:
return bool(environment_context.codebuild_report_groups)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from django.utils.translation import ugettext_lazy as _
from backend.components import paas_cc
from backend.components.bcs import k8s
from backend.utils.cache import region
from backend.utils.decorators import parse_response_data
from backend.utils.errcodes import ErrorCode
from backend.utils.error_codes import error_codes
def get_clusters(access_token, project_id):
resp = paas_cc.get_all_clusters(access_token, project_id, desire_all_data=True)
if resp.get('code') != ErrorCode.NoError:
raise error_codes.APIError(f"get clusters error, {resp.get('message')}")
return resp.get("data", {}).get("results") or []
def get_cluster_versions(access_token, kind="", ver_id="", env=""):
resp = paas_cc.get_cluster_versions(access_token, kind=kind, ver_id=ver_id, env=env)
if resp.get('code') != ErrorCode.NoError:
raise error_codes.APIError(f"get cluster version, {resp.get('message')}")
data = resp.get("data") or []
version_list = []
    # Sort by ID so that stable versions come first
data.sort(key=lambda info: info["id"])
for info in data:
configure = json.loads(info.get("configure") or "{}")
version_list.append(
{"version_id": info["version"], "version_name": configure.get("version_name") or info["version"]}
)
return version_list
def get_cluster_masters(access_token, project_id, cluster_id):
"""获取集群下的master信息"""
resp = paas_cc.get_master_node_list(access_token, project_id, cluster_id)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError(_("获取集群master ip失败,{}").format(resp.get("message")))
results = resp.get("data", {}).get("results") or []
if not results:
raise error_codes.APIError(_("获取集群master ip为空"))
return results
def get_cluster_nodes(access_token, project_id, cluster_id, raise_exception=True):
"""获取集群下的node信息"""
resp = paas_cc.get_node_list(access_token, project_id, cluster_id)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError(_("获取集群node ip失败,{}").format(resp.get("message")))
results = resp.get("data", {}).get("results") or []
if not results and raise_exception:
raise error_codes.APIError(_("获取集群node ip为空"))
return results
def get_cluster_snapshot(access_token, project_id, cluster_id):
"""获取集群快照"""
resp = paas_cc.get_cluster_snapshot(access_token, project_id, cluster_id)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError(_("获取集群快照失败,{}").format(resp.get("message")))
return resp.get("data") or {}
def get_cluster_info(access_token, project_id, cluster_id):
resp = paas_cc.get_cluster(access_token, project_id, cluster_id)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError(_("获取集群信息失败,{}").format(resp.get("message")))
return resp.get("data") or {}
def update_cluster_status(access_token, project_id, cluster_id, status):
"""更新集群状态"""
data = {"status": status}
resp = paas_cc.update_cluster(access_token, project_id, cluster_id, data)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError(_("更新集群状态失败,{}").format(resp.get("message")))
return resp.get("data") or {}
@parse_response_data(default_data={})
def get_cluster(access_token, project_id, cluster_id):
return paas_cc.get_cluster(access_token, project_id, cluster_id)
@region.cache_on_arguments(expiration_time=3600 * 24 * 7)
def get_cluster_coes(access_token, project_id, cluster_id):
"""获取集群类型,因为集群创建后,集群类型不允许修改
TODO: 为减少调用接口耗时,是否需要缓存?
"""
cluster = get_cluster(access_token, project_id, cluster_id)
return cluster["type"]
@parse_response_data()
def delete_cluster(access_token, project_id, cluster_id):
return paas_cc.delete_cluster(access_token, project_id, cluster_id)
def get_cc_zk_config(access_token, project_id, cluster_id):
resp = paas_cc.get_zk_config(access_token, project_id, cluster_id)
if resp.get("code") != ErrorCode.NoError:
raise error_codes.APIError(_("通过cc获取zk信息出错,{}").format(resp.get("message")))
data = resp.get("data")
if not data:
raise error_codes.APIError(_("通过cc获取zk信息为空"))
return data[0]
def get_cc_repo_domain(access_token, project_id, cluster_id):
return paas_cc.get_jfrog_domain(access_token, project_id, cluster_id)
@parse_response_data()
def update_cc_nodes_status(access_token, project_id, cluster_id, nodes):
"""更新记录的节点状态"""
return paas_cc.update_node_list(access_token, project_id, cluster_id, data=nodes)
|
import pytest
import os
import numpy as np
import pandas as pd
import io
from nexoclom import __file__ as basefile
from nexoclom.atomicdata import gValue, PhotoRate
from nexoclom.atomicdata.initialize_atomicdata import (make_gvalue_table,
make_photorates_table)
@pytest.mark.atomicdata
def test_make_gvalue_table(monkeypatch, tmpdir):
gvalue_true_file = os.path.join(os.path.dirname(basefile), 'data', 'g-values',
'g-values.pkl')
gvalue_test_file = tmpdir.join('g-values_test.pkl')
monkeypatch.setattr(gValue, "gvalue_filename", lambda: gvalue_test_file)
make_gvalue_table()
gvalue_true = pd.read_pickle(gvalue_true_file)
gvalue_test = pd.read_pickle(gvalue_test_file)
assert np.all(gvalue_true == gvalue_test)
@pytest.mark.atomicdata
def test_make_photorates_table(monkeypatch, tmpdir):
monkeypatch.setattr('sys.stdin', io.StringIO('0'))
photo_true_file = os.path.join(os.path.dirname(basefile), 'data', 'Loss',
'photorates.pkl')
photo_test_file = tmpdir.join('photorates.pkl')
monkeypatch.setattr(PhotoRate, 'photorates_filename', lambda: photo_test_file)
make_photorates_table()
photo_true = pd.read_pickle(photo_true_file)
photo_test = pd.read_pickle(photo_test_file)
assert np.all(photo_true == photo_test)
|
import pytest
from abuse_whois.matchers.whois import get_contact_from_whois
@pytest.mark.parametrize(
"hostname",
[
"1.1.1.1",
"github.com",
],
)
def test_get_contact_from_whois(hostname: str):
assert get_contact_from_whois(hostname) is not None
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from omegaconf import DictConfig
from nemo.collections.asr.models.classification_models import EncDecRegressionModel
@pytest.fixture()
def speech_regression_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 32,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.conv_asr.ConvASRDecoderClassification',
'params': {'feat_in': 32, 'return_logits': True, 'num_classes': 1},
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'labels': None,
'is_regression_task': True,
}
)
model = EncDecRegressionModel(cfg=modelConfig)
return model
class TestEncDecRegressionModel:
@pytest.mark.unit
def test_constructor(self, speech_regression_model):
asr_model = speech_regression_model.train()
conv_cnt = (64 * 32 * 1 + 32) + (64 * 1 * 1 + 32) # separable kernel + bias + pointwise kernel + bias
bn_cnt = (4 * 32) * 2 # 2 * moving averages
dec_cnt = 32 * 1 + 1 # fc + bias
param_count = conv_cnt + bn_cnt + dec_cnt
assert asr_model.num_weights == param_count
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecRegressionModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecRegressionModel)
@pytest.mark.unit
def test_transcription(self, speech_regression_model, test_data_dir):
audio_filenames = ['an22-flrp-b.wav', 'an90-fbbh-b.wav']
audio_paths = [os.path.join(test_data_dir, "asr", "train", "an4", "wav", fp) for fp in audio_filenames]
model = speech_regression_model.eval()
# Test Top 1 classification transcription
results = model.transcribe(audio_paths, batch_size=2)
assert len(results) == 2
|
import copy
import json
import re
import botocore
import skwadon.main as sic_main
import skwadon.lib as sic_lib
import skwadon.common_action as common_action
class ConnectionListHandler(common_action.ListHandler):
def __init__(self, session):
self.session = session
self.glue_client = None
def init_client(self):
        if self.glue_client is None:
self.glue_client = self.session.client("glue")
def list(self):
self.init_client()
result = []
res = self.glue_client.get_connections()
while True:
for elem in res['ConnectionList']:
name = elem["Name"]
result.append(name)
if not "NextToken" in res:
break
res = self.glue_client.get_connections(NextToken = res["NextToken"])
return result
def child_handler(self, name):
self.init_client()
return common_action.NamespaceHandler(
"conf", ["conf"], {
"conf": ConnectionConfHandler(self.glue_client, name),
"connection": ConnectionConnectionHandler(self.glue_client, name),
})
class ConnectionConfHandler(common_action.ResourceHandler):
properties = [
"Description",
"ConnectionType",
"MatchCriteria",
"ConnectionProperties",
"PhysicalConnectionRequirements",
]
def __init__(self, glue_client, connection_name):
self.glue_client = glue_client
self.connection_name = connection_name
def describe(self):
try:
res = self.glue_client.get_connection(Name = self.connection_name)
curr_data = sic_lib.pickup(res["Connection"], self.properties)
return curr_data
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "EntityNotFoundException":
return None
else:
raise
def create(self, confirmation_flag, src_data):
update_data = sic_lib.pickup(src_data, self.properties)
update_data["Name"] = self.connection_name
sic_main.exec_put(confirmation_flag,
f"glue_client.create_connection(ConnectionInput = {{Name = {self.connection_name}, ...}})",
lambda:
self.glue_client.create_connection(ConnectionInput = update_data)
)
def update(self, confirmation_flag, src_data, curr_data):
update_data = sic_lib.pickupAndCompareForUpdate(src_data, curr_data, self.properties)
        if update_data is not None:
update_data["Name"] = self.connection_name
sic_main.exec_put(confirmation_flag,
f"glue_client.update_connection(Name = {self.connection_name}, ...)",
lambda:
self.glue_client.update_connection(Name = self.connection_name, ConnectionInput = update_data)
)
class ConnectionConnectionHandler(common_action.ResourceHandler):
properties = [
"Description",
"ConnectionType",
"MatchCriteria",
"ConnectionProperties",
"PhysicalConnectionRequirements",
]
def __init__(self, glue_client, connection_name):
self.glue_client = glue_client
self.connection_name = connection_name
def describe(self):
res = self.glue_client.get_connection(Name = self.connection_name)
jdbc_url = sic_lib.dictlib_get(res, "Connection.ConnectionProperties.JDBC_CONNECTION_URL")
curr_data = {
}
        if jdbc_url is not None:
            curr_data["JBCDUrl"] = jdbc_url
            m = re.compile(r"\Ajdbc:([^:]+)://([^:]+):([0-9]+)/(.+)\Z").search(jdbc_url)
if m:
driver = m.group(1)
if driver == "postgresql" or driver == "redshift":
host = m.group(2)
port = m.group(3)
db_name = m.group(4)
user_name = sic_lib.dictlib_get(res, "Connection.ConnectionProperties.USERNAME")
password = sic_lib.dictlib_get(res, "Connection.ConnectionProperties.PASSWORD")
cmd = f"PGPASSWORD='{password}' psql -h {host} -p {port} -U {user_name} -d {db_name}"
curr_data["CommandLine"] = cmd
return curr_data
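
    # Example (illustrative values): for a connection whose ConnectionProperties
    # hold JDBC_CONNECTION_URL = "jdbc:postgresql://db.example.com:5432/mydb",
    # the regex above yields driver="postgresql", host="db.example.com",
    # port="5432", db_name="mydb", so describe() returns a CommandLine like
    #   PGPASSWORD='...' psql -h db.example.com -p 5432 -U myuser -d mydb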
def create(self, confirmation_flag, src_data):
pass
def update(self, confirmation_flag, src_data, curr_data):
pass
|
def loop_example(list_to_loop_through):
    """ Assuming each item in list_to_loop_through is a number, return a list of each item in that list squared. """
    print("I'm going to begin to loop through this list: ", list_to_loop_through, "\n")
    list_items_squared = []
    for each_item in list_to_loop_through:
        print("Now I'm on: ", each_item)
        print("{0} squared is {1}\n".format(each_item, each_item**2))
        list_items_squared.append(each_item**2)
    print("Now I'm done looping through the list, and I'm going to return the new list, where each list item has been squared.")
    return list_items_squared
##Sample Output
##
##>>> my_list = [1, 3, 4, 5, 6, 78, 2334]
##>>> loop_example(my_list)
##I'm going to begin to loop through this list: [1, 3, 4, 5, 6, 78, 2334]
##Now I'm on: 1
##1 squared is 1
##
##Now I'm on: 3
##3 squared is 9
##
##Now I'm on: 4
##4 squared is 16
##
##Now I'm on: 5
##5 squared is 25
##
##Now I'm on: 6
##6 squared is 36
##
##Now I'm on: 78
##78 squared is 6084
##
##Now I'm on: 2334
##2334 squared is 5447556
##
##[1, 9, 16, 25, 36, 6084, 5447556]
|
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
@author jrm
"""
import ctypes
from ctypes.util import find_library
from atom.api import Atom, Float, Value, Str, Int, Typed
from enaml.application import ProxyResolver
from . import factories
from .bridge import ObjcBridgeObject, ObjcMethod
from ..core.app import BridgedApplication
class ENBridge(ObjcBridgeObject):
""" Access ENBridge.m using ctypes.
Based on:
https://stackoverflow.com/questions/1490039/
calling-objective-c-functions-from-python#1490644
"""
#: Objc library
objc = Value()
#: Bridge.m access via ctypes
bridge = Value()
def _default_objc(self):
""" Load the objc library using ctypes. """
objc = ctypes.cdll.LoadLibrary(find_library('objc'))
objc.objc_getClass.restype = ctypes.c_void_p
objc.sel_registerName.restype = ctypes.c_void_p
objc.objc_msgSend.restype = ctypes.c_void_p
objc.objc_msgSend.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
return objc
def _default_bridge(self):
""" Get an instance of the ENBridge object using ctypes. """
objc = self.objc
ENBridge = objc.objc_getClass('ENBridge')
return objc.objc_msgSend(ENBridge, objc.sel_registerName('instance'))
def processEvents(self, data):
""" Sends msgpack data to the ENBridge instance
by calling the processEvents method via ctypes. """
objc = self.objc
bridge = self.bridge
#: This must come after the above as it changes the arguments!
objc.objc_msgSend.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_char_p, ctypes.c_int]
objc.objc_msgSend(
bridge, objc.sel_registerName('processEvents:length:'),
data, len(data))
#: Add a target to a UIControl that invokes a python callback
addTarget = ObjcMethod('UIControl',
dict(forControlEvents="enum"),#""UIControlEvents"),
dict(andCallback="int"),
dict(usingMethod="NSString"),
dict(withValues="NSArray"))
class AppDelegate(ObjcBridgeObject):
pass
class ViewController(ObjcBridgeObject):
displayView = ObjcMethod('UIView')
class IPhoneApplication(BridgedApplication):
""" An iPhone implementation of an Enaml Native BridgedApplication.
An IPhoneApplication uses the native iOS widget toolkit to implement an
Enaml UI that runs in the local process.
    Since Objective-C can easily use the Python C-API, much of this class's
    implementation is done directly. For instance, the AppEventListener API is
implemented directly in Objective-C (in Bridge.m) and invokes methods
on this directly.
"""
#: AppDelegate widget
app_delegate = Typed(AppDelegate)
    #: ViewController
view_controller = Typed(ViewController)
#: ENBridge
bridge = Typed(ENBridge)
#: Pixel density of the device
#: Loaded immediately as this is used often.
dp = Float()
# --------------------------------------------------------------------------
# Defaults
# --------------------------------------------------------------------------
def _default_app_delegate(self):
""" Return a bridge object reference to the AppDelegate
this bridge sets this using a special id of -1
"""
return AppDelegate(__id__=-1)
def _default_view_controller(self):
""" Return a bridge object reference to the ViewController
the bridge sets this using a special id of -2
"""
return ViewController(__id__=-2)
def _default_bridge(self):
""" Access the bridge using ctypes. Everything else should use
bridge objects.
"""
return ENBridge(__id__=-4)
def _default_dp(self):
return 1.0
# -------------------------------------------------------------------------
# IPhoneApplication Constructor
# -------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
""" Initialize a IPhoneApplication.
"""
super(IPhoneApplication, self).__init__(*args, **kwargs)
self.resolver = ProxyResolver(factories=factories.IOS_FACTORIES)
# -------------------------------------------------------------------------
# Bridge API Implementation
# -------------------------------------------------------------------------
def show_view(self):
""" Show the current `app.view`. This will fade out the previous
with the new view.
"""
self.view_controller.displayView(self.get_view())
def dispatch_events(self, data):
""" Send the data to the Native application for processing """
self.bridge.processEvents(data)
# -------------------------------------------------------------------------
# iPhone utilities API Implementation
# -------------------------------------------------------------------------
def _observe_keep_screen_on(self, change):
""" Sets or clears the flag to keep the screen on. """
        raise NotImplementedError

        # NOTE: the code below is unreachable; it appears to be carried over
        # from the Android implementation and kept as a placeholder.
        def set_screen_on(window):
from .ios_window import Window
window = Window(__id__=window)
if self.keep_screen_on:
window.addFlags(Window.FLAG_KEEP_SCREEN_ON)
else:
window.clearFlags(Window.FLAG_KEEP_SCREEN_ON)
self.widget.getWindow().then(set_screen_on)
# -------------------------------------------------------------------------
# Plugin API Implementation
# -------------------------------------------------------------------------
def load_plugin_factories(self):
""" Add any plugin toolkit widgets to the ANDROID_FACTORIES """
for plugin in self.get_plugins(group='enaml_native_ios_factories'):
get_factories = plugin.load()
PLUGIN_FACTORIES = get_factories()
factories.IOS_FACTORIES.update(PLUGIN_FACTORIES)
|
"""
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_string,
)
from django.contrib.gis.geos.prototypes.geom import geos_char_p
class Topology(GEOSFuncFactory):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# Topology Routines
geos_boundary = Topology('GEOSBoundary')
geos_buffer = Topology('GEOSBuffer', argtypes=[GEOM_PTR, c_double, c_int])
geos_centroid = Topology('GEOSGetCentroid')
geos_convexhull = Topology('GEOSConvexHull')
geos_difference = Topology('GEOSDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_envelope = Topology('GEOSEnvelope')
geos_intersection = Topology('GEOSIntersection', argtypes=[GEOM_PTR, GEOM_PTR])
geos_linemerge = Topology('GEOSLineMerge')
geos_pointonsurface = Topology('GEOSPointOnSurface')
geos_preservesimplify = Topology('GEOSTopologyPreserveSimplify', argtypes=[GEOM_PTR, c_double])
geos_simplify = Topology('GEOSSimplify', argtypes=[GEOM_PTR, c_double])
geos_symdifference = Topology('GEOSSymDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_union = Topology('GEOSUnion', argtypes=[GEOM_PTR, GEOM_PTR])
geos_cascaded_union = GEOSFuncFactory('GEOSUnionCascaded', argtypes=[GEOM_PTR], restype=GEOM_PTR)
geos_unary_union = GEOSFuncFactory('GEOSUnaryUnion', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFuncFactory(
'GEOSRelate', argtypes=[GEOM_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
# Linear referencing routines
geos_project = GEOSFuncFactory(
'GEOSProject', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate = Topology('GEOSInterpolate', argtypes=[GEOM_PTR, c_double])
geos_project_normalized = GEOSFuncFactory(
'GEOSProjectNormalized', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate_normalized = Topology('GEOSInterpolateNormalized', argtypes=[GEOM_PTR, c_double])
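
# Usage note (a sketch, not part of this module): these factories resolve to
# callables on the loaded GEOS library, so higher-level geometry methods can
# delegate to them with raw GEOM_PTR handles, e.g.
#
#   result_ptr = geos_union(geom1.ptr, geom2.ptr)  # assuming .ptr is a GEOM_PTR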
|
import time
import simple_queue
import simple_http_client
class Task(object):
def __init__(self, logger, config, method, host, path, headers, body, queue, url, timeout):
self.logger = logger
self.config = config
self.method = method
self.host = host
self.path = path
self.headers = headers
self.body = body
self.queue = queue
self.url = url
self.timeout = timeout
self.start_time = time.time()
self.unique_id = "%s:%f" % (url, self.start_time)
self.trace_time = []
self.body_queue = simple_queue.Queue()
self.body_len = 0
self.body_readed = 0
self.content_length = None
self.worker = None
self.read_buffers = []
self.read_buffer_len = 0
self.responsed = False
self.finished = False
self.retry_count = 0
def to_string(self):
out_str = " Task:%s\r\n" % self.url
out_str += " responsed:%d" % self.responsed
out_str += " retry_count:%d" % self.retry_count
out_str += " start_time:%d" % (time.time() - self.start_time)
out_str += " body_readed:%d\r\n" % self.body_readed
out_str += " Trace:%s" % self.get_trace()
out_str += "\r\n"
return out_str
def put_data(self, data):
self.body_queue.put(data)
self.body_len += len(data)
def read(self, size=None):
# fail or cloe if return ""
if self.body_readed == self.content_length:
return memoryview(b'')
if size:
while self.read_buffer_len < size:
data = self.body_queue.get(self.timeout)
if not data:
return memoryview(b'')
self.read_buffers.append(data)
self.read_buffer_len += len(data)
if len(self.read_buffers[0]) == size:
data = self.read_buffers[0]
self.read_buffers.pop(0)
self.read_buffer_len -= size
elif len(self.read_buffers[0]) > size:
data = self.read_buffers[0][:size]
self.read_buffers[0] = self.read_buffers[0][size:]
self.read_buffer_len -= size
else:
buff = bytearray(self.read_buffer_len)
buff_view = memoryview(buff)
p = 0
for data in self.read_buffers:
buff_view[p:p+len(data)] = data
p += len(data)
if self.read_buffer_len == size:
self.read_buffers = []
self.read_buffer_len = 0
data = buff_view
else:
data = buff_view[:size]
self.read_buffers = [buff_view[size:]]
self.read_buffer_len -= size
else:
if self.read_buffers:
data = self.read_buffers.pop(0)
self.read_buffer_len -= len(data)
else:
data = self.body_queue.get(self.timeout)
if not data:
return memoryview(b'')
self.body_readed += len(data)
return data
def read_all(self):
        if self.content_length:
            content_length = int(self.content_length)
            buff = bytearray(content_length)
buff_view = memoryview(buff)
p = 0
for data in self.read_buffers:
buff_view[p:p+len(data)] = data
p += len(data)
self.read_buffers = []
self.read_buffer_len = 0
            while p < content_length:
data = self.read()
if not data:
break
buff_view[p:p + len(data)] = data[0:len(data)]
p += len(data)
return buff_view[:p]
else:
out = list()
while True:
data = self.read()
if not data:
break
if isinstance(data, memoryview):
data = data.tobytes()
out.append(data)
out_buf = "".join(out)
return memoryview(out_buf)
def set_state(self, stat):
# for debug trace
time_now = time.time()
self.trace_time.append((time_now, stat))
if self.config.show_state_debug:
self.logger.debug("%s stat:%s", self.unique_id, stat)
return time_now
def get_trace(self):
out_list = []
last_time = self.start_time
for t, stat in self.trace_time:
time_diff = int((t - last_time) * 1000)
last_time = t
out_list.append("%d:%s" % (time_diff, stat))
out_list.append(":%d" % ((time.time()-last_time)*1000))
return ",".join(out_list)
def response_fail(self, reason=""):
if self.responsed:
self.logger.error("http_common responsed_fail but responed.%s", self.url)
self.put_data("")
return
self.responsed = True
err_text = "response_fail:%s" % reason
self.logger.debug("%s %s", self.url, err_text)
res = simple_http_client.BaseResponse(body=err_text)
res.task = self
res.worker = self.worker
self.queue.put(res)
self.finish()
def finish(self):
if self.finished:
return
self.put_data("")
self.finished = True
class HttpWorker(object):
def __init__(self, logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
self.logger = logger
self.ip_manager = ip_manager
self.config = config
self.ssl_sock = ssl_sock
self.init_rtt = ssl_sock.handshake_time / 3
self.rtt = self.init_rtt
self.speed = 1
self.ip = ssl_sock.ip
self.close_cb = close_cb
self.retry_task_cb = retry_task_cb
self.idle_cb = idle_cb
self.log_debug_data = log_debug_data
self.accept_task = True
self.keep_running = True
self.processed_tasks = 0
self.speed_history = []
self.last_recv_time = self.ssl_sock.create_time
self.last_send_time = self.ssl_sock.create_time
def update_debug_data(self, rtt, sent, received, speed):
self.rtt = rtt
self.speed = speed
self.speed_history.append(speed)
self.log_debug_data(rtt, sent, received)
def close(self, reason):
self.accept_task = False
self.keep_running = False
self.ssl_sock.close()
if reason not in ["idle timeout"]:
self.logger.debug("%s worker close:%s", self.ip, reason)
self.ip_manager.report_connect_closed(self.ssl_sock.ip, reason)
self.close_cb(self)
def get_score(self):
now = time.time()
inactive_time = now - self.last_recv_time
rtt = self.rtt
if inactive_time > 30:
if rtt > 1000:
rtt = 1000
if self.version == "1.1":
rtt += 100
else:
rtt += len(self.streams) * 500
if inactive_time > 1:
score = rtt
elif inactive_time < 0.1:
score = rtt + 1000
        else:
            # 0.1 <= inactive_time <= 1
            score = rtt + (1 / inactive_time) * 1000
return score
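
    # Scoring sketch (illustrative numbers, rtt in ms): after the protocol
    # adjustment above (+100 for HTTP/1.1, +500 per open stream otherwise),
    # a worker idle for more than 1s scores its rtt; one idle under 0.1s
    # scores rtt + 1000; in between, e.g. 0.5s idle, it scores
    # rtt + (1/0.5)*1000 = rtt + 2000.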
def get_host(self, task_host):
if task_host:
return task_host
else:
return self.ssl_sock.host
|
try: input = raw_input # checks to see if in Python 3x or 2x to get input() command
except NameError: pass
import praw #Makes sure we can use the module
#The next part will tell reddit that we are who we are, so that we can
#grab info from accounts.
reddit = praw.Reddit(client_id='<YOUR CLIENT ID HERE>',
client_secret='<YOUR CLIENT SECRET HERE>',
user_agent='RPi Karma Viewer') # This can be the title of the project
me = reddit.redditor(input("Enter a reddit username: ")) #Defines the reddit username
linkkarma = me.link_karma #Defines the variable linkkarma as the username's link karma
commentkarma = me.comment_karma #Defines the variable commentkarma as the username's comment karma
totalkarma = linkkarma + commentkarma #This will make a variable called totalkarma that will add both of the karma variables together.
print "Reddit username karma viewer (RAKV) Version 1.1"
print "Script available at https://github.com/jacksonsevendelta/Reddit-Karma-Viewer"
print "You selected reddit username u/{}.".format(me)
print "TOTAL KARMA:",totalkarma
print "LINK KARMA:",linkkarma
print "COMMENT KARMA:",commentkarma
print "Finished for username",me
print "Additionally, you can find this user's page at https://reddit.com/u/{}.".format(me)
|
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
__modified = 'Kevin Kössl'
CONTEXT_LENGTH = 48
IMAGE_SIZE = 160
BATCH_SIZE = 64
EPOCHS = 10
STEPS_PER_EPOCH = 72000
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tianyu Zhu <zhutianyu1991@gmail.com>
#
'''
PBC spin-restricted G0W0-CD QP eigenvalues with k-point sampling
This implementation has the same scaling (N^4) as GW-AC; it is more robust but slower.
GW-CD is particularly recommended for accurate core and high-energy states.
Method:
See T. Zhu and G.K.-L. Chan, arxiv:2007.03148 (2020) for details
Compute Sigma directly on real axis with density fitting
through a contour deformation method
'''
from functools import reduce
import numpy
import numpy as np
import h5py
from scipy.optimize import newton, least_squares
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos
from pyscf.pbc import df, dft, scf
from pyscf.pbc.mp.kmp2 import get_nocc, get_nmo, get_frozen_mask
from pyscf import __config__
einsum = lib.einsum
def kernel(gw, mo_energy, mo_coeff, orbs=None,
kptlist=None, nw=None, verbose=logger.NOTE):
'''GW-corrected quasiparticle orbital energies
Returns:
A list : converged, mo_energy, mo_coeff
'''
mf = gw._scf
if gw.frozen is None:
frozen = 0
else:
frozen = gw.frozen
assert (frozen == 0)
if orbs is None:
orbs = range(gw.nmo)
if kptlist is None:
kptlist = range(gw.nkpts)
nkpts = gw.nkpts
nklist = len(kptlist)
# v_xc
dm = np.array(mf.make_rdm1())
v_mf = np.array(mf.get_veff()) - np.array(mf.get_j(dm_kpts=dm))
for k in range(nkpts):
v_mf[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), v_mf[k], mo_coeff[k]))
nocc = gw.nocc
nmo = gw.nmo
# v_hf from DFT/HF density
if gw.fc:
exxdiv = 'ewald'
else:
exxdiv = None
rhf = scf.KRHF(gw.mol, gw.kpts, exxdiv=exxdiv)
rhf.with_df = gw.with_df
if getattr(gw.with_df, '_cderi', None) is None:
        raise RuntimeError('Found incompatible integral scheme %s. '
                           'KRGWCD can only be used with GDF integrals' %
                           gw.with_df.__class__)
vk = rhf.get_veff(gw.mol,dm_kpts=dm) - rhf.get_j(gw.mol,dm_kpts=dm)
for k in range(nkpts):
vk[k] = reduce(numpy.dot, (mo_coeff[k].T.conj(), vk[k], mo_coeff[k]))
# Grids for integration on imaginary axis
freqs,wts = _get_scaled_legendre_roots(nw)
logger.debug(gw, "Computing the imaginary part")
Wmn, Del_00, Del_P0, qij, q_abs = get_WmnI_diag(gw, orbs, kptlist, freqs)
conv = True
mo_energy = np.zeros_like(np.array(mf.mo_energy))
for k in range(nklist):
kn = kptlist[k]
for p in orbs:
if p < nocc:
delta = -2e-2
else:
delta = 2e-2
if gw.linearized:
# FIXME
logger.warn(gw,'linearization with CD leads to wrong quasiparticle energy')
raise NotImplementedError
else:
# self-consistently solve QP equation
def quasiparticle(omega):
if gw.fc:
sigmaR = get_sigma_diag(gw, omega, kn, p, Wmn[:,k,:,p-orbs[0],:],
Del_00, Del_P0[k,p-orbs[0],:], freqs, wts, qij, q_abs).real
else:
sigmaR = get_sigma_diag(gw, omega, kn, p, Wmn[:,k,:,p-orbs[0],:],
Del_00, Del_P0, freqs, wts, qij, q_abs).real
return omega - mf.mo_energy[kn][p] - (sigmaR.real + vk[kn,p,p].real - v_mf[kn,p,p].real)
try:
e = newton(quasiparticle, mf.mo_energy[kn][p]+delta, tol=1e-6, maxiter=50)
logger.debug(gw, "Computing poles for QP (k: %s, orb: %s)"%(kn,p))
mo_energy[kn,p] = e
except RuntimeError:
conv = False
mo_coeff = mf.mo_coeff
if gw.verbose >= logger.DEBUG:
numpy.set_printoptions(threshold=nmo)
for k in range(nkpts):
logger.debug(gw, ' GW mo_energy @ k%d =\n%s', k,mo_energy[k])
numpy.set_printoptions(threshold=1000)
return conv, mo_energy, mo_coeff
def get_sigma_diag(gw, ep, kp, p, Wmn, Del_00, Del_P0, freqs, wts, qij, q_abs):
'''
Compute self-energy on real axis using contour deformation
'''
nocc = gw.nocc
nkpts = gw.nkpts
# This code does not support metals
homo = -99.
lumo = 99.
for k in range(nkpts):
if homo < gw._scf.mo_energy[k][nocc-1]:
homo = gw._scf.mo_energy[k][nocc-1]
if lumo > gw._scf.mo_energy[k][nocc]:
lumo = gw._scf.mo_energy[k][nocc]
ef = (homo+lumo)/2.
nmo = gw.nmo
sign = np.zeros((nkpts,nmo),dtype=np.int64)
for k in range(nkpts):
sign[k] = np.sign(ef-gw._scf.mo_energy[k])
sigmaI = get_sigmaI_diag(gw, ep, kp, p, Wmn, Del_00, Del_P0, sign, freqs, wts)
sigmaR = get_sigmaR_diag(gw, ep, kp, p, ef, freqs, qij, q_abs)
return sigmaI + sigmaR
def get_rho_response(gw, omega, mo_energy, Lpq, kL, kidx):
    '''
    Compute the density response function in the auxiliary basis at imaginary
    frequency iw:
        Pi_PQ(iw) = (4/Nk) sum_{k,i,a} L_Pia(k) * e_ia/(w^2 + e_ia^2) * conj(L_Qia(k)),
    with e_ia = eps_i - eps_a (the factor 4 covers both spin channels).
    '''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# Compute Pi for kL
Pi = np.zeros((naux,naux),dtype=np.complex128)
for i, kpti in enumerate(kpts):
# Find ka that conserves with ki and kL (-ki+ka+kL=G)
a = kidx[i]
eia = mo_energy[i,:nocc,None] - mo_energy[a,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pia = einsum('Pia,ia->Pia',Lpq[i][:,:nocc,nocc:],eia)
# Response from both spin-up and spin-down density
Pi += 4./nkpts * einsum('Pia,Qia->PQ',Pia,Lpq[i][:,:nocc,nocc:].conj())
return Pi
def get_WmnI_diag(gw, orbs, kptlist, freqs, max_memory=8000):
'''
Compute GW correlation self-energy (diagonal elements)
in MO basis on imaginary axis
'''
mo_energy = np.array(gw._scf.mo_energy)
mo_coeff = np.array(gw._scf.mo_coeff)
nmo = gw.nmo
nkpts = gw.nkpts
kpts = gw.kpts
nklist = len(kptlist)
nw = len(freqs)
norbs = len(orbs)
mydf = gw.with_df
# possible kpts shift center
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
Del_00, Del_P0, qij, q_abs = None, None, None, None
if gw.fc:
# Set up q mesh for q->0 finite size correction
q_pts = np.array([1e-3,0,0]).reshape(1,3)
q_abs = gw.mol.get_abs_kpts(q_pts)
# Get qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir)
qij = get_qij(gw, q_abs[0], mo_coeff)
Wmn = np.zeros((nkpts,nklist,nmo,norbs,nw),dtype=np.complex128)
if gw.fc:
Del_P0 = np.zeros((nklist,norbs,nw),dtype=np.complex128)
Del_00 = np.zeros(nw,dtype=np.complex128)
for kL in range(nkpts):
# Lij: (ki, L, i, j) for looping every kL
Lij = []
# kidx: save kj that conserves with kL and ki (-ki+kj+kL=G)
# kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G)
kidx = np.zeros((nkpts),dtype=np.int64)
kidx_r = np.zeros((nkpts),dtype=np.int64)
for i, kpti in enumerate(kpts):
for j, kptj in enumerate(kpts):
# Find (ki,kj) that satisfies momentum conservation with kL
kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
if is_kconserv:
kidx[i] = j
kidx_r[j] = i
logger.debug(gw, "Read Lpq (kL: %s / %s, ki: %s, kj: %s)"%(kL+1, nkpts, i, j))
Lij_out = None
# Read (L|pq) and ao2mo transform to (L|ij)
Lpq = []
for LpqR, LpqI, sign \
in mydf.sr_loop([kpti, kptj], max_memory=0.1*gw._scf.max_memory, compact=False):
Lpq.append(LpqR+LpqI*1.0j)
                    # support unequal naux on different k-points
Lpq = np.vstack(Lpq).reshape(-1,nmo**2)
tao = []
ao_loc = None
moij, ijslice = _conc_mos(mo_coeff[i], mo_coeff[j])[2:]
Lij_out = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=Lij_out)
Lij.append(Lij_out.reshape(-1,nmo,nmo))
Lij = np.asarray(Lij)
naux = Lij.shape[1]
if kL == 0:
for w in range(nw):
# body dielectric matrix eps_body
Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
eps_body_inv = np.linalg.inv(np.eye(naux)-Pi)
if gw.fc:
# head dielectric matrix eps_00
Pi_00 = get_rho_response_head(gw, freqs[w], mo_energy, qij)
eps_00 = 1. - 4. * np.pi/np.linalg.norm(q_abs[0])**2 * Pi_00
# wings dielectric matrix eps_P0
Pi_P0 = get_rho_response_wing(gw, freqs[w], mo_energy, Lij, qij)
eps_P0 = -np.sqrt(4.*np.pi) / np.linalg.norm(q_abs[0]) * Pi_P0
# inverse dielectric matrix
eps_inv_00 = 1./(eps_00 - np.dot(np.dot(eps_P0.conj(),eps_body_inv),eps_P0))
eps_inv_P0 = -eps_inv_00 * np.dot(eps_body_inv, eps_P0)
# head correction
Del_00[w] = 2./np.pi * (6.*np.pi**2/gw.mol.vol/nkpts)**(1./3.) * (eps_inv_00 - 1.)
wings_const = np.sqrt(gw.mol.vol/4./np.pi**3) * (6.*np.pi**2/gw.mol.vol/nkpts)**(2./3.)
eps_inv_PQ = eps_body_inv
for k in range(nklist):
kn = kptlist[k]
# Find km that conserves with kn and kL (-km+kn+kL=G)
km = kidx_r[kn]
Qmn = einsum('Pmn,PQ->Qmn',Lij[km][:,:,orbs].conj(),eps_inv_PQ-np.eye(naux))
Wmn[km,k,:,:,w] = 1./nkpts * einsum('Qmn,Qmn->mn',Qmn,Lij[km][:,:,orbs])
if gw.fc:
# compute wing correction
Wn_P0 = einsum('Pnm,P->nm',Lij[kn],eps_inv_P0).diagonal()
Wn_P0 = Wn_P0.real * 2.
Del_P0[k,:,w] = wings_const * Wn_P0[orbs]
else:
for w in range(nw):
Pi = get_rho_response(gw, freqs[w], mo_energy, Lij, kL, kidx)
Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
for k in range(nklist):
kn = kptlist[k]
# Find km that conserves with kn and kL (-km+kn+kL=G)
km = kidx_r[kn]
Qmn = einsum('Pmn,PQ->Qmn',Lij[km][:,:,orbs].conj(),Pi_inv)
Wmn[km,k,:,:,w] = 1./nkpts * einsum('Qmn,Qmn->mn',Qmn,Lij[km][:,:,orbs])
return Wmn, Del_00, Del_P0, qij, q_abs
def get_sigmaI_diag(gw, omega, kp, p, Wmn, Del_00, Del_P0, sign, freqs, wts):
'''
Compute self-energy by integrating on imaginary axis
'''
mo_energy = gw._scf.mo_energy
nkpts = gw.nkpts
sigma = 0j
for k in range(nkpts):
emo = omega - 1j*gw.eta*sign[k] - mo_energy[k]
g0 = wts[None,:]*emo[:,None] / ((emo**2)[:,None]+(freqs**2)[None,:])
sigma += -einsum('mw,mw',g0,Wmn[k])/np.pi
if gw.fc and k == kp:
sigma += -einsum('w,w->',Del_00,g0[p])/np.pi
sigma += -einsum('w,w->',Del_P0,g0[p])/np.pi
return sigma
def get_rho_response_R(gw, omega, mo_energy, Lpq, kL, kidx):
    '''
    Compute density response function in auxiliary basis at real frequency omega
    '''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
# Compute Pi for kL
Pi = np.zeros((naux,naux),dtype=np.complex128)
for i, kpti in enumerate(kpts):
# Find ka that conserves with ki and kL (-ki+ka+kL=G)
a = kidx[i]
eia = mo_energy[i,:nocc,None] - mo_energy[a,None,nocc:]
eia = 1./(omega+eia+2j*gw.eta) + 1./(-omega+eia)
Pia = einsum('Pia,ia->Pia',Lpq[i][:,:nocc,nocc:],eia)
# Response from both spin-up and spin-down density
Pi += 2./nkpts * einsum('Pia,Qia->PQ',Pia,Lpq[i][:,:nocc,nocc:].conj())
return Pi
def get_sigmaR_diag(gw, omega, kn, orbp, ef, freqs, qij, q_abs):
'''
Compute self-energy for poles inside coutour
(more and more expensive away from Fermi surface)
'''
mo_energy = np.array(gw._scf.mo_energy)
mo_coeff = np.array(gw._scf.mo_coeff)
nmo = gw.nmo
nkpts = gw.nkpts
kpts = gw.kpts
mydf = gw.with_df
# possible kpts shift center
kscaled = gw.mol.get_scaled_kpts(kpts)
kscaled -= kscaled[0]
idx = []
for k in range(nkpts):
if omega > ef:
fm = 1.0
idx.append(np.where((mo_energy[k]<omega) & (mo_energy[k]>ef))[0])
else:
fm = -1.0
idx.append(np.where((mo_energy[k]>omega) & (mo_energy[k]<ef))[0])
sigmaR = 0j
for kL in range(nkpts):
# Lij: (ki, L, i, j) for looping every kL
Lij = []
# kidx: save kj that conserves with kL and ki (-ki+kj+kL=G)
# kidx_r: save ki that conserves with kL and kj (-ki+kj+kL=G)
kidx = np.zeros((nkpts),dtype=np.int64)
kidx_r = np.zeros((nkpts),dtype=np.int64)
for i, kpti in enumerate(kpts):
for j, kptj in enumerate(kpts):
# Find (ki,kj) that satisfies momentum conservation with kL
kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
if is_kconserv:
kidx[i] = j
kidx_r[j] = i
km = kidx_r[kn]
if len(idx[km]) > 0:
for i, kpti in enumerate(kpts):
for j, kptj in enumerate(kpts):
# Find (ki,kj) that satisfies momentum conservation with kL
kconserv = -kscaled[i] + kscaled[j] + kscaled[kL]
is_kconserv = np.linalg.norm(np.round(kconserv) - kconserv) < 1e-12
if is_kconserv:
kidx[i] = j
kidx_r[j] = i
#logger.debug(gw, "Read Lpq (kL: %s / %s, ki: %s, kj: %s)"%(kL+1, nkpts, i, j))
Lij_out = None
# Read (L|pq) and ao2mo transform to (L|ij)
Lpq = []
for LpqR, LpqI, sign \
in mydf.sr_loop([kpti, kptj], max_memory=0.1*gw._scf.max_memory, compact=False):
Lpq.append(LpqR+LpqI*1.0j)
                        # support unequal naux on different k-points
Lpq = np.vstack(Lpq).reshape(-1,nmo**2)
tao = []
ao_loc = None
moij, ijslice = _conc_mos(mo_coeff[i], mo_coeff[j])[2:]
Lij_out = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=Lij_out)
Lij.append(Lij_out.reshape(-1,nmo,nmo))
Lij = np.asarray(Lij)
naux = Lij.shape[1]
if kL == 0:
km = kidx_r[kn]
if len(idx[km]) > 0:
for m in idx[km]:
em = mo_energy[km][m] - omega
# body dielectric matrix eps_body
Pi = get_rho_response_R(gw, abs(em), mo_energy, Lij, kL, kidx)
eps_body_inv = np.linalg.inv(np.eye(naux)-Pi)
if gw.fc and m == orbp:
# head dielectric matrix eps_00
Pi_00 = get_rho_response_head_R(gw, abs(em), mo_energy, qij)
eps_00 = 1. - 4. * np.pi/np.linalg.norm(q_abs[0])**2 * Pi_00
# wings dielectric matrix eps_P0
Pi_P0 = get_rho_response_wing_R(gw, abs(em), mo_energy, Lij, qij)
eps_P0 = -np.sqrt(4.*np.pi) / np.linalg.norm(q_abs[0]) * Pi_P0
# inverse dielectric matrix
eps_inv_00 = 1./(eps_00 - np.dot(np.dot(eps_P0.conj(),eps_body_inv),eps_P0))
eps_inv_P0 = -eps_inv_00 * np.dot(eps_body_inv, eps_P0)
eps_inv_PQ = eps_body_inv
# body
Qmn = einsum('P,PQ->Q',Lij[km][:,m,orbp].conj(),eps_inv_PQ-np.eye(naux))
Wmn = 1./nkpts * einsum('Q,Q->',Qmn,Lij[km][:,m,orbp])
sigmaR += fm * Wmn
if gw.fc and m == orbp:
# head correction
Del_00 = 2./np.pi * (6.*np.pi**2/gw.mol.vol/nkpts)**(1./3.) * (eps_inv_00 - 1.)
sigmaR += fm * Del_00
# wings correction
wings_const = np.sqrt(gw.mol.vol/4./np.pi**3) * (6.*np.pi**2/gw.mol.vol/nkpts)**(2./3.)
Wn_P0 = einsum('P,P->',Lij[kn][:,m,orbp].conj(),eps_inv_P0)
Wn_P0 = Wn_P0.real * 2.
sigmaR += fm * wings_const * Wn_P0
else:
km = kidx_r[kn]
if len(idx[km]) > 0:
for m in idx[km]:
em = mo_energy[km][m] - omega
Pi = get_rho_response_R(gw, abs(em), mo_energy, Lij, kL, kidx)
Pi_inv = np.linalg.inv(np.eye(naux)-Pi)-np.eye(naux)
Qmn = einsum('P,PQ->Q',Lij[km][:,m,orbp].conj(),Pi_inv)
Wmn = 1./nkpts * einsum('Q,Q->',Qmn,Lij[km][:,m,orbp])
sigmaR += fm * Wmn
return sigmaR
def get_rho_response_head_R(gw, omega, mo_energy, qij):
'''
Compute head (G=0, G'=0) density response function in auxiliary basis at freq w
'''
nkpts, nocc, nvir = qij.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi head
Pi_00 = 0j
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = 1./(omega+eia+2j*gw.eta) + 1./(-omega+eia)
Pi_00 += 2./nkpts * einsum('ia,ia->',eia,qij[i].conj()*qij[i])
return Pi_00
def get_rho_response_wing_R(gw, omega, mo_energy, Lpq, qij):
    '''
    Compute wing (G=P, G'=0) density response function in auxiliary basis at real frequency omega
    '''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi for kL
Pi = np.zeros(naux,dtype=np.complex128)
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = 1./(omega+eia+2j*gw.eta) + 1./(-omega+eia)
eia_q = eia * qij[i].conj()
Pi += 2./nkpts * einsum('Pia,ia->P',Lpq[i][:,:nocc,nocc:],eia_q)
return Pi
def get_rho_response_head(gw, omega, mo_energy, qij):
'''
Compute head (G=0, G'=0) density response function in auxiliary basis at freq iw
'''
nkpts, nocc, nvir = qij.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi head
Pi_00 = 0j
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = eia/(omega**2+eia*eia)
Pi_00 += 4./nkpts * einsum('ia,ia->',eia,qij[i].conj()*qij[i])
return Pi_00
def get_rho_response_wing(gw, omega, mo_energy, Lpq, qij):
'''
Compute wing (G=P, G'=0) density response function in auxiliary basis at freq iw
'''
nkpts, naux, nmo, nmo = Lpq.shape
nocc = gw.nocc
kpts = gw.kpts
# Compute Pi wing
Pi = np.zeros(naux,dtype=np.complex128)
for i, kpti in enumerate(kpts):
eia = mo_energy[i,:nocc,None] - mo_energy[i,None,nocc:]
eia = eia/(omega**2+eia*eia)
eia_q = eia * qij[i].conj()
Pi += 4./nkpts * einsum('Pia,ia->P',Lpq[i][:,:nocc,nocc:],eia_q)
return Pi
def get_qij(gw, q, mo_coeff, uniform_grids=False):
'''
    Compute qij = 1/sqrt(Omega) * < psi_{ik} | e^{iqr} | psi_{ak-q} > at q: (nkpts, nocc, nvir)
'''
nocc = gw.nocc
nmo = gw.nmo
nvir = nmo - nocc
kpts = gw.kpts
nkpts = len(kpts)
cell = gw.mol
mo_energy = gw._scf.mo_energy
if uniform_grids:
mydf = df.FFTDF(cell, kpts=kpts)
coords = cell.gen_uniform_grids(mydf.mesh)
else:
coords, weights = dft.gen_grid.get_becke_grids(cell,level=5)
ngrid = len(coords)
qij = np.zeros((nkpts,nocc,nvir),dtype=np.complex128)
for i, kpti in enumerate(kpts):
ao_p = dft.numint.eval_ao(cell, coords, kpt=kpti, deriv=1)
ao = ao_p[0]
ao_grad = ao_p[1:4]
if uniform_grids:
ao_ao_grad = einsum('mg,xgn->xmn',ao.T.conj(),ao_grad) * cell.vol / ngrid
else:
ao_ao_grad = einsum('g,mg,xgn->xmn',weights,ao.T.conj(),ao_grad)
q_ao_ao_grad = -1j * einsum('x,xmn->mn',q,ao_ao_grad)
q_mo_mo_grad = np.dot(np.dot(mo_coeff[i][:,:nocc].T.conj(), q_ao_ao_grad), mo_coeff[i][:,nocc:])
enm = 1./(mo_energy[i][nocc:,None] - mo_energy[i][None,:nocc])
dens = enm.T * q_mo_mo_grad
qij[i] = dens / np.sqrt(cell.vol)
return qij
def _get_scaled_legendre_roots(nw):
"""
Scale nw Legendre roots, which lie in the
interval [-1, 1], so that they lie in [0, inf)
Ref: www.cond-mat.de/events/correl19/manuscripts/ren.pdf
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs, wts = np.polynomial.legendre.leggauss(nw)
x0 = 0.5
freqs_new = x0*(1.+freqs)/(1.-freqs)
wts = wts*2.*x0/(1.-freqs)**2
return freqs_new, wts
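
# Quick numerical check (a sketch, not part of the module API): the scaled
# grid should integrate smooth decaying functions on [0, inf), e.g.
#
#   freqs, wts = _get_scaled_legendre_roots(100)
#   np.sum(wts * np.exp(-freqs))   # ~1.0, since int_0^inf e^-x dx = 1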
def _get_clenshaw_curtis_roots(nw):
"""
    Clenshaw-Curtis quadrature on [0,inf)
Ref: J. Chem. Phys. 132, 234114 (2010)
Returns:
freqs : 1D ndarray
wts : 1D ndarray
"""
freqs = np.zeros(nw)
wts = np.zeros(nw)
a = 0.2
for w in range(nw):
t = (w+1.0)/nw * np.pi/2.
freqs[w] = a / np.tan(t)
if w != nw-1:
wts[w] = a*np.pi/2./nw/(np.sin(t)**2)
else:
wts[w] = a*np.pi/4./nw/(np.sin(t)**2)
return freqs[::-1], wts[::-1]
class KRGWCD(lib.StreamObject):
linearized = getattr(__config__, 'gw_gw_GW_linearized', False)
eta = getattr(__config__, 'gw_gw_GW_eta', 1e-3)
fc = getattr(__config__, 'gw_gw_GW_fc', True)
def __init__(self, mf, frozen=0):
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
#TODO: implement frozen orbs
if frozen > 0:
raise NotImplementedError
self.frozen = frozen
# DF-KGW must use GDF integrals
if getattr(mf, 'with_df', None):
self.with_df = mf.with_df
else:
raise NotImplementedError
self._keys.update(['with_df'])
##################################################
# don't modify the following attributes, they are not input options
self._nocc = None
self._nmo = None
self.kpts = mf.kpts
self.nkpts = len(self.kpts)
# self.mo_energy: GW quasiparticle energy, not scf mo_energy
self.mo_energy = None
self.mo_coeff = mf.mo_coeff
self.mo_occ = mf.mo_occ
self.sigma = None
keys = set(('linearized','eta','fc'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('method = %s', self.__class__.__name__)
nocc = self.nocc
nvir = self.nmo - nocc
nkpts = self.nkpts
log.info('GW nocc = %d, nvir = %d, nkpts = %d', nocc, nvir, nkpts)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
logger.info(self, 'use perturbative linearized QP eqn = %s', self.linearized)
logger.info(self, 'GW finite size corrections = %s', self.fc)
return self
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
get_nocc = get_nocc
get_nmo = get_nmo
get_frozen_mask = get_frozen_mask
def kernel(self, mo_energy=None, mo_coeff=None, orbs=None, kptlist=None, nw=100):
"""
Input:
kptlist: self-energy k-points
orbs: self-energy orbs
nw: grid number
Output:
mo_energy: GW quasiparticle energy
"""
if mo_coeff is None:
mo_coeff = np.array(self._scf.mo_coeff)
if mo_energy is None:
mo_energy = np.array(self._scf.mo_energy)
nmo = self.nmo
naux = self.with_df.get_naoaux()
nkpts = self.nkpts
mem_incore = (2*nkpts*nmo**2*naux) * 16/1e6
mem_now = lib.current_memory()[0]
if (mem_incore + mem_now > 0.99*self.max_memory):
logger.warn(self, 'Memory may not be enough!')
raise NotImplementedError
cput0 = (logger.process_clock(), logger.perf_counter())
self.dump_flags()
self.converged, self.mo_energy, self.mo_coeff = \
kernel(self, mo_energy, mo_coeff, orbs=orbs,
kptlist=kptlist, nw=nw, verbose=self.verbose)
logger.warn(self, 'GW QP energies may not be sorted from min to max')
logger.timer(self, 'GW', *cput0)
return self.mo_energy
if __name__ == '__main__':
from pyscf.pbc import gto
from pyscf.pbc.lib import chkfile
import os
# This test takes a few minutes
cell = gto.Cell()
cell.build(unit = 'angstrom',
a = '''
0.000000 1.783500 1.783500
1.783500 0.000000 1.783500
1.783500 1.783500 0.000000
''',
atom = 'C 1.337625 1.337625 1.337625; C 2.229375 2.229375 2.229375',
dimension = 3,
max_memory = 8000,
verbose = 4,
pseudo = 'gth-pade',
basis='gth-szv',
precision=1e-10)
kpts = cell.make_kpts([3,1,1],scaled_center=[0,0,0])
gdf = df.GDF(cell, kpts)
gdf_fname = 'gdf_ints_311.h5'
gdf._cderi_to_save = gdf_fname
if not os.path.isfile(gdf_fname):
gdf.build()
chkfname = 'diamond_311.chk'
if os.path.isfile(chkfname):
kmf = dft.KRKS(cell, kpts)
kmf.xc = 'pbe'
kmf.with_df = gdf
kmf.with_df._cderi = gdf_fname
data = chkfile.load(chkfname, 'scf')
kmf.__dict__.update(data)
else:
kmf = dft.KRKS(cell, kpts)
kmf.xc = 'pbe'
kmf.with_df = gdf
kmf.with_df._cderi = gdf_fname
kmf.conv_tol = 1e-12
kmf.chkfile = chkfname
kmf.kernel()
gw = KRGWCD(kmf)
gw.linearized = False
# without finite size corrections
gw.fc = False
nocc = gw.nocc
gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
print(gw.mo_energy)
assert((abs(gw.mo_energy[0][nocc-1]-0.62045796))<1e-5)
assert((abs(gw.mo_energy[0][nocc]-0.96574426))<1e-5)
assert((abs(gw.mo_energy[1][nocc-1]-0.52639129))<1e-5)
assert((abs(gw.mo_energy[1][nocc]-1.07442235))<1e-5)
# with finite size corrections
gw.fc = True
gw.kernel(kptlist=[0,1,2],orbs=range(0,nocc+3))
print(gw.mo_energy)
assert((abs(gw.mo_energy[0][nocc-1]-0.5427707))<1e-5)
assert((abs(gw.mo_energy[0][nocc]-0.80148557))<1e-5)
assert((abs(gw.mo_energy[1][nocc-1]-0.45073751))<1e-5)
assert((abs(gw.mo_energy[1][nocc]-0.92910117))<1e-5)
|
#!/usr/bin/env python
#
# Copyright (c) 2013-2016, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
import sys
import model
import config
from pygraph.classes.digraph import digraph
# --------------------------------------------------
# NetOS base class
class NetosMachine(model.Model):
"""A machine model that parses the NetOS machine database for machines
We intentionally did not use the NUMA model as a base class, which
was meant as a dirty hack to auto-generate the graph for a
machine. We can now (and should) do this directly from the
pairwise measurements.
Note: Currently this inherits from Model, but it should really
not. The initialization here is completely separate from the base
classes init code.
We currently also parse hierarchical information from various
Linux tools (lscpu, likwid). This is useful for hierarchical
models (such as clusters), but also virtualization.
"""
# Topology information as returned by the topology_parser
res = None
# Machine name
name = None
def __init__(self):
"""Initialize the Simulator for a NetOS machine.
Read topology information from the NetOS machine DB and build
a fully-connected graph representing the pairwise
message-passing communication cost (as a sum of send and
receive cost).
"""
self.name = config.args.machine
        print('Initializing NetOS machine %s' % self.name)
# Build a graph model
super(NetosMachine, self).__init__()
def get_name(self):
return self.name
def _build_graph(self):
"""Build a graph representing the communication cost within that
machine.
The cost of communication for each pair of cores is the
send_cost + receive_cost on that link. This is set as edge
weight and used by the schedulers when sorting
edges.
"""
_c_list = self.get_cores()
# Add all cores to the graph
gr = digraph()
gr.add_nodes(_c_list)
for snd in _c_list:
for rcv in _c_list:
                if snd != rcv:
snd_cost = self.get_send_cost(snd, rcv)
rcv_cost = self.get_receive_cost(snd, rcv)
gr.add_edge((snd, rcv), rcv_cost + snd_cost)
return gr
def __repr__(self):
return self.name
# --------------------------------------------------
# Static function
def get_list():
"""Get list of NetOS machines
"""
sys.path.append(config.MACHINE_DATABASE)
from machineinfo import machines
return [ s for (s, _, _) in machines ]
|
import os
import argparse
from em import molecule
from dataset.metrics import matching_iou
from evoseg import evolutionary_segmentation as evo_seg
from evoseg import miscellaneous as misc
import numpy as np
import pandas as pd
import pickle
import copy
import glob
MAX_NUMBER_SEGMENTS=60
if __name__ == '__main__':
    ''' ARGUMENTS
    # --input_path input mrc file (or glob) path
    # --output_path output directory path to save output
    # --level isosurface level
    # --init_size initial population size
    # --n_mates number of possible combinations
    # --p_mates probability of crossovers
    # --p_split probability of split mutation
    # --p_merge probability of merge mutation
    # --n_patience number of iterations without successive improvements to stop
    # --n_max max number of generations
    '''
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', required=True, help= 'Input mrc directory path')
parser.add_argument('--output_path', required=True, help= 'Output directory path')
parser.add_argument('--level', required=True, help= 'Isosurface level')
#parser.add_argument('--label_path', required=True, help='Input mrc file path with labels')
parser.add_argument('--init_size', required=False, default=30, help= 'Initial population size')
parser.add_argument('--n_mates', required=False, default=25, help= 'Number of possible combinations')
parser.add_argument('--p_mates', required=False, default=0.5, help= 'Probability of crossover ocurrence')
parser.add_argument('--p_split', required=False, default=0.01, help= 'Probability of split mutation')
parser.add_argument('--p_merge', required=False, default=0.01, help= 'Probability of merge mutation')
    parser.add_argument('--n_patience', required=False, default=30, help= 'Number of iterations without successive improvements to stop')
parser.add_argument('--n_max', required=False, default=200, help= 'Max number of generations')
opt = parser.parse_args()
if not os.path.exists(opt.output_path):
os.mkdir(opt.output_path)
print('init')
classifier = pickle.load(open('classifier/classifier.pkl', 'rb'))
print("Loading maps from {}".format(opt.input_path))
filenames = glob.glob(opt.input_path)
for f in filenames:
mol = molecule.Molecule(f,0.01)
#mol_gt = molecule.Molecule(opt.label_path, 0.01)
data = mol.getDataAtContour(1)
#data_gt = mol_gt.getDataAtContour(1)
#data[data_gt==0]=0
mol.setData(data)
        pop = evo_seg.init_population(int(opt.init_size), MAX_NUMBER_SEGMENTS, [1,4], [1,4], mol)
save=False
counter = 0
run= True
pop_fitness = None
overall_best_score = 0
        patience = int(opt.n_patience)
        test_id = os.path.basename(f).split('.')[0]
score_list = []
score_segmentation_list = []
top_5 = []
while(run):
            ma, pa = evo_seg.select_parents(pop, int(opt.n_mates))
new_gen = evo_seg.mating(ma, pa, 0.5, 0.5, data)
            mutated_pop = evo_seg.mutate(new_gen, float(opt.p_split), float(opt.p_merge), [1,4], [1,4], mol)
pop_fitness = [ classifier.predict_proba(n['features'].reshape(1, -1))[0][1] for n in new_gen ]
sorted_idx = np.argsort(pop_fitness)
if len(top_5) > 0:
for t in top_5:
mutated_pop.append(t)
top_5 = [ copy.deepcopy(new_gen[sorted_idx[i]]) for i in range(-5,0) ]
pop_fitness = [ classifier.predict_proba(n['features'].reshape(1, -1))[0][1] for n in mutated_pop ]
sorted_idx = np.argsort(pop_fitness)
print("Population of gen {} fitness {}".format(counter,pop_fitness))
best_individual_score = np.max(pop_fitness)
print("***Optimizing {}, best score of generation {} is {}".format(test_id, counter, best_individual_score))
save = True if best_individual_score > overall_best_score else False
overall_best_score = best_individual_score if best_individual_score > overall_best_score else overall_best_score
score_list.append(best_individual_score)
current_segmentation = mutated_pop[sorted_idx[-1]]['labels']
if save:
print(" saving segmentation...")
save_path = os.path.join(opt.output_path,'best_{0}_{1:.2f}.npy'.format(test_id,best_individual_score))
np.save(save_path, current_segmentation)
                patience = int(opt.n_patience)
else:
patience -= 1
pop = mutated_pop
            run = not (counter >= int(opt.n_max) or patience == 0)
counter += 1
# plot results
#plot_evolutionary_graph(score_list, score_segmentation_list, os.path.join(opt.output_path,'evolutionary_result'), 'Matching IoU')
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@version: 0.1
@author: wenzhiquan
@contact: wenzhiquanr@163.com
@site: http://github.wenzhiquan.com
@software: PyCharm
@file: __init__.py.py
@time: 15/12/7 15:17
@description: null
"""
def func():
pass
class Main():
def __init__(self):
pass
if __name__ == '__main__':
pass
|
from test_utils import run_query
def index_key(item):
return item['index']
def test_quadbin_kring_distances():
"""Computes kring"""
result = run_query('SELECT QUADBIN_KRING_DISTANCES(5209574053332910079, 1)')
expected = sorted(
[
{'index': 5208043533147045887, 'distance': 1},
{'index': 5209556461146865663, 'distance': 1},
{'index': 5209591645518954495, 'distance': 1},
{'index': 5208061125333090303, 'distance': 1},
{'index': 5209574053332910079, 'distance': 0},
{'index': 5209609237704998911, 'distance': 1},
{'index': 5208113901891223551, 'distance': 1},
{'index': 5209626829891043327, 'distance': 1},
{'index': 5209662014263132159, 'distance': 1},
],
key=index_key,
)
assert sorted(result[0][0], key=index_key) == expected
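
# Note: for k = 1 the result is the origin cell at distance 0 plus its eight
# immediate neighbors at distance 1, which is what the expected list encodes.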
|
from django.http import Http404
from django.shortcuts import render, redirect
from haystack.generic_views import SearchView, FacetedSearchView, FacetedSearchMixin
from nuremberg.documents.models import Document
from .forms import DocumentSearchForm
from .lib.digg_paginator import DiggPaginator
from .lib.solr_grouping_backend import GroupedSearchQuerySet
class Search(FacetedSearchView):
"""
This is a subclass of the default Haystack faceted search view to implement
our modifications, including pagination, shorter query parameters, custom
sorting, and labeled facets.
You can add a search facet to the list simply by placing it in `facet_labels`.
See `forms.py` for the faceting and fielded search logic itself.
"""
load_all = False
queryset = GroupedSearchQuerySet()
# page numbers like [1, 2 ... 6, 7, 8, 9, 10, ... 19, 20]
paginator_class = DiggPaginator
paginate_by = 15
context_pages = 4
edge_pages = 2
form_class = DocumentSearchForm
search_field = 'q'
filter_field = 'f'
material_field = 'm'
sort_field = 'sort'
default_sort = 'relevance'
facet_labels = (
('Material Type', 'material_type'),
('Trial', 'case_names'),
('Defendant', 'defendants'),
('Date', 'date_year'),
('Author', 'authors'),
('Language', 'language'),
('Source', 'source'),
('Trial Issues', 'trial_activities'),
)
facet_to_label = {field: label for (label, field) in facet_labels}
    facet_fields = [field for (_, field) in facet_labels]
def get(self, *args, **kwargs):
try:
return super().get(*args, **kwargs)
except Http404:
if self.request.GET.get('page', 1) == 1:
raise
params = self.request.GET.copy()
del params['page']
return redirect('%s?%s' % (self.request.path, params.urlencode()))
def form_invalid(self, form):
# override SearchView to give a blank search by default
# TODO: this seems unnecessary
self.queryset = form.search()
context = self.get_context_data(**{
self.form_name: form,
'object_list': self.get_queryset()
})
return self.render_to_response(context)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({
'sort_results': self.request.GET.get(self.sort_field, self.default_sort),
'selected_facets': self.request.GET.getlist(self.filter_field),
'facet_to_label': self.facet_to_label
})
return kwargs
def get_queryset(self):
# override FacetedSearchMixin
qs = super(FacetedSearchMixin, self).get_queryset()
for field in self.facet_fields:
sort = 'count'
qs = qs.facet(field, missing=True, sort=sort, mincount=1)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# pull the query out of form so it is pre-processed
context['query'] = context['form'].data.get('q') or '*'
if context['facets']:
labeled_facets = []
for (label, field) in self.facet_labels:
counts = context['facets']['fields'].get(field, [])
                # missing ignores mincount and sorting
                if (None, 0) in counts:
                    counts.remove((None, 0))
                # sort missing into the other facet values
                # counts.sort(key=lambda field: field[1], reverse=True)
labeled_facets.append({
'field': field,
'label': label,
'counts': counts
})
context.update({'labeled_facets': labeled_facets})
if context['form']:
context['facet_lookup'] = {}
for (field, value, facet) in context['form'].applied_filters:
context['facet_lookup'][facet] = True
if self.request.GET.get('partial'):
context['base_template'] = 'search/partial.html'
else:
context['base_template'] = None
return context
def get_paginator(self, *args, **kwargs):
return self.paginator_class(*args, body=self.context_pages, tail=self.edge_pages, **kwargs)
|
# read a Y x X grid; each move changes one cell by +/- d; print the minimum
# number of moves to make all cells equal, or -1 if impossible
Y, X, d = [int(x) for x in input().split()]
arr = []
for _ in range(Y):
    xs = [int(x) for x in input().split()]
    arr.extend(xs)
arr.sort()
hi = max(arr)
# normalize against the maximum so every value becomes a non-positive offset
for i in range(Y * X):
    arr[i] -= hi
if all(map(lambda x: x % d == 0, arr)):
    # all gaps are multiples of d; aligning every cell on the median
    # minimizes the total number of steps
    total = 0
    med = arr[(Y * X) // 2]
    # print(med)
    for x in arr:
        rem = abs(med - x)
        total += rem // d
    print(total)
else:
    print(-1)
|
#!/usr/bin/env python3
plink = 'https://www.udemy.com/course/python-1000/?referralCode=D3A7B607149F46D12A28'
doc = f'''
> This file was created by [this](./missions.py) Python script.
Learn what Python scripts [can do]({plink}) for you!
'''
import os
ofile = './MISSIONS.md'
mfile = 'MISSION.md'
def get_mission(afile, fout):
prefix = '## '
with open(afile) as fh:
mission = prefix
mission += fh.read().split(prefix)[1].strip()
print(mission, end="\n\n", file=fout)
with open(ofile, 'w') as fh:
print(doc, "\n", file=fh)
for root, dirs, files in os.walk("./com"):
aroot = root.replace('\\', '/')
for file in files:
if file == mfile:
afile = aroot + "/" + file
get_mission(afile, fh)
print(f"> Go to [Mission]({afile})", file=fh)
print(file=fh)
|
# Copyright (c) ACSONE SA/NV 2021
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
"""Utilities to work with PEP 503 package indexes."""
import logging
import os
from io import StringIO
from pathlib import PosixPath
from subprocess import CalledProcessError
from typing import Iterator, Optional, Tuple
from urllib.parse import urljoin, urlparse
import requests
from lxml import etree
from .process import check_call
_logger = logging.getLogger(__name__)
def files_on_index(
index_url: str, project_name: str
) -> Iterator[Tuple[str, Optional[Tuple[str, str]]]]:
"""Iterate files available on an index for a given project name."""
project_name = project_name.replace("_", "-")
base_url = urljoin(index_url, project_name + "/")
r = requests.get(base_url)
if r.status_code == 404:
# project not found on this index
return
r.raise_for_status()
parser = etree.HTMLParser()
tree = etree.parse(StringIO(r.text), parser)
for a in tree.iterfind("//a"):
parsed_url = urlparse(a.get("href"))
p = PosixPath(parsed_url.path)
if parsed_url.fragment:
            hash_type, hash_value = parsed_url.fragment.split("=", 1)
yield p.name, (hash_type, hash_value)
else:
yield p.name, None
def exists_on_index(index_url: str, filename: str) -> bool:
"""Check if a distribution exists on a package index."""
project_name = filename.split("-", 1)[0]
for filename_on_index, _ in files_on_index(index_url, project_name):
if filename_on_index == filename:
return True
return False
class DistPublisher:
def publish(self, dist_dir: str, dry_run: bool) -> None:
raise NotImplementedError()
class MultiDistPublisher(DistPublisher):
def __init__(self):
self._dist_publishers = []
def add(self, dist_publisher: DistPublisher) -> None:
self._dist_publishers.append(dist_publisher)
def publish(self, dist_dir: str, dry_run: bool) -> None:
for dist_publisher in self._dist_publishers:
dist_publisher.publish(dist_dir, dry_run)
class TwineDistPublisher(DistPublisher):
def __init__(
self,
index_url: str,
repository_url: str,
username: str,
password: str,
):
self._index_url = index_url
self._repository_url = repository_url
self._username = username
self._password = password
def publish(self, dist_dir: str, dry_run: bool) -> None:
for filename in os.listdir(dist_dir):
if exists_on_index(self._index_url, filename):
_logger.info(
f"Not uploading {filename} that already exists "
f"on {self._repository_url}."
)
continue
_logger.info(f"Uploading {filename} to {self._repository_url}")
cmd = [
"twine",
"upload",
"--repository-url",
self._repository_url,
"-u",
self._username,
filename,
]
if dry_run:
_logger.info("DRY-RUN" + " ".join(cmd))
else:
_logger.info(" ".join(cmd))
try:
check_call(
cmd,
cwd=dist_dir,
env=dict(os.environ, TWINE_PASSWORD=self._password),
)
except CalledProcessError as e:
if (
"File already exists" in e.output
or "This filename has already been used" in e.output
):
# in case exist_on_index() received an outdated index page
_logger.warning(
f"Could not upload {filename} that already exists "
f"on {self._repository_url}."
)
else:
raise
class RsyncDistPublisher(DistPublisher):
def __init__(self, rsync_target):
self._rsync_target = rsync_target
def publish(self, dist_dir: str, dry_run: bool) -> None:
pkgname = _find_pkgname_in_dist_dir(dist_dir)
# --ignore-existing: never overwrite an existing package
# os.path.join: make sure directory names end with /
cmd = [
"rsync",
"-rv",
"--ignore-existing",
"--no-perms",
"--chmod=ugo=rwX",
os.path.join(dist_dir, ""),
os.path.join(self._rsync_target, pkgname, ""),
]
if dry_run:
_logger.info("DRY-RUN" + " ".join(cmd))
else:
_logger.info(" ".join(cmd))
check_call(cmd, cwd=".")
def _find_pkgname_in_dist_dir(dist_dir: str) -> str:
"""Find the package name by looking at .whl files"""
pkgname = None
for f in os.listdir(dist_dir):
if f.endswith(".whl"):
new_pkgname = f.split("-")[0].replace("_", "-")
if pkgname and new_pkgname != pkgname:
raise RuntimeError(f"Multiple packages names in {dist_dir}")
pkgname = new_pkgname
if not pkgname:
raise RuntimeError(f"Package name not found in {dist_dir}")
return pkgname
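# A minimal usage sketch of the publishers above; the index URL, repository
# URL, credentials, and rsync target are hypothetical placeholders.
if __name__ == "__main__":
    publisher = MultiDistPublisher()
    publisher.add(
        TwineDistPublisher(
            index_url="https://pypi.example.org/simple/",
            repository_url="https://pypi.example.org/",
            username="uploader",
            password="s3cret",
        )
    )
    publisher.add(RsyncDistPublisher("user@host:/srv/wheelhouse"))
    # dry_run=True logs the upload commands instead of running them
    # (the index lookups in exists_on_index still hit the network)
    publisher.publish("dist", dry_run=True)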
|
from com.hhj.pystock.search_engine.Bloomfilter import Bloomfilter
from com.hhj.pystock.search_engine.Segments import segments
class SplunkM(object):
def __init__(self):
self.bf = Bloomfilter(64)
self.terms = {} # Dictionary of term to set of events
self.events = []
def add_event(self, event):
"""Adds an event to this object"""
# Generate a unique ID for the event, and save it
event_id = len(self.events)
self.events.append(event)
# Add each term to the bloomfilter, and track the event by each term
for term in segments(event):
self.bf.add_value(term)
if term not in self.terms:
self.terms[term] = set()
self.terms[term].add(event_id)
def search_all(self, terms):
"""Search for an AND of all terms"""
# Start with the universe of all events...
results = set(range(len(self.events)))
for term in terms:
# If a term isn't present at all then we can stop looking
if not self.bf.might_contain(term):
return
if term not in self.terms:
return
# Drop events that don't match from our results
results = results.intersection(self.terms[term])
for event_id in sorted(results):
yield self.events[event_id]
def search_any(self, terms):
"""Search for an OR of all terms"""
results = set()
for term in terms:
# If a term isn't present, we skip it, but don't stop
if not self.bf.might_contain(term):
continue
if term not in self.terms:
continue
# Add these events to our results
results = results.union(self.terms[term])
for event_id in sorted(results):
yield self.events[event_id]
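# A small usage sketch of the SplunkM index above; the events are
# hypothetical, and this assumes segments() yields whitespace-like tokens.
if __name__ == "__main__":
    s = SplunkM()
    for event in ["error in module a", "all systems good", "error in module b"]:
        s.add_event(event)
    print(list(s.search_all(["error", "module"])))  # AND of terms
    print(list(s.search_any(["good", "error"])))    # OR of terms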
|
"""打印输出乘法表(内部函数)"""
upper = int(input('从1到几:'))
def multiplication_table(n: int) -> bool:
"""打印输出数字1~n的乘法表"""
def put_bar(n: int) -> None:
"""连续打印输出n个'-'并换行"""
print('-' * n)
if 1 <= n <= 3: w = 2
elif 4 <= n <= 9: w = 3
elif 10 <= n <= 31: w = 4
else : return False
f = '{{:{}d}}'.format(w)
put_bar(n * w)
for i in range(1, n + 1):
for j in range(1, n + 1):
print(f.format(i * j), end='')
print()
put_bar(n * w)
return True
multiplication_table(upper)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyDaskMl(PythonPackage):
"""Scalable Machine Learning with Dask."""
homepage = "https://ml.dask.org/"
pypi = "dask-ml/dask-ml-1.8.0.tar.gz"
version('1.8.0', sha256='8fc4ac3ec1915e382fb8cae9ff1ec9b5ac1bee0b6f4c6975d6e6cb7191a4a815')
variant('docs', default=False, description='Build HTML documentation')
variant('xgboost', default=False, description='Deploys XGBoost alongside Dask')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-setuptools-scm', type='build')
depends_on('py-dask+array+dataframe@2.4.0:', type=('build', 'run'))
depends_on('py-distributed@2.4.0:', type=('build', 'run'))
depends_on('py-numba', type=('build', 'run'))
depends_on('py-numpy@1.17.3:', type=('build', 'run'))
depends_on('py-pandas@0.24.2:', type=('build', 'run'))
depends_on('py-scikit-learn@0.23:', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-dask-glm@0.2.0:', type=('build', 'run'))
depends_on('py-multipledispatch@0.4.9:', type=('build', 'run'))
depends_on('py-packaging', type=('build', 'run'))
depends_on('py-graphviz', type=('build', 'run'), when='+docs')
depends_on('py-heapdict', type=('build', 'run'), when='+docs')
depends_on('py-ipykernel', type=('build', 'run'), when='+docs')
depends_on('py-ipython', type=('build', 'run'), when='+docs')
depends_on('py-nbsphinx', type=('build', 'run'), when='+docs')
depends_on('py-nose', type=('build', 'run'), when='+docs')
depends_on('py-numpydoc', type=('build', 'run'), when='+docs')
depends_on('py-sortedcontainers', type=('build', 'run'), when='+docs')
depends_on('py-sphinx', type=('build', 'run'), when='+docs')
depends_on('py-sphinx-rtd-theme', type=('build', 'run'), when='+docs')
depends_on('py-sphinx-gallery', type=('build', 'run'), when='+docs')
depends_on('py-testpath', type=('build', 'run'), when='+docs')
depends_on('py-tornado', type=('build', 'run'), when='+docs')
depends_on('py-zict', type=('build', 'run'), when='+docs')
depends_on('py-dask-sphinx-theme@1.1.0:', type=('build', 'run'), when='+docs')
depends_on('py-xgboost+dask', type=('build', 'run'), when='+docs')
depends_on('py-xgboost+dask', type=('build', 'run'), when='+xgboost')
patch('xgboost_dependency.patch')
conflicts('+docs', when='%gcc target=aarch64:')
@run_after('install')
def install_docs(self):
if '+docs' in self.spec:
with working_dir('docs'):
make('html')
install_tree('docs', self.prefix.docs)
|
#
# dcolumn/dcolumns/tests/urls.py
#
try:
from django.urls import re_path
except ImportError:
from django.conf.urls import url as re_path
from . import views
urlpatterns = [
re_path(r'^test-book-create/$', views.test_book_create_view,
name='test-book-create'),
re_path(r'^test-book-update/(?P<pk>\d+)/$', views.test_book_update_view,
name='test-book-update'),
re_path(r'^test-book-detail/(?P<pk>\d+)/$', views.test_book_detail_view,
name='test-book-detail'),
re_path(r'^test-book-list/$', views.test_book_list_view,
name='test-book-list'),
]
|
# input to read a value into the variable
valor = input('type something: ')
# general string checks
tipo = type(valor)
espaco = valor.isspace()
numero = valor.isnumeric()
alfabetico = valor.isalpha()
alfanumerico = valor.isalnum()
maiscula = valor.isupper()
minuscula = valor.islower()
maiscula_e_minuscula = valor.istitle()
# turn the True/False values into 'true'/'false' strings
def texto(flag):
    return 'true' if flag else 'false'
espaco = texto(espaco)
numero = texto(numero)
alfabetico = texto(alfabetico)
alfanumerico = texto(alfanumerico)
maiscula = texto(maiscula)
minuscula = texto(minuscula)
maiscula_e_minuscula = texto(maiscula_e_minuscula)
# show the results on screen
print('The primitive type of this value is: {}'.format(tipo))
print('Is it only spaces? {}'.format(espaco))
print('Is it made up only of digits? {}'.format(numero))
print('Is it made up only of letters? {}'.format(alfabetico))
print('Is it made up only of letters or digits? {}'.format(alfanumerico))
print('Is it all upper case? {}'.format(maiscula))
print('Is it all lower case? {}'.format(minuscula))
print('Is it in title case? {}'.format(maiscula_e_minuscula))
|
import ctypes
import numpy as np
from AnyQt.QtCore import QRectF
from pyqtgraph.graphicsItems.ImageItem import ImageItem
# load the C++ library; _grid_density is built and distributed as a Python
# extension but does not export any Python objects (apart from PyInit).
from . import _grid_density
lib = ctypes.pydll.LoadLibrary(_grid_density.__file__)
# compute the color/class density image
def class_density_image(
min_x, max_x, min_y, max_y, resolution, x_data, y_data, rgb_data
):
x_sz = (max_x - min_x) / (resolution - 1)
y_sz = (max_y - min_y) / (resolution - 1)
x_grid = [min_x + i * x_sz for i in range(resolution)]
y_grid = [min_y + i * y_sz for i in range(resolution)]
n_points = len(x_data)
sample = range(n_points)
if n_points > 1000:
sample = grid_sample(x_data, y_data, 1000)
x_data_norm = (np.array(x_data) - min_x) / (max_x - min_x)
y_data_norm = (np.array(y_data) - min_y) / (max_y - min_y)
x_grid_norm = (np.array(x_grid) - min_x) / (max_x - min_x)
y_grid_norm = (np.array(y_grid) - min_y) / (max_y - min_y)
img = compute_density(
x_grid_norm,
y_grid_norm,
x_data_norm[sample],
y_data_norm[sample],
np.array(rgb_data)[sample],
)
density_img = ImageItem(img.astype(np.uint8), autoLevels=False)
density_img.setRect(
QRectF(
min_x - x_sz / 2,
min_y - y_sz / 2,
max_x - min_x + x_sz,
max_y - min_y + y_sz,
)
)
density_img.setZValue(-1)
return density_img
# call C++ implementation
def compute_density(x_grid, y_grid, x_data, y_data, rgb_data):
fun = lib.compute_density
fun.restype = None
fun.argtypes = [
ctypes.c_int,
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
ctypes.c_int,
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(ctypes.c_int, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(ctypes.c_int, flags="C_CONTIGUOUS"),
]
gx = np.ascontiguousarray(x_grid, dtype=np.float64)
gy = np.ascontiguousarray(y_grid, dtype=np.float64)
dx = np.ascontiguousarray(x_data, dtype=np.float64)
dy = np.ascontiguousarray(y_data, dtype=np.float64)
drgb = np.ascontiguousarray(rgb_data, dtype=np.int32)
resolution = len(x_grid)
n_points = len(x_data)
img = np.ascontiguousarray(np.zeros((resolution, resolution, 4)), dtype=np.int32)
fun(resolution, gx, gy, n_points, dx, dy, drgb, img)
img = np.swapaxes(img, 0, 1)
return img
# sample k data points from a uniformly spaced g*g grid of buckets
def grid_sample(x_data, y_data, k=1000, g=10):
n = len(x_data)
min_x, max_x = min(x_data), max(x_data)
min_y, max_y = min(y_data), max(y_data)
dx, dy = (max_x - min_x) / g, (max_y - min_y) / g
grid = [[[] for j in range(g)] for i in range(g)]
for i in range(n):
y = int(min((y_data[i] - min_y) / dy, g - 1))
x = int(min((x_data[i] - min_x) / dx, g - 1))
grid[y][x].append(i)
for y in range(g):
for x in range(g):
np.random.shuffle(grid[y][x])
sample = []
while len(sample) < k:
for y in range(g):
for x in range(g):
if len(grid[y][x]) != 0:
sample.append(grid[y][x].pop())
np.random.shuffle(sample)
return sample[:k]
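# A small self-contained check of grid_sample on synthetic data
# (class_density_image itself needs the compiled _grid_density extension,
# so only the pure-Python sampler is exercised here).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    xs = rng.uniform(0, 1, 5000).tolist()
    ys = rng.uniform(0, 1, 5000).tolist()
    idx = grid_sample(xs, ys, k=1000, g=10)
    print(len(idx))  # -> 1000 indices, spread roughly evenly over the grid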
|
# Generated by Django 3.1.1 on 2020-09-14 04:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('organization', '0002_auto_20200913_1430'),
]
operations = [
migrations.RemoveField(
model_name='organization',
name='group',
),
]
|
from distutils.core import setup, Extension, Command
from distutils import sysconfig
from distutils.command.build_ext import build_ext
import os, shutil, re, subprocess
class my_build_ext (build_ext):
def run(self):
cmd = self.reinitialize_command('build_dylib')
cmd.run()
build_ext.run(self)
class build_dylib (Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def get_arch_flags(self):
cflags = sysconfig.get_config_var('CFLAGS')
result = []
        for item in re.findall(r'(-arch\s+\S+)', cflags):
result.extend(item.split())
return result
def run(self):
print("running build_dylib")
bdir = 'build/libdir'
if os.path.exists(bdir):
shutil.rmtree(bdir)
os.makedirs(bdir)
cflags = self.get_arch_flags()
cc = sysconfig.get_config_var('CC')
subprocess.check_call([cc] + cflags + [
'-c', '-o', os.path.join(bdir, 'libfoo.o'),
'src/libfoo.c'])
subprocess.check_call(['libtool',
'-dynamic', '-o', os.path.join(bdir, 'libfoo.dylib'),
'-install_name', os.path.abspath(os.path.join(bdir, 'libfoo.dylib')),
            os.path.join(bdir, 'libfoo.o')])
setup(
cmdclass = {
'build_dylib': build_dylib,
'build_ext': my_build_ext,
},
ext_modules=[
Extension("foo", ["src/modfoo.c"],
extra_link_args=["-L%s"%(os.path.abspath("build/libdir"),), "-lfoo"])
],
options={
'build_ext': {
'inplace': True
},
},
)
|
from lx16a import *
from math import sin, cos, pi
import time
import xlwt
import pandas as pd
import numpy as np
from xlwt import Workbook
class RecordMotorData:
    def __init__(self, servo):
        self.servo = servo
        self.id = []
        self.angleOffset = []
        self.physicalPos = []
        self.virtualPos = []
        self.temp = []
        self.voltage = []
def record(self):
self.id.append(int(self.servo.IDRead()))
self.angleOffset.append(int(self.servo.angleOffsetRead()))
self.physicalPos.append(int(self.servo.getPhysicalPos()))
self.virtualPos.append(int(self.servo.getVirtualPos()))
self.temp.append(int(self.servo.tempRead()))
self.voltage.append(int(self.servo.vInRead()))
def save2CSV(recordMotorDataList):
    id = []
    angleOffset = []
    physicalPos = []
    virtualPos = []
    temp = []
    voltage = []
    for recordMotorData in recordMotorDataList:
        id.extend(recordMotorData.id)
        angleOffset.extend(recordMotorData.angleOffset)
        physicalPos.extend(recordMotorData.physicalPos)
        virtualPos.extend(recordMotorData.virtualPos)
        temp.extend(recordMotorData.temp)
        voltage.extend(recordMotorData.voltage)
df = pd.DataFrame(list(zip(id, angleOffset, physicalPos, virtualPos, temp, voltage)), columns = ["Id", "Angle offset", "Physical pos", "Virtual pos", "Temp", "Voltage"])
df.to_csv(r'MotorData.csv')
def initializeMotor(servo1):
    targetPos = 120
    initialPos = servo1.getPhysicalPos()
    error = targetPos - initialPos
    print(servo1.IDRead())
    print(initialPos)
    print(error)
    t = 0
while (abs(sin(t*2*pi/360)*error) < abs(error)):
# print("Motor id is ", servo1.IDRead())
# print("Physical pos is ", servo1.getPhysicalPos())
# print("Virtual pos is ", servo1.getVirtualPos())
servo1.moveTimeWrite(sin(t*2*pi/360)*error+initialPos)
time.sleep(.01)
t+=3
def resetAllMotors(servoL1, servoL2, servoA1S, servoA1E, servoA2S, servoA2E):
    initializeMotor(servoL1)
    initializeMotor(servoL2)
    initializeMotor(servoA1S)
    initializeMotor(servoA1E)
    initializeMotor(servoA2S)
    initializeMotor(servoA2E)
def danceLegs1(servo1, servo2,cycles, relativeAngleStep, r1, r2):
print("Starting dance 1")
direction = 1;
for i in range(0,cycles):
t=0;
initialPos1 = servo1.getPhysicalPos();
initialPos2 = servo2.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*relativeAngleStep)
# print("Motor id is ", servo1.IDRead())
# print("Physical pos is ", servo1.getPhysicalPos())
# print("Virtual pos is ", servo1.getVirtualPos())
# print("Motor id is ", servo2.IDRead())
# print("Physical pos is ", servo2.getPhysicalPos())
# print("Virtual pos is ", servo2.getVirtualPos())
servo1.moveTimeWrite(direction*sin(t*3*pi/360)*relativeAngleStep+initialPos1);
servo2.moveTimeWrite(direction*sin(t*3*pi/360)*relativeAngleStep+initialPos2);
time.sleep(.01)
t+=5;
direction= -direction;
def turnL1A2(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
#servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
#servoArm1S.moveTimeWrite(direction*sin(4*t*2*pi/360)*angleStepShoulder+initialPosA1S);
#servoArm1E.moveTimeWrite(direction*sin(4*t*2*pi/360)*angleStepElbow+initialPosA1E);
servoArm2S.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA2S);
servoArm2E.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def turnL1A1(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
#servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
servoArm1S.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA1S);
servoArm1E.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA1E);
# servoArm2S.moveTimeWrite(-direction*sin(4*t*2*pi/360)*angleStepShoulder+initialPosA2S);
# servoArm2E.moveTimeWrite(-direction*sin(4*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def turnL2A1(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
#servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
servoArm1S.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA1S);
servoArm1E.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA1E);
#servoArm2S.moveTimeWrite(direction*sin(4*t*2*pi/360)*angleStepShoulder+initialPosA2S);
#servoArm2E.moveTimeWrite(direction*sin(4*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def turnL2A2(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
#servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
# servoArm1S.moveTimeWrite(-direction*sin(4*t*2*pi/360)*angleStepShoulder+initialPosA1S);
# servoArm1E.moveTimeWrite(-direction*sin(4*t*2*pi/360)*angleStepElbow+initialPosA1E);
servoArm2S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA2S);
servoArm2E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def danceArmsAlternate(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
# servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
# servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
servoArm1S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA1S);
servoArm1E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA1E);
time.sleep(.01)
servoArm2S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA2S);
servoArm2E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def danceArmsSynchronized(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
# servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
# servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
servoArm1S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA1S);
servoArm1E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA1E);
time.sleep(.01)
servoArm2S.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA2S);
servoArm2E.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def danceLegsAndArms1(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
servoArm1S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA1S);
servoArm1E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA1E);
servoArm2S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA2S);
servoArm2E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
def danceLegsAndArms2(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E,
cycles, angleStepLeg, angleStepShoulder, angleStepElbow, r1, r2):
print("Starting dance legs with arms")
direction = 1;
for i in range(0,cycles):
t=0;
initialPosL1 = servoLeg1.getPhysicalPos();
initialPosL2 = servoLeg2.getPhysicalPos();
initialPosA1S = servoArm1S.getPhysicalPos();
initialPosA1E = servoArm1E.getPhysicalPos();
initialPosA2S = servoArm2S.getPhysicalPos();
initialPosA2E = servoArm2E.getPhysicalPos();
while (t<180):
r1.record();
r2.record();
# print(sin(t*2*pi/360)*angleStepLeg)
# print("Motor id is ", servoLeg1.IDRead())
# print("Physical pos is ", servoLeg1.getPhysicalPos())
# print("Virtual pos is ", servoLeg1.getVirtualPos())
# print("Motor id is ", servoLeg2.IDRead())
# print("Physical pos is ", servoLeg2.getPhysicalPos())
# print("Virtual pos is ", servoLeg2.getVirtualPos())
servoLeg1.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL1);
servoLeg2.moveTimeWrite(direction*sin(t*2*pi/360)*angleStepLeg+initialPosL2);
servoArm1S.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA1S);
servoArm1E.moveTimeWrite(direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA1E);
servoArm2S.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepShoulder+initialPosA2S);
servoArm2E.moveTimeWrite(-direction*sin(2*t*2*pi/360)*angleStepElbow+initialPosA2E);
time.sleep(.01)
t+=5;
direction= -direction;
    resetAllMotors(servoLeg1, servoLeg2, servoArm1S, servoArm1E, servoArm2S, servoArm2E)
# This is the port that the controller board is connected to
# This will be different for different computers
# On Windows, try the ports COM1, COM2, COM3, etc...
# On Raspbian, try each port in /dev/
try:
LX16A.initialize("COM9")
servoL1 = LX16A(7)
servoL2 = LX16A(8)
servoA1S = LX16A(2)
servoA1E = LX16A(1)
servoA2S = LX16A(4)
servoA2E = LX16A(3)
servoL1.motorMode(800)
servoL2.motorMode(-800)
time.sleep(3)
servoL1.servoMode()
servoL2.servoMode()
print("Resetting to home position")
resetAllMotors(servoL1, servoL2, servoA1S, servoA1E, servoA2S, servoA2E)
print("Finished resetting")
except KeyboardInterrupt:
quit()
|
from django.views.generic import TemplateView
class HistorySupportTabTemplateView(TemplateView):
template_name = "history_support/base.html"
class HistorySupportTab1TemplateView(TemplateView):
template_name = "history_support/frags/tab_1.html"
class HistorySupportTab2TemplateView(TemplateView):
template_name = "history_support/frags/tab_2.html"
class HistorySupportTab3TemplateView(TemplateView):
template_name = "history_support/frags/tab_3.html"
|
from Crypto.PublicKey import RSA
from Crypto.Util.number import *
#e, n, c
params = open("parameters.txt").read().split("\n")
for l in params:
if len(l) == 0:
continue
exec(l)
'''
#export public key pair to public.pem
public_key = RSA.construct((n, e))
with open("public.pem", "w") as f:
f.write(public_key.exportKey(format='PEM'))
'''
'''
generate private.pem by using RsaCtfTool
'''
privdata = open("private.pem").read()
private_key = RSA.importKey(privdata)
print private_key.decrypt(long_to_bytes(c))
|
#
# (c) 2017, Forcepoint
# Documentation fragment. This fragment specifies the top level
# requirements for obtaining a valid session to the Forcepoint NGFW Management
# Center.
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
filter:
description:
- String value to match against when making query. Matches all if not specified.
A filter will attempt to find a match in the name, primary key field or comment
field of a given record.
required: false
default: '*'
type: str
limit:
description:
- Limit the number of results. Set to 0 to remove limit.
required: false
default: 10
type: int
exact_match:
description:
- Whether to do an exact match on the filter specified
required: false
default: false
case_sensitive:
description:
- Whether to do a case sensitive match on the filter specified
required: false
default: true
notes:
- If a filter is not used in the query, this will return all results for the
element type specified. The return data in this case will only contain the metadata
for the element which will be name and type. To get detailed information about an
element, use a filter. When using filters on network or service elements, the filter
value will search the element fields, for example, you could use a filter of '1.1.1.1'
when searching for hosts and all hosts with this IP will be returned. The same applies
for services. If you are unsure of the service name but know the port you require, your
filter can be by port.
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : github.py.py
# @Author: guq
# @Date : 2022/4/19
# @Desc :
import base64
import hashlib
from core.uploader_base import BaseUploader, init_server_decor
class GithubUploader(BaseUploader):
name = 'github'
is_repo = True
def __init__(self, access_token, owner, repo, branch, store_path, is_use_jsdelivr=True):
self.access_token = access_token
self.owner = owner.lower()
self.repo = repo
self.branch = branch
self.store_path = store_path
        self.is_use_jsdelivr = is_use_jsdelivr  # use the jsdelivr CDN for acceleration
self.headers = {
"Authorization": "token %s" % access_token,
"Accept": "application/vnd.github.v3+json"
}
super().__init__()
async def format_pic_url(self, filename):
fullname = filename
# fullname = SQLiteModel.get_fullname_by_name(filename, upload_way=self.name)
if not self.is_use_jsdelivr:
# https://raw.githubusercontent.com/EngiGu/resources/images/2.txt
path = 'https://raw.githubusercontent.com/{owner}/{repo}/{branch}/{path}/{fullname}'.format(
owner=self.owner,
repo=self.repo,
path=self.store_path,
fullname=fullname,
branch=self.branch
)
else:
# https://cdn.jsdelivr.net/gh/engigu/ReadLogs/static/logviewer.gif
path = 'https://cdn.jsdelivr.net/gh/{owner}/{repo}@{branch}/{path}/{fullname}'.format(
owner=self.owner,
repo=self.repo,
path=self.store_path,
fullname=fullname,
branch=self.branch
)
return path.replace('///', '/')
def git_blob_hash(self, data):
if isinstance(data, str):
data = data.encode()
data = b'blob ' + str(len(data)).encode() + b'\0' + data
h = hashlib.sha1()
h.update(data)
return h.hexdigest()
async def upload(self, file, filename, raw_filename):
        # file is the raw binary file content
file_content = base64.b64encode(file).decode()
sha = self.git_blob_hash(file)
url = 'https://api.github.com/repos/{owner}/{repo}/contents/{path}/{fullname}'.format(
owner=self.owner, repo=self.repo, path=self.store_path, fullname=filename
).replace('///', '/')
kwargs = {
'url': url,
'json': {
"message": self.format_upload_info(filename),
"committer": {
"name": "image bot",
"email": "image_bot@sooko.club"
},
"content": file_content,
"branch": self.branch,
'sha': sha
},
'headers': self.headers
}
return await self.send_requstes('PUT', **kwargs)
async def deal_upload_result(self, result, filename):
        # process the upload result
url = await self.format_pic_url(filename)
need_add_record = False
if 'committer' not in str(result):
            # the upload failed
            return -1, str(result), str(result), need_add_record
        else:
            # the upload succeeded
            need_add_record = True
            return 0, 'upload succeeded!', url, need_add_record
async def get_all_blob_tree(self):
        # fetch the full blob tree for the branch
kwargs = {
'url': 'https://api.github.com/repos/{owner}/{repo}/git/trees/{branch}?recursive=1'.format(
owner=self.owner, repo=self.repo, branch=self.branch, access_token=self.access_token,
path=self.store_path
),
}
return await self.send_requstes('GET', **kwargs)
async def processing_tree_data(self, tree):
result = []
for blob in tree:
path = blob.get('path', '')
filename = path.split("/")[-1]
if len(filename.split('.')[0]) == 32:
result.append({'name': filename, 'fullname': path})
return result
async def do_data(self):
result = await self.get_all_blob_tree()
result = await self.processing_tree_data(result.get('tree', []))
return result
@init_server_decor
async def init_server(self, sqlite_model):
        # initialization: pull the existing blob images and mirror them into sqlite
        print('2. starting to pull blob images...')
result = await self.do_data()
i = 0
for file in result:
sqlite_model.add_one_record(name=file['name'], upload_way=self.name, fullname=file['fullname'])
i += 1
            print('3. committed all records to sqlite [%s/%s]' % (i, i), end='\r')
        print('\nall done.')
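# A minimal usage sketch of the uploader above; the token, owner, repo,
# branch, store path, and file name are hypothetical placeholders.
async def _demo():
    uploader = GithubUploader(
        access_token='ghp_hypothetical_token',
        owner='example-user',
        repo='resources',
        branch='main',
        store_path='images',
    )
    with open('cat.png', 'rb') as f:
        data = f.read()
    result = await uploader.upload(data, 'cat.png', 'cat.png')
    print(await uploader.deal_upload_result(result, 'cat.png'))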
|
#from .sparseDatasetLoader import SparseDatasetLocator, SparseDataset
from .denseDatasetLoaderHDF5_v2 import DenseDatasetFromSamples_v2
from .adaptiveDatasetLoader import getSamplePatternCrops, AdaptiveDataset
from .datasetUtils import getCropsForDataset, Normalization, getNormalizationForDataset
from .stepsizeDatasetLoader import StepsizeDataset
|
from pydantic import BaseModel
class Configuration(BaseModel):
latitude: str
longitude: str
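# Example (hypothetical coordinates); pydantic validates/coerces the fields:
#   Configuration(latitude="52.52", longitude="13.405")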
|
import cv2
import numpy as np
import math
import socket
import time
UDP_IP = "localhost"
UDP_PORT = 5065
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
time_counter = 0 # For counting time
frequency_tracker_left = [0]*6
frequency_tracker_right = [0]*6
def get_max(arr):
    # return the index of the largest value (the most frequent gesture)
    e = -1
    freq = -123456
    for i in range(len(arr)):
        if arr[i] > freq:
            e = i
            freq = arr[i]
    return e
cap = cv2.VideoCapture(0)
begin_time = time.time()
while(1):
#sending gesture data to server
time_counter = time.time() - begin_time
#print(time_counter)
if time_counter > 2:
print ("Time exceeded")
left = get_max(frequency_tracker_left)
right = get_max(frequency_tracker_right)
txt = "left = "+str(left) + " right = "+str(right)
if left != 0 or right != 0:
print(txt)
#sock.sendto( txt.encode(), (UDP_IP, UDP_PORT))
sock.sendto( bytes(txt,'utf-8'), (UDP_IP, UDP_PORT))
frequency_tracker_left = [0]*6
frequency_tracker_right = [0]*6
begin_time = time.time()
    try:
        # an exception is raised when nothing is in the window, since no
        # max-area contour can be found; hence this try/except guard
ret, frame = cap.read()
frame=cv2.flip(frame,1)
kernel = np.ones((3,3),np.uint8)
#define region of interest
        roi = frame[0:300, 0:200]  # frame[rows, cols] -> [vertical, horizontal]
        cv2.rectangle(frame, (0, 0), (200, 300), (0, 255, 0), 0)  # (x, y), x is horizontal
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# define range of skin color in HSV
lower_skin = np.array([0,20,70], dtype=np.uint8)
upper_skin = np.array([20,255,255], dtype=np.uint8)
        # extract the skin-coloured image
mask = cv2.inRange(hsv, lower_skin, upper_skin)
#extrapolate the hand to fill dark spots within
mask = cv2.dilate(mask,kernel,iterations = 4)
#blur the image
mask = cv2.GaussianBlur(mask,(5,5),100)
#find contours
        # cv2.findContours returns (contours, hierarchy) in OpenCV 4 and
        # (image, contours, hierarchy) in OpenCV 3; [-2:] handles both
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
#find contour of max area(hand)
cnt = max(contours, key = lambda x: cv2.contourArea(x))
#approx the contour a little
epsilon = 0.0005*cv2.arcLength(cnt,True)
approx= cv2.approxPolyDP(cnt,epsilon,True)
#make convex hull around hand
hull = cv2.convexHull(cnt)
#define area of hull and area of hand
areahull = cv2.contourArea(hull)
areacnt = cv2.contourArea(cnt)
#find the percentage of area not covered by hand in convex hull
arearatio=((areahull-areacnt)/areacnt)*100
#find the defects in convex hull with respect to hand
hull = cv2.convexHull(approx, returnPoints=False)
defects = cv2.convexityDefects(approx, hull)
# l = no. of defects
l=0
#code for finding no. of defects due to fingers
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(approx[s][0])
end = tuple(approx[e][0])
far = tuple(approx[f][0])
pt= (100,180)
# find length of all sides of triangle
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
s = (a+b+c)/2
ar = math.sqrt(s*(s-a)*(s-b)*(s-c))
#distance between point and convex hull
d=(2*ar)/a
# apply cosine rule here
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
# ignore angles > 90 and ignore points very close to convex hull(they generally come due to noise)
if angle <= 90 and d>30:
l += 1
cv2.circle(roi, far, 3, [255,0,0], -1)
#draw lines around hand
cv2.line(roi,start, end, [0,255,0], 2)
l+=1
#print corresponding gestures which are in their ranges
font = cv2.FONT_HERSHEY_SIMPLEX
if l==1:
if areacnt<2000:
cv2.putText(frame,'Put hand in the box',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left Put hand in the box")
else:
if arearatio<12:
cv2.putText(frame,'0',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left 0")
frequency_tracker_left[0] += 1
elif arearatio<17.5:
cv2.putText(frame,'Best of luck',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left Best of luck")
else:
cv2.putText(frame,'1',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_left[1] += 1
#print("left ",1)
elif l==2:
cv2.putText(frame,'2',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left ",2)
frequency_tracker_left[2] += 1
elif l==3:
if arearatio<27:
cv2.putText(frame,'3',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left ",3)
frequency_tracker_left[3] += 1
else:
cv2.putText(frame,'ok',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left ok,")
elif l==4:
cv2.putText(frame,'4',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left ",4)
frequency_tracker_left[4] += 1
elif l==5:
cv2.putText(frame,'5',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left ",5)
frequency_tracker_left[5] += 1
elif l==6:
cv2.putText(frame,'reposition',(0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left reposition")
else :
cv2.putText(frame,'reposition',(10,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#print("left reposition")
#show the windows
cv2.imshow('mask',mask)
cv2.imshow('frame',frame)
#for second hand
ret, frame = cap.read()
frame=cv2.flip(frame,1)
kernel = np.ones((3,3),np.uint8)
#define region of interest
roi=frame[00:300, 450:650] #[vertical,horizontal]
cv2.rectangle(frame,(450,00),(650,300),(0,255,0),0) #(x,y) x is horizontal
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# define range of skin color in HSV
lower_skin = np.array([0,20,70], dtype=np.uint8)
upper_skin = np.array([20,255,255], dtype=np.uint8)
        # extract the skin-coloured image
mask = cv2.inRange(hsv, lower_skin, upper_skin)
#extrapolate the hand to fill dark spots within
mask = cv2.dilate(mask,kernel,iterations = 4)
#blur the image
mask = cv2.GaussianBlur(mask,(5,5),100)
#find contours
        # cv2.findContours returns (contours, hierarchy) in OpenCV 4 and
        # (image, contours, hierarchy) in OpenCV 3; [-2:] handles both
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
#find contour of max area(hand)
cnt = max(contours, key = lambda x: cv2.contourArea(x))
#approx the contour a little
epsilon = 0.0005*cv2.arcLength(cnt,True)
approx= cv2.approxPolyDP(cnt,epsilon,True)
#make convex hull around hand
hull = cv2.convexHull(cnt)
#define area of hull and area of hand
areahull = cv2.contourArea(hull)
areacnt = cv2.contourArea(cnt)
#find the percentage of area not covered by hand in convex hull
arearatio=((areahull-areacnt)/areacnt)*100
#find the defects in convex hull with respect to hand
hull = cv2.convexHull(approx, returnPoints=False)
defects = cv2.convexityDefects(approx, hull)
# l = no. of defects
l=0
#code for finding no. of defects due to fingers
for i in range(defects.shape[0]):
s,e,f,d = defects[i,0]
start = tuple(approx[s][0])
end = tuple(approx[e][0])
far = tuple(approx[f][0])
pt= (100,180)
# find length of all sides of triangle
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
s = (a+b+c)/2
ar = math.sqrt(s*(s-a)*(s-b)*(s-c))
#distance between point and convex hull
d=(2*ar)/a
# apply cosine rule here
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57
# ignore angles > 90 and ignore points very close to convex hull(they generally come due to noise)
if angle <= 90 and d>30:
l += 1
cv2.circle(roi, far, 3, [255,0,0], -1)
#draw lines around hand
cv2.line(roi,start, end, [0,255,0], 2)
l+=1
#print corresponding gestures which are in their ranges
font = cv2.FONT_HERSHEY_SIMPLEX
if l==1:
if areacnt<2000:
cv2.putText(frame,'Put hand in the box',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
else:
if arearatio<12:
cv2.putText(frame,'0',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_right[0] += 1
elif arearatio<17.5:
cv2.putText(frame,'Best of luck',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
else:
cv2.putText(frame,'1',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_right[1] += 1
elif l==2:
cv2.putText(frame,'2',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_right[2] += 1
elif l==3:
if arearatio<27:
cv2.putText(frame,'3',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_right[3] += 1
else:
cv2.putText(frame,'ok',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
elif l==4:
cv2.putText(frame,'4',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_right[4] += 1
elif l==5:
cv2.putText(frame,'5',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
frequency_tracker_right[5] += 1
elif l==6:
cv2.putText(frame,'reposition',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
else :
cv2.putText(frame,'reposition',(400,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
#show the windows
cv2.imshow('mask2',mask)
cv2.imshow('frame',frame)
except:
pass
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
|
#! /usr/bin/python3
import sys
import json
import argparse
import logging
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--add", nargs='+', type=str, required=False,
help="List of strings to add to the vocab at the end.")
args = parser.parse_args()
return args
def main():
args = parse_args()
logging.basicConfig(level=logging.DEBUG)
logging.debug(args)
    vocab = {}
    index = -1  # guard in case stdin is empty
    for index, line in enumerate(sys.stdin):
line = line.strip()
parts = line.split("\t")
item = parts[0]
vocab[item] = index
if args.add is not None:
for item in args.add:
if item not in vocab.keys():
index += 1
vocab[item] = index
json.dump(vocab, sys.stdout, ensure_ascii=False, indent=4)
if __name__ == '__main__':
main()
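# Example invocation (hypothetical file names); each input line is
# "token<TAB>count", and the output maps token -> line index, with any
# --add strings appended at the end when not already present:
#   ./build_vocab.py --add "<unk>" "<pad>" < counts.tsv > vocab.json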
|
from .SCM import SCM
__all__ = ['SCM']
|
from uFire_ISE import uFire_ISE
mv = uFire_ISE()
mv.measuremV()
print("mV: " + str(mv.mV))
|
from smpl_tools.actions import _determine_output_samplenames, _process_naming_pattern
import unittest
class SplitSilenceTest(unittest.TestCase):
def test_determine_multiple_filenames_extensionless_names_kept(self):
extensionless_name = "my_src"
result = _determine_output_samplenames(None, extensionless_name, 1)
self.assertTrue(extensionless_name in result[0])
def test_determine_multiple_filenames_numbered_correctly(self):
extensionless_name = "my_src"
result = _determine_output_samplenames(None, extensionless_name, 2)
self.assertEquals(result, ["my_src_01.wav", "my_src_02.wav"])
pass
def test_naming_pattern_replacement(self):
test_pattern = "%(trck)/%(smpl).wav"
result = _process_naming_pattern(
test_pattern,
sample_name="alpha1",
track_name="track01"
)
self.assertEquals(result, "track01/alpha1.wav")
def test_naming_pattern_replacement_removes_sample_ext(self):
test_pattern = "%(trck)/%(smpl).wav"
result = _process_naming_pattern(
test_pattern,
sample_name="alpha1.wav",
track_name="track01"
)
self.assertEquals(result, "track01/alpha1.wav")
def test_naming_pattern_replacement_removes_track_ext(self):
test_pattern = "%(trck)/%(smpl).wav"
result = _process_naming_pattern(
test_pattern,
sample_name="alpha1",
track_name="track01.wav"
)
self.assertEquals(result, "track01/alpha1.wav")
def test_naming_pattern_replacement_adds_ext(self):
test_pattern = "%(trck)/%(smpl)"
result = _process_naming_pattern(
test_pattern,
sample_name="alpha1.wav",
track_name="track01"
)
self.assertEquals(result, "track01/alpha1.wav")
|
"""
notes:
? docs:
? I said pick up the can.
§ TODO:
§ Amplify weapons on Wallhammer.
% FIXME:
% Target compromised: move in, move in.
& FIX:
& Overwatch, target one sterilized.
µ WHYNOT:
µ make a floor list with room class instances, use index with movement()?
! IMPORTANT:
! Roger that. Waiting for contact. Over.
$ LOGS:
$ 28/10/2021
$ 01:54AM
$ don't really know where I'm going with this one
"""
#encoding
#coding:utf-8
#libraries/modules
#import time
import libs.actions.f1actions as f1actions
import libs.actions.f0actions as f0actions
#libs setup
#classes
#functions
def choose_action(argUserChoice,argUserRoom,argUserFloor):
if argUserFloor == 1:
if argUserRoom == 1:
if argUserChoice == 1:
return f1actions.bedroom_jump_from_window()
else:
print("\nInvalid Action Choice.\n")
if argUserRoom == 2:
pass
if argUserRoom == 3:
pass
if argUserRoom == 4:
pass
if argUserRoom == 5:
if argUserChoice == 1:
return f1actions.sibling_change_clothes()
else:
print("\nInvalid Action Choice.\n")
if argUserRoom == 6:
pass
if argUserRoom == 7:
            if argUserChoice == 1:
                return f1actions.restrooms_flush()
            elif argUserChoice == 2:
                return f1actions.restrooms_use()
            elif argUserChoice == 3:
                return f1actions.restrooms_drink()
            else:
                print("\nInvalid Action Choice.\n")
if argUserRoom == 8:
pass
if argUserFloor == 0:
if argUserRoom == 1:
pass
if argUserRoom == 2:
if argUserChoice == 1:
return f0actions.hallway2_observe_painting()
else:
print("\nInvalid Action Choice.\n")
if argUserRoom == 3:
pass
if argUserRoom == 4:
            if argUserChoice == 1:
                return f0actions.bathroom_look_at_mirror()  #% make it so the spooky stuff only happens once
            elif argUserChoice == 2:
                return f0actions.bathroom_wash_hands()
            elif argUserChoice == 3:
                return f0actions.bathroom_pull_curtain()
            else:
                print("\nInvalid Action Choice.\n")
if argUserRoom == 5:
pass
if argUserRoom == 6:
if argUserChoice == 1:
return f0actions.dining_sit()
else:
print("\nInvalid Action Choice.\n")
if argUserRoom == 7:
pass
if argUserRoom == 8:
pass
if argUserRoom == 9:
pass
if argUserRoom == 10:
pass
#input("(press any key to continue...) > ")
#script
|
__author__ = 'dimd'
from zope.interface import Interface, Attribute
class IValidator(Interface):
is_valid = Attribute('is this message valid')
message_type = Attribute('show us the type of the message')
def validate():
"""
Validate incoming message for our supported types
:return: self
"""
class IValidatorResponse(Interface):
response = Attribute(
"""
Helper interface that provides attribute to hold a valid response from our main validator
"""
)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
__copyright__ = ('Copyright Amazon.com, Inc. or its affiliates. '
'All Rights Reserved.')
__version__ = '2.6.1'
__license__ = 'MIT-0'
__author__ = 'Akihiro Nakajima'
__url__ = 'https://github.com/aws-samples/siem-on-amazon-opensearch-service'
from siem.sf_config_snapshot import (extract_host, extract_ip, extract_user,
update_doc_ids)
def transform(logdata):
logdata = update_doc_ids(logdata)
logdata = extract_host(logdata)
logdata = extract_user(logdata)
logdata = extract_ip(logdata)
try:
compliance = logdata['newEvaluationResult']['complianceType']
except KeyError:
compliance = None
if compliance:
if compliance == 'COMPLIANT':
logdata['event']['outcome'] = 'success'
elif compliance == 'NON_COMPLIANT':
logdata['event']['outcome'] = 'failure'
else:
# INSUFFICIENT_DATA
# NOT_APPLICABLE
logdata['event']['outcome'] = 'unknown'
return logdata
|
import os
from nacl.encoding import Base64Encoder, HexEncoder, URLSafeBase64Encoder
from nacl.signing import SigningKey
key_length = 32
private_key_signature = b"\x00\x00\x00\x40"
public_key_signature = b"\x00\x00\x00\x20"
device_ip_cache_filename = ".device_ip.cache"
def read_device_ip_from_cache(device_id: str):
"""
Reads the cache file and looks if it has seen the device before
:param str device_id: ID of the device to look for
:returns: IP of the device, None otherwise
:rtype: str or None
"""
with open(device_ip_cache_filename, "r") as f:
for line in reversed(f.readlines()):
if line.startswith(device_id):
return line.split()[1]
def write_device_ip_to_cache(device_id: str, ip: str):
"""
Writes the IP of the device to the cache file
:param str device_id: ID of the device
:param str ip: IP of the device
"""
if not os.path.exists(device_ip_cache_filename):
with open(device_ip_cache_filename, "w") as f:
f.write("")
current = read_device_ip_from_cache(device_id)
if current is None or current != ip:
with open(device_ip_cache_filename, "a+") as f:
f.write(f"{device_id} {ip}\n")
def sign_file(file_path: str, sign_key: str):
"""
Sign a file with the given key
:param str file_path: path to the file to sign
:param str sign_key: private key to sign the file with. Expects it to be in Base64 format.
"""
signing_key = extract_curve_private_key(sign_key)
signed = signing_key.sign(get_sha256_hash(file_path), encoder=Base64Encoder)
signature = signed.signature
""" Replacing padding characters for uri encoding. """
signature = signature.decode("utf-8").replace("+", "-").replace("/", "_")
with open(file_path + ".sha256.signed", "wb") as f:
f.write(signature.encode("utf-8"))
def get_pubkey(sign_key):
"""
Get the public key from the given private key
:param str sign_key: private key to get the public key from. Expects it to be in Base64 format.
:return: public key in Hex format
:rtype: str
"""
signing_key = extract_curve_private_key(sign_key)
return signing_key.verify_key.encode(encoder=HexEncoder)
def get_sha256_hash(file_path):
"""
Get the sha256 hash of the given file
:param str file_path: path to the file to get the hash of
:return: sha256 hash of the file in byte format
:rtype: bytes
"""
import hashlib
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
return sha256_hash.digest()
def bytes_from_file(file_path, chunksize=8192):
"""
Reads the given file and returns the contents as a byte array
:param str file_path: path to the file to read
:param int chunksize: size of the chunks to read
:returns: the contents of the file as a byte array
:rtype: bytearray
"""
with open(file_path, "rb") as f:
while True:
if chunk := f.read(chunksize):
yield from chunk
else:
break
def bytes_after(signature, length, bytestr):
"""
Returns the bytes after the given signature
:param bytes signature: the signature to find the bytes after
:param int length: the length of the bytes to return
:param bytes bytestr: the bytes to search in
:returns: the bytes after the signature
:rtype: bytes
"""
start = bytestr.find(signature) + len(signature)
return bytestr[start : start + length]
def extract_curve_private_key(openssh_priv_key):
"""
Returns the curve private key from the openssh private key
:param str openssh_priv_key: openssh private key
:returns: the curve private key
"""
return SigningKey(seed=openssh_priv_key, encoder=URLSafeBase64Encoder)
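# A short usage sketch of the helpers above; the seed is generated ad hoc
# and the file path is a hypothetical placeholder.
if __name__ == "__main__":
    # a URL-safe base64 encoded 32-byte seed (hypothetical)
    seed = URLSafeBase64Encoder.encode(os.urandom(key_length))
    print(get_pubkey(seed))  # hex-encoded public key for that seed
    # sign_file("firmware.bin", seed)  # would write firmware.bin.sha256.signed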
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#tag::example-block[]
from pyignite import Client
from pyignite.datatypes.cache_config import CacheMode
from pyignite.datatypes.prop_codes import *
from pyignite.exceptions import SocketError
nodes = [
('127.0.0.1', 10800),
('217.29.2.1', 10800),
('200.10.33.1', 10800),
]
client = Client(timeout=40.0)
client.connect(nodes)
print('Connected to {}'.format(client))
my_cache = client.get_or_create_cache({
PROP_NAME: 'my_cache',
PROP_CACHE_MODE: CacheMode.REPLICATED,
})
my_cache.put('test_key', 0)
# Abstract main loop
while True:
try:
# Do the work
test_value = my_cache.get('test_key')
my_cache.put('test_key', test_value + 1)
except (OSError, SocketError) as e:
# Recover from error (repeat last command, check data
        # consistency or just continue - depends on the task)
print('Error: {}'.format(e))
print('Last value: {}'.format(my_cache.get('test_key')))
print('Reconnected to {}'.format(client))
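        # The client was initialised with several nodes above, so the next
        # cache operation transparently fails over to another node.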
#end::example-block[]
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit test for User Group creation servlet."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import settings
from framework import permissions
from proto import site_pb2
from proto import usergroup_pb2
from services import service_manager
from sitewide import groupcreate
from testing import fake
from testing import testing_helpers
class GroupCreateTest(unittest.TestCase):
"""Tests for the GroupCreate servlet."""
def setUp(self):
self.services = service_manager.Services(
user=fake.UserService(),
usergroup=fake.UserGroupService(),
project=fake.ProjectService())
self.servlet = groupcreate.GroupCreate(
'req', 'res', services=self.services)
self.mr = testing_helpers.MakeMonorailRequest()
def CheckAssertBasePermissions(
self, restriction, expect_admin_ok, expect_nonadmin_ok):
old_group_creation_restriction = settings.group_creation_restriction
settings.group_creation_restriction = restriction
# Anon users can never do it
mr = testing_helpers.MakeMonorailRequest(
perms=permissions.GetPermissions(None, {}, None))
self.assertRaises(
permissions.PermissionException,
self.servlet.AssertBasePermission, mr)
mr = testing_helpers.MakeMonorailRequest()
if expect_admin_ok:
self.servlet.AssertBasePermission(mr)
else:
self.assertRaises(
permissions.PermissionException,
self.servlet.AssertBasePermission, mr)
mr = testing_helpers.MakeMonorailRequest(
perms=permissions.GetPermissions(mr.auth.user_pb, {111}, None))
if expect_nonadmin_ok:
self.servlet.AssertBasePermission(mr)
else:
self.assertRaises(
permissions.PermissionException,
self.servlet.AssertBasePermission, mr)
settings.group_creation_restriction = old_group_creation_restriction
def testAssertBasePermission(self):
self.CheckAssertBasePermissions(
site_pb2.UserTypeRestriction.ANYONE, True, True)
self.CheckAssertBasePermissions(
site_pb2.UserTypeRestriction.ADMIN_ONLY, True, False)
self.CheckAssertBasePermissions(
site_pb2.UserTypeRestriction.NO_ONE, False, False)
def testGatherPageData(self):
page_data = self.servlet.GatherPageData(self.mr)
self.assertEqual('', page_data['initial_name'])
def testProcessFormData_Normal(self):
post_data = fake.PostData(
groupname=['group@example.com'], visibility='1')
url = self.servlet.ProcessFormData(self.mr, post_data)
self.assertIn('/g/3444127190/', url)
group_id = self.services.user.LookupUserID('cnxn', 'group@example.com')
group_settings = self.services.usergroup.GetGroupSettings('cnxn', group_id)
self.assertIsNotNone(group_settings)
members_after, owners_after = self.services.usergroup.LookupMembers(
'cnxn', [group_id])
self.assertEqual(0, len(members_after[group_id] + owners_after[group_id]))
def testProcessFormData_Import(self):
post_data = fake.PostData(
groupname=['group@example.com'], group_type='1',
import_group=['on'])
self.servlet.ProcessFormData(self.mr, post_data)
group_id = self.services.user.LookupUserID('cnxn', 'group@example.com')
group_settings = self.services.usergroup.GetGroupSettings('cnxn', group_id)
self.assertIsNotNone(group_settings)
self.assertEqual(usergroup_pb2.MemberVisibility.OWNERS,
group_settings.who_can_view_members)
self.assertEqual(usergroup_pb2.GroupType.MDB,
group_settings.ext_group_type)
|
from ..extensions import db
class Election(db.Model):
""" Implements an object to represent all data relating to a specific
election """
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, unique=True, nullable=False)
voters = db.relationship('Voter',
backref=db.backref('election', lazy='joined'),
lazy=True)
authorities = db.relationship('Authority',
backref=db.backref('election', lazy='joined'),
lazy=True)
candidates = db.relationship('Candidate',
backref=db.backref('election', lazy='joined'),
lazy=True)
bulletin = db.Column(db.Text, default="")
results = db.Column(db.Text)
seats = db.Column(db.Integer)
def __init__(self, name):
self.name = name
def __repr__(self):
return f'<Election {self.id} ({self.name})>'
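# A minimal usage sketch (assumes an application context with the
# database initialised; the election name is illustrative):
#
#     election = Election('Board 2024')
#     db.session.add(election)
#     db.session.commit()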
|
import os
import cv2
import datetime
def read_image(img_path):
    # OpenCV loads images in BGR order; reverse the channel axis to get RGB.
    img = cv2.imread(img_path)
    return img[:, :, ::-1]
def rgb2gray(img):
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).squeeze()
def save_image(img_path, img):
cv2.imwrite(img_path, img[:,:,::-1])
def get_all_paths(folder, ext=None):
paths = [os.path.join(folder, f) for f in sorted(os.listdir(folder))]
if ext:
paths = [p for p in paths if p.endswith(ext)]
return paths
def get_datetime():
return datetime.datetime.now().strftime('%m%d_%H%M')
class LogWriter():
def __init__(self, log_path=None, print_out=True, log_time=True, clear_pre_content=True):
self.log_path = log_path
self.print_out = print_out
self.log_time = log_time
        if log_path and clear_pre_content:
            # remove any previous log file instead of shelling out to `rm`
            if os.path.exists(log_path):
                os.remove(log_path)
def __call__(self, log_content):
log_content = self.add_info(log_content)
if self.print_out:
self.Print(log_content)
if self.log_path:
self.Save(self.log_path, log_content)
def add_info(self, log_content):
if self.log_time:
log_content = ('LOG[%s]: '%get_datetime())+log_content
return log_content
def Print(self, log_content):
print(log_content)
def Save(self, log_path, log_content):
if log_path:
with open(log_path, 'a') as f:
f.write(log_content+'\n')
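# A minimal usage sketch for LogWriter (log_path=None prints only; pass a
# real path to also append to a file):
if __name__ == '__main__':
    logger = LogWriter(log_path=None)
    logger('demo message')  # -> LOG[mmdd_HHMM]: demo message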
|
from model.contact import Contact
def test_add_contact(app):
old_contact = app.contact.get_contact_list()
contact = Contact(firstname="Ivan", lastname="Ivanov")
app.contact.add(contact)
assert len(old_contact) + 1 == app.contact.count()
new_contact = app.contact.get_contact_list()
old_contact.append(contact)
assert sorted(old_contact, key=Contact.id_or_max) == sorted(new_contact, key=Contact.id_or_max)
|
# Copyright 2019-2020 Not Just A Toy Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as ty
from falcon_heavy.utils import cached_property
from .base import AbstractConvertible, BaseType
from .path import Path
__all__ = (
'AnyType',
'LazyType',
)
class AnyType(BaseType):
"""Any type"""
__slots__ = ()
T = ty.TypeVar('T')
class LazyType(AbstractConvertible[T]):
"""Lazy type
    Resolves the target type when it is needed
:param resolver: callable for lazy resolving of target type
"""
def __init__(self, resolver: ty.Callable[[], AbstractConvertible[T]], **kwargs: ty.Any) -> None:
self.resolver = resolver
super(LazyType, self).__init__(**kwargs)
@cached_property
def resolved(self) -> AbstractConvertible[T]:
return self.resolver()
def convert(self, value: ty.Any, path: Path, *args: ty.Any, **context: ty.Any) -> ty.Optional[T]:
return self.resolved.convert(value, path, **context)
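# A minimal sketch of lazy resolution (hedged: the constructor arguments
# for the concrete target type below are assumptions):
#
#     lazy = LazyType(lambda: some_concrete_type)
#     lazy.convert(value, path)   # the resolver runs on first use only,
#                                 # then `resolved` is cached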
|
import functools
import os
import subprocess
import sys
import click
# TODO: CAMPid 0970432108721340872130742130870874321
def import_it(*segments):
import importlib
import pkg_resources
major = int(pkg_resources.get_distribution(__name__.partition('.')[0]).version.partition(".")[0])
m = {
"pyqt_tools": "pyqt{major}_tools".format(major=major),
"pyqt_plugins": "pyqt{major}_plugins".format(major=major),
"qt_tools": "qt{major}_tools".format(major=major),
"qt_applications": "qt{major}_applications".format(major=major),
}
majored = [m[segments[0]], *segments[1:]]
return importlib.import_module(".".join(majored))
qt_applications = import_it("qt_applications")
qt_tools = import_it("qt_tools")
fspath = getattr(os, 'fspath', str)
@click.group()
def main():
pass
def run(
application_name,
args=(),
environment=os.environ,
sys_platform=sys.platform,
):
modified_environment = qt_tools.create_environment(
reference=environment,
)
command_elements = qt_tools.create_command_elements(
name=application_name,
sys_platform=sys_platform,
)
completed_process = subprocess.run(
[*command_elements, *args],
env=modified_environment,
)
return completed_process.returncode
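# Each generated wrapper subcommand below simply forwards its unparsed CLI
# arguments to run(); e.g. invoking the `designer` subcommand with
# `--help` amounts to run('designer', args=['--help']).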
# written by build.py
# @main.command(
# add_help_option=False,
# context_settings={
# 'ignore_unknown_options': True,
# 'allow_extra_args': True,
# },
# )
# @click.pass_context
# def designer(ctx):
# return run('designer', args=ctx.args)
# ---- start of generated wrapper entry points
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def assistant(ctx):
return run('assistant', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def canbusutil(ctx):
return run('canbusutil', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def designer(ctx):
return run('designer', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def lconvert(ctx):
return run('lconvert', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def licheck64(ctx):
return run('licheck64', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def linguist(ctx):
return run('linguist', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def lprodump(ctx):
return run('lprodump', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def lrelease(ctx):
return run('lrelease', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def lrelease_pro(ctx):
return run('lrelease-pro', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def lupdate(ctx):
return run('lupdate', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def lupdate_pro(ctx):
return run('lupdate-pro', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def moc(ctx):
return run('moc', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def pixeltool(ctx):
return run('pixeltool', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qcollectiongenerator(ctx):
return run('qcollectiongenerator', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qdbus(ctx):
return run('qdbus', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qdbuscpp2xml(ctx):
return run('qdbuscpp2xml', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qdbusviewer(ctx):
return run('qdbusviewer', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qdbusxml2cpp(ctx):
return run('qdbusxml2cpp', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qdistancefieldgenerator(ctx):
return run('qdistancefieldgenerator', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qdoc(ctx):
return run('qdoc', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qgltf(ctx):
return run('qgltf', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qhelpgenerator(ctx):
return run('qhelpgenerator', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qlalr(ctx):
return run('qlalr', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmake(ctx):
return run('qmake', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qml(ctx):
return run('qml', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlcachegen(ctx):
return run('qmlcachegen', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmleasing(ctx):
return run('qmleasing', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlformat(ctx):
return run('qmlformat', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlimportscanner(ctx):
return run('qmlimportscanner', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmllint(ctx):
return run('qmllint', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlmin(ctx):
return run('qmlmin', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlplugindump(ctx):
return run('qmlplugindump', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlpreview(ctx):
return run('qmlpreview', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlprofiler(ctx):
return run('qmlprofiler', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmlscene(ctx):
return run('qmlscene', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmltestrunner(ctx):
return run('qmltestrunner', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qmltyperegistrar(ctx):
return run('qmltyperegistrar', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qscxmlc(ctx):
return run('qscxmlc', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qtattributionsscanner(ctx):
return run('qtattributionsscanner', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qtdiag(ctx):
return run('qtdiag', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qtpaths(ctx):
return run('qtpaths', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qtplugininfo(ctx):
return run('qtplugininfo', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qtwaylandscanner(ctx):
return run('qtwaylandscanner', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def qvkgen(ctx):
return run('qvkgen', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def rcc(ctx):
return run('rcc', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def repc(ctx):
return run('repc', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def sdpscanner(ctx):
return run('sdpscanner', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def tracegen(ctx):
return run('tracegen', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def uic(ctx):
return run('uic', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def xmlpatterns(ctx):
return run('xmlpatterns', args=ctx.args)
@main.command(
add_help_option=False,
context_settings={
'ignore_unknown_options': True,
'allow_extra_args': True,
},
)
@click.pass_context
def xmlpatternsvalidator(ctx):
return run('xmlpatternsvalidator', args=ctx.args)
# ---- end of generated wrapper subcommands
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'publish_progress_form.ui'
#
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from tank.platform.qt import QtCore, QtGui
class Ui_PublishProgressForm(object):
def setupUi(self, PublishProgressForm):
PublishProgressForm.setObjectName("PublishProgressForm")
PublishProgressForm.resize(651, 384)
self.verticalLayout_4 = QtGui.QVBoxLayout(PublishProgressForm)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setSpacing(-1)
self.verticalLayout_3.setObjectName("verticalLayout_3")
spacerItem1 = QtGui.QSpacerItem(20, 100, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
self.verticalLayout_3.addItem(spacerItem1)
self.title = QtGui.QLabel(PublishProgressForm)
self.title.setStyleSheet("#title {\n"
"font-size: 24px;\n"
"}")
self.title.setObjectName("title")
self.verticalLayout_3.addWidget(self.title)
self.progress_bar = QtGui.QProgressBar(PublishProgressForm)
self.progress_bar.setProperty("value", 24)
self.progress_bar.setObjectName("progress_bar")
self.verticalLayout_3.addWidget(self.progress_bar)
self.details = QtGui.QLabel(PublishProgressForm)
self.details.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.details.setWordWrap(False)
self.details.setObjectName("details")
self.verticalLayout_3.addWidget(self.details)
self.stage_progress_bar = QtGui.QProgressBar(PublishProgressForm)
self.stage_progress_bar.setProperty("value", 24)
self.stage_progress_bar.setObjectName("stage_progress_bar")
self.verticalLayout_3.addWidget(self.stage_progress_bar)
spacerItem2 = QtGui.QSpacerItem(20, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem2)
self.verticalLayout_3.setStretch(5, 1)
self.horizontalLayout.addLayout(self.verticalLayout_3)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem3)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 5)
self.horizontalLayout.setStretch(2, 1)
self.verticalLayout_4.addLayout(self.horizontalLayout)
self.verticalLayout_4.setStretch(0, 1)
self.retranslateUi(PublishProgressForm)
QtCore.QMetaObject.connectSlotsByName(PublishProgressForm)
def retranslateUi(self, PublishProgressForm):
PublishProgressForm.setWindowTitle(QtGui.QApplication.translate("PublishProgressForm", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.title.setText(QtGui.QApplication.translate("PublishProgressForm", "Publishing...", None, QtGui.QApplication.UnicodeUTF8))
self.details.setText(QtGui.QApplication.translate("PublishProgressForm", "(Details)", None, QtGui.QApplication.UnicodeUTF8))
from . import resources_rc
|
import sys
import os
from pathlib import Path
import shutil
unary_op = ['-', '~']
op = ['+', '*', '&', '<', '>', '/', '|', '-', '=']
class_var_dec = ['field', 'static']
types = ['int', 'char', 'boolean']
statements = ['let', 'while', 'if', 'do', 'return']
subDec = ['constructor', 'method', 'function']
keywordConstants = ['true', 'false', 'null', 'this']
keywords = ['class', 'function', 'constructor', 'method', 'var', 'int', 'char', 'boolean', 'void', 'true', 'false', 'null', 'this', 'field', 'static', 'let', 'if', 'while', 'do', 'else', 'return']
symbols = ['=','.', '-', '+', '/', '*', '{', '}', '(', ')', '<', '>', '|', '&', ';', '[', ']', ',', '~']
tokenIndex = 0
analysed_tokens = list()
currentToken = tuple()
output_file = None
def compileClass():
output = '<class>\n'
writeXML(output)
# keyword class
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
# class Name
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
eat('{')
while currentToken[1] != '}':
if currentToken[1] in class_var_dec:
compileClassVarDec()
elif currentToken[1] in subDec:
compileSubRoutineDec()
eat('}')
writeXML('</class>\n')
def compileClassVarDec():
writeXML('<classVarDec>\n')
# field/static
eat(currentToken[1])
# int, boolean, char
eat(currentToken[1])
# varname
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
while currentToken[1] == ',':
eat(',')
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
eat(';')
writeXML('</classVarDec>\n')
def compileSubRoutineDec():
writeXML('<subroutineDec>\n')
# constructor/method/function
eat(currentToken[1])
# void/types
if currentToken[1] == 'void' or currentToken[1] in types:
eat(currentToken[1])
# className type
else:
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
# subroutineName
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
eat('(')
compileParameterList()
eat(')')
compileSubRoutineBody()
writeXML('</subroutineDec>\n')
def compileParameterList():
writeXML('<parameterList>\n')
while currentToken[1] != ')':
# var type
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
# variable name
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
if currentToken[1] == ',':
eat(',')
writeXML('</parameterList>\n')
def compileSubRoutineBody():
writeXML('<subroutineBody>\n')
eat('{')
while currentToken[1] != '}':
if currentToken[1] == 'var':
writeXML('<varDec>\n')
eat('var')
# variable type
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
# varName
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
if currentToken[1] == ',':
eat(',')
# varName
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
eat(';')
writeXML('</varDec>\n')
# compile statements
elif currentToken[1] in statements:
compileStatements()
eat('}')
writeXML('</subroutineBody>\n')
def compileStatements():
writeXML('<statements>\n')
while currentToken[1] in statements:
if currentToken[1] == 'if':
compileIfStatement()
elif currentToken[1] == 'while':
compileWhileStatement()
elif currentToken[1] == 'let':
compileLetStatement()
elif currentToken[1] == 'do':
compileDoStatement()
elif currentToken[1] == 'return':
compileReturnStatement()
writeXML('</statements>\n')
def compileIfStatement():
writeXML('<ifStatement>\n')
eat('if')
eat('(')
compileExpression()
eat(')')
eat('{')
compileStatements()
eat('}')
if currentToken[1] == 'else':
eat('else')
eat('{')
compileStatements()
eat('}')
writeXML('</ifStatement>\n')
def compileWhileStatement():
writeXML('<whileStatement>\n')
eat('while')
eat('(')
compileExpression()
eat(')')
eat('{')
if currentToken[1] != '}':
compileStatements()
eat('}')
writeXML('</whileStatement>\n')
def compileLetStatement():
writeXML('<letStatement>\n')
eat('let')
if currentToken[0] =='identifier':
first = currentToken
advanceToken()
# Array reference
if currentToken[1] == '[':
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
eat('[')
compileExpression()
eat(']')
# usual variable name
else:
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
else:
exitCompilation()
eat('=')
compileExpression()
eat(';')
writeXML('</letStatement>\n')
def compileDoStatement():
writeXML('<doStatement>\n')
eat('do')
if currentToken[0] == 'identifier':
first = currentToken
advanceToken()
# identifier()
if currentToken[1] == '(':
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
eat('(')
compileExpressionList()
eat(')')
# className.method()
elif currentToken[1] == '.':
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
eat('.')
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
eat('(')
compileExpressionList()
eat(')')
else:
exitCompilation()
else:
exitCompilation()
eat(';')
writeXML('</doStatement>\n')
def compileReturnStatement():
writeXML('<returnStatement>\n')
eat('return')
if currentToken[1] != ';':
compileExpression()
eat(';')
else:
eat(';')
writeXML('</returnStatement>\n')
def compileExpression():
writeXML('<expression>\n')
compileTerm()
while currentToken[1] in op:
eat(currentToken[1])
compileTerm()
writeXML('</expression>\n')
def compileExpressionList():
writeXML('<expressionList>\n')
while currentToken[1] != ')':
compileExpression()
if currentToken[1] == ',':
eat(',')
continue
writeXML('</expressionList>\n')
def compileTerm():
writeXML('<term>\n')
    # if it is an int or str constant, we output and advance the token
if currentToken[0] == 'integerConstant' or currentToken[0] == 'stringConstant':
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
writeXML('</term>\n')
advanceToken()
    # if it is a keyword constant, we output and advance the token
elif currentToken[1] in keywordConstants:
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
writeXML('</term>\n')
advanceToken()
    # expression in parentheses e.g. sum = (2+3)
elif currentToken[1] == '(':
eat('(')
compileExpression()
eat(')')
writeXML('</term>\n')
    # but if it is an identifier, then we need to check the next token
elif currentToken[0] == 'identifier':
first = currentToken
# get the next token, current token refers to it
advanceToken()
# array reference
if currentToken[1] == '[':
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
eat('[')
compileExpression()
eat(']')
writeXML('</term>\n')
# subroutine call eg Main.output()
elif currentToken[1] == '.':
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
eat('.')
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
eat('(')
compileExpressionList()
eat(')')
writeXML('</term>\n')
# subroutine call eg cry()
elif currentToken[1] == '(':
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
eat('(')
compileExpressionList()
eat(')')
writeXML('</term>\n')
# else a usual varname
else:
output = '<'+first[0]+'> '+first[1]+' </'+first[0]+'>\n'
writeXML(output)
writeXML('</term>\n')
# if -7 or ~term
elif currentToken[1] in unary_op:
eat(currentToken[1])
compileTerm()
writeXML('</term>\n')
else:
exitCompilation()
def eat(currentTokenCompare):
    if currentToken[1] == currentTokenCompare:
        # '<', '>' and '&' must be written as XML entities
        # (&lt;, &gt;, &amp;) to keep the output well-formed
        if currentToken[1] == '>':
            output = '<'+currentToken[0]+'> '+'&gt;'+' </'+currentToken[0]+'>\n'
            writeXML(output)
            advanceToken()
        elif currentToken[1] == '<':
            output = '<'+currentToken[0]+'> '+'&lt;'+' </'+currentToken[0]+'>\n'
            writeXML(output)
            advanceToken()
        elif currentToken[1] == '&':
            output = '<'+currentToken[0]+'> '+'&amp;'+' </'+currentToken[0]+'>\n'
            writeXML(output)
            advanceToken()
else:
output = '<'+currentToken[0]+'> '+currentToken[1]+' </'+currentToken[0]+'>\n'
writeXML(output)
advanceToken()
else:
exitCompilation()
def advanceToken():
global tokenIndex
global currentToken
tokenIndex += 1
if tokenIndex < len(analysed_tokens):
currentToken = analysed_tokens[tokenIndex]
def exitCompilation():
print(currentToken)
print('Syntax error')
    sys.exit(1)
def tokenizer(jackFile):
tokensList = list()
with open(jackFile, 'r') as TokenizerInput:
line = TokenizerInput.readline()
while line != '':
if line.strip().startswith('/') or line.startswith('\n') or line.strip().startswith('*'):
line = TokenizerInput.readline()
continue
else:
tokenLine = line.strip().split('//')[0]
if tokenLine != '':
for letter in tokenLine:
tokensList.append(letter)
line = TokenizerInput.readline()
# print(tokensList)
len_of_tokensList = len(tokensList)
tokensListIndex = 0
tokenString = ""
while tokensListIndex < len_of_tokensList:
# check if we reached a space
if tokensList[tokensListIndex] == ' ':
# do we have integer constant
if tokenString != "":
if tokenString.isnumeric():
token = ("integerConstant",tokenString)
analysed_tokens.append(token)
tokenString = ""
tokensListIndex += 1
elif tokenString in keywords:
token = ("keyword", tokenString)
analysed_tokens.append(token)
tokenString = ""
tokensListIndex += 1
else:
token = ("identifier", tokenString)
analysed_tokens.append(token)
tokenString = ""
tokensListIndex += 1
else:
tokensListIndex += 1
# check if we reached a symbol
elif tokensList[tokensListIndex] in symbols:
# if we have a symbol, check to see if token string is empty
if tokenString != "":
if tokenString.isnumeric():
token = ("integerConstant",tokenString)
analysed_tokens.append(token)
token = ("symbol", tokensList[tokensListIndex])
analysed_tokens.append(token)
tokenString = ""
tokensListIndex += 1
elif tokenString in keywords:
token = ("keyword", tokenString)
analysed_tokens.append(token)
token = ("symbol", tokensList[tokensListIndex])
analysed_tokens.append(token)
tokenString = ""
tokensListIndex += 1
else:
token = ("identifier", tokenString)
analysed_tokens.append(token)
token = ("symbol", tokensList[tokensListIndex])
analysed_tokens.append(token)
tokenString = ""
tokensListIndex += 1
else:
token = ("symbol", tokensList[tokensListIndex])
analysed_tokens.append(token)
tokensListIndex += 1
# else append to token string
else:
            # check that we are not at the start of a string constant
if tokensList[tokensListIndex] != '"':
tokenString += tokensList[tokensListIndex]
tokensListIndex += 1
# if we have string constant, form it and continue the loop
else:
stringConst = ''
tokensListIndex += 1
while tokensListIndex < len_of_tokensList and tokensList[tokensListIndex] != '"':
stringConst += tokensList[tokensListIndex]
tokensListIndex += 1
analysed_tokens.append(("stringConstant", stringConst))
tokensListIndex += 1
# print(analysed_tokens)
def writeXML(string):
with open(output_file, 'a') as xmlfile:
xmlfile.write(string)
def openDirForAnalysis(directory):
directory = Path(directory)
for jackFile in os.listdir(directory):
if jackFile.endswith('.jack'):
global output_file
output_file = jackFile.split('.')[0]+'.xml'
file_to_analyze = directory/jackFile
tokenizer(file_to_analyze)
global currentToken
currentToken = analysed_tokens[tokenIndex]
compileClass()
shutil.move(output_file, directory)
output_file = None
def openFileForAnalysis(jackFile):
tokenizer(jackFile)
global output_file
output_file = jackFile.split('.')[0]+'.xml'
global currentToken
currentToken = analysed_tokens[tokenIndex]
compileClass()
def main():
fileOrDir = sys.argv[1]
if os.path.isdir(fileOrDir):
directory = fileOrDir
openDirForAnalysis(directory)
else:
jackFile = fileOrDir
openFileForAnalysis(jackFile)
if __name__ == "__main__":
main()
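# Usage sketch (file name is illustrative): running
#   python <this_script>.py Main.jack
# tokenizes Main.jack and writes the parse tree to Main.xml; passing a
# directory instead processes every .jack file it contains.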
|
from django.db import models
from django.utils import timezone
# Create your models here.
class category(models.Model):
"""docstring for category"""
categoryName = models.CharField(max_length=50, primary_key=True)
categoryCount = models.IntegerField(default=0)
def __str__(self):
return "%s" %(self.categoryName)
class user(models.Model):
"""docstring for user"""
    username = models.CharField(max_length=400, primary_key=True)
    password = models.CharField(max_length=42)
def __str__(self):
return "%s" %(self.username)
class act(models.Model):
"""docstring for Act"""
actId = models.IntegerField(primary_key=True)
username = models.ForeignKey(user, default='johndoe==', on_delete=models.SET_DEFAULT)
    timestamp = models.DateTimeField(default=timezone.now)
caption = models.CharField(max_length=200)
upvotes = models.IntegerField(default=0)
imgB64 = models.CharField(max_length=1000)
categoryName = models.ForeignKey(category, on_delete=models.CASCADE)
def __str__(self):
return "%d, %s, %s" %(self.actId, self.imgB64, self.caption)
|
# -*- coding: utf-8 -*-
'''
Use the cloud cache on the master to derive IPv4 addresses based on minion ID.
This roster requires that the minion in question was created using at least the
2015.2.0 version of Salt Cloud. Starting with the 2015.2.0 release, Salt Cloud
maintains an index of minions that it creates and deletes. This index tracks the
provider and profile configuration used to provision the minion, including
authentication information. So long as this configuration remains current, it can
be used by Salt SSH to log into any minion in the index.
'''
# Import python libs
from __future__ import absolute_import
import os.path
# Import 3rd-party libs
import msgpack
# Import Salt libs
import salt.cloud
import salt.loader
import salt.utils
import salt.utils.cloud
import salt.utils.validate.net
import salt.config
from salt import syspaths
from salt.ext.six import string_types
def targets(tgt, tgt_type='glob', **kwargs): # pylint: disable=W0613
'''
    Return the Salt SSH target data for a minion by looking its ID up in
    the Salt Cloud index and querying the provider for its addresses
'''
ret = {}
cache = os.path.join(syspaths.CACHE_DIR, 'cloud', 'index.p')
if not os.path.exists(cache):
return {}
    with salt.utils.fopen(cache, 'rb') as fh_:
cache_data = msgpack.load(fh_)
indexed_minion = cache_data.get(tgt, None)
if indexed_minion is None:
return {}
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud')
)
info = client.action('show_instance', names=[tgt])
if not info:
return {}
provider = indexed_minion.get('provider', None)
profile = indexed_minion.get('profile', None)
driver = indexed_minion.get('driver', None)
vm_ = {
'provider': provider,
'profile': profile,
}
full_info = info.get(provider, {}).get(driver, {}).get(tgt, {}).get(tgt, {})
public_ips = full_info.get('public_ips', [])
private_ips = full_info.get('private_ips', [])
ip_list = []
for item in (public_ips, private_ips):
if isinstance(item, list):
ip_list = ip_list + item
elif isinstance(item, string_types):
ip_list.append(item)
roster_order = __opts__.get('roster_order', (
'public', 'private', 'local'
))
preferred_ip = extract_ipv4(roster_order, ip_list)
    ret[tgt] = {
        'host': preferred_ip,
    }
    cloud_opts = salt.config.cloud_config('/etc/salt/cloud')
    ssh_username = salt.utils.cloud.ssh_usernames({}, cloud_opts)
    if isinstance(ssh_username, string_types):
        ret[tgt]['user'] = ssh_username
    elif isinstance(ssh_username, list):
        ret[tgt]['user'] = ssh_username[0]
    password = salt.config.get_cloud_config_value(
        'password', vm_, cloud_opts, search_global=False, default=None
    )
    if password:
        ret[tgt]['password'] = password
    key_filename = salt.config.get_cloud_config_value(
        'private_key', vm_, cloud_opts, search_global=False, default=None
    )
    if key_filename:
        ret[tgt]['priv'] = key_filename
return ret
def extract_ipv4(roster_order, ipv4):
'''
Extract the preferred IP address from the ipv4 grain
'''
for ip_type in roster_order:
for ip_ in ipv4:
if ':' in ip_:
continue
if not salt.utils.validate.net.ipv4_addr(ip_):
continue
if ip_type == 'local' and ip_.startswith('127.'):
return ip_
elif ip_type == 'private' and not salt.utils.cloud.is_public_ip(ip_):
return ip_
elif ip_type == 'public' and salt.utils.cloud.is_public_ip(ip_):
return ip_
return None
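# For example, with roster_order = ('public', 'private', 'local') and
# ip_list = ['10.0.0.5', '203.0.113.7'], extract_ipv4 returns
# '203.0.113.7': the 10.x address is private and would only win if
# 'private' came first in the order.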
|
import cv2
import numpy as np
from base_camera import BaseCamera
def merge(left_image, right_image):
return np.concatenate((left_image, right_image), axis=1)
class Camera(BaseCamera):
video_source_1 = 1
video_source_2 = 2
@staticmethod
def set_video_sources(source_1, source_2):
Camera.video_source_1 = source_1
Camera.video_source_2 = source_2
@staticmethod
def frames():
camera_1 = cv2.VideoCapture(Camera.video_source_1)
camera_2 = cv2.VideoCapture(Camera.video_source_2)
        if not (camera_1.isOpened() and camera_2.isOpened()):
raise RuntimeError('Could not start the cameras.')
while True:
# read current frame
_, img_1 = camera_1.read()
_, img_2 = camera_2.read()
img = merge(img_1, img_2)
# encode as a jpeg image and return it
yield cv2.imencode('.jpg', img)[1].tobytes()
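# A minimal usage sketch (device indices are hypothetical; BaseCamera
# normally drives frames() internally):
#
#     Camera.set_video_sources(0, 1)
#     jpeg_bytes = next(Camera.frames())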
|
# Authors: Cicely Motamedi, Adam Robinson
# Description: This file contains the main code for the microscope user interface.
# Some of the more complicated functionality is found in other files.
from kivy.app import App
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.textinput import TextInput
from kivy.uix.accordion import Accordion, AccordionItem
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import AsyncImage
from kivy.uix.slider import Slider
from kivy.config import Config
from kivy.core.window import Window
from kivy.clock import Clock
from threading import Thread
from kivy.uix.gridlayout import GridLayout
import sys
sys.path.append("..\\src")
from MicroscopeControl import MicroscopeController
from CustomBoxLayout import CustomBoxLayout
from ImageDisplay import ImageDisplay
import numpy as np
class accordionWidget(Accordion):
def __init__(self, *args, **kwargs):
kwargs['orientation'] = 'vertical'
kwargs['size_hint_x'] = 2
super(accordionWidget, self).__init__(*args, **kwargs)
#root = Accordion(orientation='vertical', size_hint_x=2)
item1 = AccordionItem(title='Camera')
item1.add_widget(Slider(min=-100, max=100, value=25))
self.add_widget(item1)
item2 = AccordionItem(title='Zoom and Focus')
box2 = BoxLayout(orientation='vertical')
instructions = Label(text='Enter a value between 0 and 1',size_hint_y=1)
box2.add_widget(instructions)
label1 = BoxLayout(orientation='horizontal',size_hint_y=1)
zoomLabel = Label(text='Zoom',size_hint_x=None,width=100, size_hint_y=None, height=40)
ZminusButton = Button(text='-',size_hint_x=None,width=30,size_hint_y=None,height=40)
self.zoomInput = TextInput(text='0.005',multiline=False,size_hint_x=None,width=100,size_hint_y=None,height=40)
ZplusButton = Button(text='+',size_hint_x=None,width=30,size_hint_y=None,height=40)
label1.add_widget(zoomLabel)
label1.add_widget(ZminusButton)
label1.add_widget(self.zoomInput)
label1.add_widget(ZplusButton)
box2.add_widget(label1)
self.zoomInput.bind(on_text_validate=self.setZoom)
ZminusButton.bind(on_release=self.incrementZoomMinus)
ZplusButton.bind(on_release=self.incrementZoomPlus)
instructions2 = Label(text='Enter a value between 0 and 1',size_hint_y=1)
box2.add_widget(instructions2)
label2 = BoxLayout(orientation='horizontal',size_hint_y=1)
focusLabel = Label(text='Focus',size_hint_x=None, width=100,size_hint_y=None,height=40)
FminusButton = Button(text='-',size_hint_x=None,width=30,size_hint_y=None,height=40)
self.focusInput = TextInput(text='0.005',multiline=False,size_hint_x=None,width=100,size_hint_y=None,height=40)
FplusButton = Button(text='+',size_hint_x=None,width=30,size_hint_y=None,height=40)
box2.add_widget(label2)
label2.add_widget(focusLabel)
label2.add_widget(FminusButton)
label2.add_widget(self.focusInput)
label2.add_widget(FplusButton)
self.focusInput.bind(on_text_validate=self.setFocus)
FminusButton.bind(on_release=self.incrementFocusMinus)
FplusButton.bind(on_release=self.incrementFocusPlus)
item2.add_widget(box2)
self.add_widget(item2)
item3 = AccordionItem(title='Stage Control')
gridLayout = GridLayout(cols=3)
gridLayout.add_widget(Button(opacity=0))
moveUp = Button(text='Up')
gridLayout.add_widget(moveUp)
gridLayout.add_widget(Button(opacity=0))
moveLeft = Button(text='Left')
gridLayout.add_widget(moveLeft)
gridLayout.add_widget(Button(opacity=0))
moveRight = Button(text='Right')
gridLayout.add_widget(moveRight)
gridLayout.add_widget(Button(opacity=0))
moveDown = Button(text='Down')
gridLayout.add_widget(moveDown)
gridLayout.add_widget(Button(opacity=0))
moveUp.bind(on_press=self.clockMoveUp)
moveUp.bind(on_release=self.stopClock)
item3.add_widget(gridLayout)
self.add_widget(item3)
item4 = AccordionItem(title='Image Settings')
item4.add_widget(Slider(min=-100, max=100, value=25))
self.add_widget(item4)
self.microscope = None
# self.zooming = False
# self.zoom_value = 0.5
# self.closing = False
# self.zoom_thread = Thread(target=self.adjustZoom)
# self.zoom_thread.start()
# self.focusing = False
# self.focus_value = 0.5
# self.focus_thread = Thread(target=self.adjustFocus)
# self.focus_thread.start()
def close(self):
pass
# def adjustZoom(self):
# while not self.closing:
# if self.microscope is not None and not self.zooming:
# current = self.microscope.focus.getZoom()
# if np.abs(current - self.zoom_value) > 0.005 and not self.zooming:
# def done():
# self.zooming = False
# self.zooming = True
# self.microscope.focus.setZoom(self.zoom_value, corrected=False, callback=done)
def setZoom(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting zoom")
#print(dir(object))
value = object.text
print(value)
try:
value = float(value)
except Exception as ex:
return
if value >= 0.005 and value <= 0.995:
print(value)
self.microscope.focus.setZoom(value, corrected=False, cb=done)
else:
print("Invalid input")
# def adjustFocus(self):
# while not self.closing:
# if self.microscope is not None and not self.focusing:
# current = self.microscope.focus.getFocus()
# if np.abs(current - self.focus_value) > 0.005 and not self.focusing:
# def done():
# self.focusing = False
# self.focusing = True
# self.microscope.focus.setFocus(self.focus_value, corrected=False, callback=done)
def setFocus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = object.text
print(value)
try:
value=float(value)
except Exception as ex:
return
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setFocus(value, corrected=False, cb=done)
else:
print("Invalid input")
def setMicroscope(self, ms):
self.microscope = ms
def incrementZoomMinus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.zoomInput.text)
value -= 0.1
self.zoomInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setZoom(value, corrected=False, cb=done)
else:
print("Invalid input")
def incrementZoomPlus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.zoomInput.text)
value += 0.1
self.zoomInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setZoom(value, corrected=False, cb=done)
else:
print("Invalid input")
def incrementFocusMinus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.focusInput.text)
value -= 0.1
self.focusInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setFocus(value, corrected=False, cb=done)
else:
print("Invalid input")
def incrementFocusPlus(self, object):
def done(error, val):
if error is None:
print(val)
else:
print("Error setting focus")
value = float(self.focusInput.text)
value += 0.1
self.focusInput.text = "%1.4f"%value
if value >= 0.005 and value <= 0.995:
self.microscope.focus.setFocus(value, corrected=False, cb=done)
else:
print("Invalid input")
def moveIncrementUp(self,a):
def done(error, value):
if error is None:
print(value)
else:
print(error)
self.microscope.stage.moveDelta(0, 0.01, cb=done)
def clockMoveUp(self,a):
Clock.schedule_interval(self.moveIncrementUp,0.01)
def stopClock(self, a):
Clock.unschedule(self.moveIncrementUp)
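    # Press-and-hold pattern: on_press schedules moveIncrementUp() every
    # 10 ms via Clock and on_release unschedules it, so the stage keeps
    # stepping for as long as the Up button is held down.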
class userInterface(BoxLayout):
def initializeMicroscope(self):
self.microscope = MicroscopeController()
def close(self):
self.accordion.close()
if self.microscope is not None:
self.microscope.cleanup()
def __init__(self, **kwargs):
kwargs['orientation'] = 'horizontal'
super(userInterface, self).__init__(**kwargs)
self.accordion = accordionWidget()
self.add_widget(self.accordion)
self.display = BoxLayout(orientation='vertical', size_hint_x=4)
self.microscope = None
Thread(target=self.initializeMicroscope).start()
self.microscope_loaded = False
def checkMicroscope(a):
if not self.microscope_loaded and self.microscope is not None:
self.microscope_loaded = True
self.microscope.camera.enableLowRes()
self.accordion.setMicroscope(self.microscope)
self.microscope.camera.startCapture()
if self.microscope_loaded:
img = self.microscope.camera.getFrame()
img = np.rot90(img, 3, axes=(0, 1))
img = np.flipud(img)
self.image_display.setImage(img)
Clock.schedule_interval(checkMicroscope, 1 / 10)
# def _check_process(b):
# self.load_progress.value = self.current_progress
# if not t.is_alive():
# Clock.unschedule(_check_process)
# self.load_progress.opacity = 0
# self._parent_obj.interface.preview_pane.loadThumbnails(
# self._parent_obj.dataset
# )
# Clock.schedule_interval(_check_process, .025)
# self.display.add_widget(
# AsyncImage(source="https://images.squarespace-cdn.com/content/v1/5a5906400abd0406785519dd/1552662149940-G6MMFW3JC2J61UBPROJ5/ke17ZwdGBToddI8pDm48kLkXF2pIyv_F2eUT9F60jBl7gQa3H78H3Y0txjaiv_0fDoOvxcdMmMKkDsyUqMSsMWxHk725yiiHCCLfrh8O1z4YTzHvnKhyp6Da-NYroOW3ZGjoBKy3azqku80C789l0iyqMbMesKd95J-X4EagrgU9L3Sa3U8cogeb0tjXbfawd0urKshkc5MgdBeJmALQKw/baelen.jpg?format=1500w",size_hint_y=10)
# )
self.image_display = ImageDisplay(orientation='vertical', size_hint_y=10)
self.display.add_widget(self.image_display)
img = np.random.normal(0.0, 127, (1024, 1024, 3)).astype(np.uint8)
self.image_display.setImage(img)
self.input1 = BoxLayout(orientation='horizontal',size_hint_y=None, height=30)
xLabel = Label(text='X=',size_hint_x=1, size_hint_y=None, height=30)
self.xInput = TextInput(multiline=False,size_hint_x=4, size_hint_y=None, height=30)
yLabel = Label(text='Y=',size_hint_x=1, size_hint_y=None, height=30)
self.yInput = TextInput(multiline=False,size_hint_x=4, size_hint_y=None, height=30)
self.input1.add_widget(xLabel)
self.input1.add_widget(self.xInput)
#self.input2 = BoxLayout(orientation='horizontal',size_hint_y=1)
self.input1.add_widget(yLabel)
self.input1.add_widget(self.yInput)
self.add_widget(self.display)
self.display.add_widget(self.input1)
#self.display.add_widget(self.input2)
self.yInput.bind(on_text_validate=self.moveTo)
self.xInput.bind(on_text_validate=self.moveTo)
def moveTo(self, object):
        def done(error, val):
if error is None:
print(val)
else:
print("Error moving stage")
xvalue = self.xInput.text
yvalue = self.yInput.text
print(xvalue, yvalue)
if xvalue.strip() == "" or xvalue is None:
xvalue = "0.0"
if yvalue.strip() == "" or yvalue is None:
yvalue = "0.0"
try:
xvalue=float(xvalue)
yvalue=float(yvalue)
except Exception as ex:
return
print(xvalue, yvalue)
        if -50 <= xvalue <= 50 and -44 <= yvalue <= 37:
            self.microscope.stage.moveTo(xvalue, yvalue, callback=done)
        else:
            print("Invalid input")
class userInterfaceApp(App):
def on_request_close(self, *args):
print("close called")
self.interface.close()
return False
def build(self):
self.interface = userInterface()
Window.bind(on_request_close=self.on_request_close)
return self.interface
if __name__=="__main__":
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
userInterfaceApp().run()
|
from ttypes import *
# Alias for rpc_storage_mode
class StorageMode:
""" Confluo storage modes.
Attributes:
IN_MEMORY: Data stored in memory.
        DURABLE_RELAXED: Data is persisted with relaxed (non-linearizable) guarantees.
DURABLE: Data is persisted.
"""
def __init__(self):
pass
IN_MEMORY = rpc_storage_mode.RPC_IN_MEMORY
DURABLE_RELAXED = rpc_storage_mode.RPC_DURABLE_RELAXED
DURABLE = rpc_storage_mode.RPC_DURABLE
|
# -*- coding: utf-8 -*-
class ThreadSafeCreateMixin(object):
"""
    ThreadSafeCreateMixin can be used as a base class for thread-safe
    backend implementations
"""
@classmethod
def create(cls):
"""
        Always return a new instance of the backend class
"""
return cls()
class SingletonCreateMixin(object):
"""
    SingletonCreateMixin can be used as a base class for singleton
    backend implementations
"""
_instances = {}
@classmethod
def create(cls):
"""
        Always return the same instance of the backend class
"""
if cls not in cls._instances:
cls._instances[cls] = cls()
return cls._instances[cls]
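# A minimal sketch of the difference between the two mixins (the demo
# backend class is hypothetical):
if __name__ == '__main__':
    class DemoBackend(SingletonCreateMixin):
        pass
    # create() hands back the same cached instance every time
    assert DemoBackend.create() is DemoBackend.create()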
|
"""Amendment of the DataLad `GitRepo` base class"""
__docformat__ = 'restructuredtext'
from . import utils as ut
from datalad.support.gitrepo import (
GitRepo as RevolutionGitRepo
)
obsolete_methods = (
'is_dirty',
)
# remove deprecated methods from API
for m in obsolete_methods:
if hasattr(RevolutionGitRepo, m):
setattr(RevolutionGitRepo, m, ut.nothere)
|
from django.db.models import Q
from rest_framework import serializers, viewsets
from brambling.api.v1.permissions import BaseOrderPermission
from brambling.models import (
Attendee,
EnvironmentalFactor,
Order,
)
class AttendeePermission(BaseOrderPermission):
def has_permission(self, request, view):
# For now, disallow creation via the API.
if request.method == 'POST':
return False
return True
def has_object_permission(self, request, view, attendee):
return self._has_order_permission(request, attendee.order)
class AttendeeSerializer(serializers.HyperlinkedModelSerializer):
ef_cause = serializers.SlugRelatedField(
slug_field='name',
queryset=EnvironmentalFactor.objects.all(),
many=True,
)
ef_avoid = serializers.SlugRelatedField(
slug_field='name',
queryset=EnvironmentalFactor.objects.all(),
many=True,
)
order = serializers.HyperlinkedRelatedField(view_name='order-detail', read_only=True)
link = serializers.HyperlinkedIdentityField(view_name='attendee-detail')
full_name = serializers.SerializerMethodField()
class Meta:
model = Attendee
fields = (
'id', 'link', 'order', 'first_name', 'middle_name', 'last_name',
'name_order', 'basic_completed', 'email', 'phone',
'liability_waiver', 'photo_consent', 'housing_status',
'housing_completed', 'ef_cause', 'ef_avoid',
'person_prefer', 'person_avoid', 'housing_prefer',
'other_needs', 'full_name',
)
def get_full_name(self, obj):
return obj.get_full_name()
class AttendeeViewSet(viewsets.ModelViewSet):
queryset = Attendee.objects.all()
serializer_class = AttendeeSerializer
permission_classes = [AttendeePermission]
def get_queryset(self):
qs = self.queryset.all().distinct()
if 'order' in self.request.GET:
qs = qs.filter(order=self.request.GET['order'])
# Superusers can see all the things.
if self.request.user.is_superuser:
return qs
# Otherwise, if you're authenticated, you can see them
# if the order is yours or you administer the related event.
if self.request.user.is_authenticated():
return qs.filter(
Q(order__person=self.request.user) |
Q(order__event__members=self.request.user) |
Q(order__event__organization__members=self.request.user)
)
# Otherwise, you can view for orders in your session.
session_orders = Order.objects._get_session(self.request)
return qs.filter(order__code__in=session_orders.values())
|
c.NotebookApp.ip = '0.0.0.0' #'localhost' # bind address
c.NotebookApp.open_browser = False # do not open a browser automatically
#c.NotebookApp.ip = '*'
c.NotebookApp.allow_root = False
c.NotebookApp.port = 8889
c.NotebookApp.password = u'argon2:$argon2id$v=19$m=10240,t=10,p=8$zoyBM4a1oUIpw4bO/mciVQ$JN/sGgvyOhVpbpzU/z4u8HsAWG3dYXfavHFeaxADLgA'
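# The argon2 hash above can be regenerated interactively with:
#     from notebook.auth import passwd; passwd()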
|
"""Tests for letsencrypt.le_util."""
import errno
import os
import shutil
import stat
import tempfile
import unittest
import mock
from letsencrypt import errors
class MakeOrVerifyDirTest(unittest.TestCase):
"""Tests for letsencrypt.le_util.make_or_verify_dir.
Note that it is not possible to test for a wrong directory owner,
as this testing script would have to be run as root.
"""
def setUp(self):
self.root_path = tempfile.mkdtemp()
self.path = os.path.join(self.root_path, 'foo')
os.mkdir(self.path, 0o400)
self.uid = os.getuid()
def tearDown(self):
shutil.rmtree(self.root_path, ignore_errors=True)
def _call(self, directory, mode):
from letsencrypt.le_util import make_or_verify_dir
return make_or_verify_dir(directory, mode, self.uid)
def test_creates_dir_when_missing(self):
path = os.path.join(self.root_path, 'bar')
self._call(path, 0o650)
self.assertTrue(os.path.isdir(path))
self.assertEqual(stat.S_IMODE(os.stat(path).st_mode), 0o650)
def test_existing_correct_mode_does_not_fail(self):
self._call(self.path, 0o400)
self.assertEqual(stat.S_IMODE(os.stat(self.path).st_mode), 0o400)
def test_existing_wrong_mode_fails(self):
self.assertRaises(errors.Error, self._call, self.path, 0o600)
def test_reraises_os_error(self):
with mock.patch.object(os, 'makedirs') as makedirs:
makedirs.side_effect = OSError()
self.assertRaises(OSError, self._call, 'bar', 12312312)
class CheckPermissionsTest(unittest.TestCase):
"""Tests for letsencrypt.le_util.check_permissions.
Note that it is not possible to test for a wrong file owner,
as this testing script would have to be run as root.
"""
def setUp(self):
_, self.path = tempfile.mkstemp()
self.uid = os.getuid()
def tearDown(self):
os.remove(self.path)
def _call(self, mode):
from letsencrypt.le_util import check_permissions
return check_permissions(self.path, mode, self.uid)
def test_ok_mode(self):
os.chmod(self.path, 0o600)
self.assertTrue(self._call(0o600))
def test_wrong_mode(self):
os.chmod(self.path, 0o400)
self.assertFalse(self._call(0o600))
class UniqueFileTest(unittest.TestCase):
"""Tests for letsencrypt.le_util.unique_file."""
def setUp(self):
self.root_path = tempfile.mkdtemp()
self.default_name = os.path.join(self.root_path, 'foo.txt')
def tearDown(self):
shutil.rmtree(self.root_path, ignore_errors=True)
def _call(self, mode=0o600):
from letsencrypt.le_util import unique_file
return unique_file(self.default_name, mode)
def test_returns_fd_for_writing(self):
fd, name = self._call()
fd.write('bar')
fd.close()
self.assertEqual(open(name).read(), 'bar')
def test_right_mode(self):
self.assertEqual(0o700, os.stat(self._call(0o700)[1]).st_mode & 0o777)
self.assertEqual(0o100, os.stat(self._call(0o100)[1]).st_mode & 0o777)
def test_default_exists(self):
name1 = self._call()[1] # create 0000_foo.txt
name2 = self._call()[1]
name3 = self._call()[1]
self.assertNotEqual(name1, name2)
self.assertNotEqual(name1, name3)
self.assertNotEqual(name2, name3)
self.assertEqual(os.path.dirname(name1), self.root_path)
self.assertEqual(os.path.dirname(name2), self.root_path)
self.assertEqual(os.path.dirname(name3), self.root_path)
        basename1 = os.path.basename(name1)
self.assertTrue(basename1.endswith('foo.txt'))
basename2 = os.path.basename(name2)
self.assertTrue(basename2.endswith('foo.txt'))
basename3 = os.path.basename(name3)
self.assertTrue(basename3.endswith('foo.txt'))


class UniqueLineageNameTest(unittest.TestCase):
"""Tests for letsencrypt.le_util.unique_lineage_name."""
def setUp(self):
self.root_path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.root_path, ignore_errors=True)
def _call(self, filename, mode=0o777):
from letsencrypt.le_util import unique_lineage_name
return unique_lineage_name(self.root_path, filename, mode)
def test_basic(self):
f, name = self._call("wow")
self.assertTrue(isinstance(f, file))
self.assertTrue(isinstance(name, str))
def test_multiple(self):
for _ in xrange(10):
f, name = self._call("wow")
self.assertTrue(isinstance(f, file))
self.assertTrue(isinstance(name, str))
self.assertTrue("wow-0009.conf" in name)
@mock.patch("letsencrypt.le_util.os.fdopen")
def test_failure(self, mock_fdopen):
err = OSError("whoops")
err.errno = errno.EIO
mock_fdopen.side_effect = err
self.assertRaises(OSError, self._call, "wow")
@mock.patch("letsencrypt.le_util.os.fdopen")
def test_subsequent_failure(self, mock_fdopen):
self._call("wow")
err = OSError("whoops")
err.errno = errno.EIO
mock_fdopen.side_effect = err
self.assertRaises(OSError, self._call, "wow")


if __name__ == '__main__':
unittest.main() # pragma: no cover
|
import unittest

from source_document import SourceDocument
from test_tagged_document import create_test_repo
from tagged_document import TaggedDocument


class SourceDocumentTests(unittest.TestCase):
"""Unit tests for the Document class"""
def test_cleaning(self):
# Tests removing snippets
input_path = "tests/sample-expanded.txt"
reference_path = "tests/sample.txt"
reference_text = open(reference_path, "r").read()
document = SourceDocument(input_path)
self.assertEqual(document.cleaned_contents, reference_text)
def test_finding_documents(self):
found_documents = SourceDocument.find("tests", ["txt"])
        self.assertEqual(len(found_documents), 7)
def test_processing(self):
# Tests rendering a snippet using tagged documents.
repo = create_test_repo()
tagged_documents = TaggedDocument.find(repo, ["txt"])
self.assertTrue(tagged_documents)
input_path = "tests/sample.txt"
reference_path = "tests/sample-expanded.txt"
reference_text = open(reference_path, "r").read()
source = SourceDocument(input_path)
        rendered_output = source.render(tagged_documents, language="swift", show_query=False)
self.assertEqual(rendered_output, (reference_text, True))
|
from dataclasses import dataclass
from typing import Optional

import hyperstate as hs


@dataclass(eq=True)
class DeepInner:
x: int


@dataclass(eq=True)
class PPO:
inner: Optional[DeepInner] = None
cliprange: float = 0.2
gamma: float = 0.99
lambd: float = 0.95
entcoeff: float = 0.01
value_loss_coeff: float = 1


@dataclass(eq=True)
class Config:
lr: float
steps: int
ppo: PPO
task_id: str


def test_override() -> None:
config = hs.load(
Config,
file=None,
overrides=[
"task_id=CherryPick",
"lr=0.1",
"steps=100",
"ppo.cliprange=0.1",
"ppo.inner.x=10",
],
)
assert config == Config(
lr=0.1,
steps=100,
ppo=PPO(cliprange=0.1, inner=DeepInner(x=10)),
task_id="CherryPick",
)
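

# A minimal companion check (a sketch; it assumes the same `hs.load`
# signature as in `test_override` and that fields without overrides fall
# back to their dataclass defaults):
def test_defaults_preserved() -> None:
    config = hs.load(
        Config,
        file=None,
        overrides=["task_id=CherryPick", "lr=0.1", "steps=100"],
    )
    assert config.ppo == PPO()
    assert config.ppo.cliprange == 0.2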
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monte-Carlo estimation of the KL divergence."""
from typing import Optional
import chex
from distrax._src.distributions.distribution import DistributionLike
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions

PRNGKey = chex.PRNGKey


def estimate_kl_best_effort(
distribution_a: DistributionLike,
distribution_b: DistributionLike,
rng_key: PRNGKey,
num_samples: int,
proposal_distribution: Optional[DistributionLike] = None):
"""Estimates KL(distribution_a, distribution_b) exactly or with DiCE.
If the kl_divergence(distribution_a, distribution_b) is not supported,
the DiCE estimator is used instead.
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
rng_key: The PRNGKey random key.
num_samples: The number of samples, if using the DiCE estimator.
proposal_distribution: A proposal distribution for the samples, if using
the DiCE estimator. If None, use `distribution_a` as proposal.
Returns:
The estimated KL divergence.
"""
distribution_a = conversion.as_distribution(distribution_a)
distribution_b = conversion.as_distribution(distribution_b)
# If possible, compute the exact KL.
try:
return tfd.kl_divergence(distribution_a, distribution_b)
except NotImplementedError:
pass
return mc_estimate_kl(distribution_a, distribution_b, rng_key,
num_samples=num_samples,
proposal_distribution=proposal_distribution)


def mc_estimate_kl(
distribution_a: DistributionLike,
distribution_b: DistributionLike,
rng_key: PRNGKey,
num_samples: int,
proposal_distribution: Optional[DistributionLike] = None):
"""Estimates KL(distribution_a, distribution_b) with the DiCE estimator.
  To get correct gradients with respect to `distribution_a`, we use the DiCE
estimator, i.e., we stop the gradient with respect to the samples and with
respect to the denominator in the importance weights. We then do not need
reparametrized distributions.
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
rng_key: The PRNGKey random key.
    num_samples: The number of samples used by the estimator.
    proposal_distribution: A proposal distribution for the samples. If None,
      use `distribution_a` as proposal.
Returns:
The estimated KL divergence.
"""
if proposal_distribution is None:
proposal_distribution = distribution_a
proposal_distribution = conversion.as_distribution(proposal_distribution)
distribution_a = conversion.as_distribution(distribution_a)
distribution_b = conversion.as_distribution(distribution_b)
samples, logp_proposal = proposal_distribution.sample_and_log_prob(
seed=rng_key, sample_shape=[num_samples])
samples = jax.lax.stop_gradient(samples)
logp_proposal = jax.lax.stop_gradient(logp_proposal)
logp_a = distribution_a.log_prob(samples)
logp_b = distribution_b.log_prob(samples)
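  # With the default proposal (`distribution_a` itself), the weight below is
  # 1 in the forward pass, but it still carries the score-function gradient
  # through `logp_a`; this is the DiCE trick described in the docstring.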
importance_weight = jnp.exp(logp_a - logp_proposal)
log_ratio = logp_b - logp_a
kl_estimator = -importance_weight * log_ratio
return jnp.mean(kl_estimator, axis=0)


def mc_estimate_kl_with_reparameterized(
distribution_a: DistributionLike,
distribution_b: DistributionLike,
rng_key: PRNGKey,
num_samples: int):
"""Estimates KL(distribution_a, distribution_b)."""
if isinstance(distribution_a, tfd.Distribution):
if distribution_a.reparameterization_type != tfd.FULLY_REPARAMETERIZED:
raise ValueError(
f'Distribution `{distribution_a.name}` cannot be reparameterized.')
distribution_a = conversion.as_distribution(distribution_a)
distribution_b = conversion.as_distribution(distribution_b)
samples, logp_a = distribution_a.sample_and_log_prob(
seed=rng_key, sample_shape=[num_samples])
logp_b = distribution_b.log_prob(samples)
log_ratio = logp_b - logp_a
kl_estimator = -log_ratio
return jnp.mean(kl_estimator, axis=0)


def mc_estimate_mode(
distribution: DistributionLike,
rng_key: PRNGKey,
num_samples: int):
"""Returns a Monte Carlo estimate of the mode of a distribution."""
distribution = conversion.as_distribution(distribution)
# Obtain samples from the distribution and their log probability.
samples, log_probs = distribution.sample_and_log_prob(
seed=rng_key, sample_shape=[num_samples])
# Do argmax over the sample_shape.
index = jnp.expand_dims(jnp.argmax(log_probs, axis=0), axis=0)
mode = jnp.squeeze(jnp.take_along_axis(samples, index, axis=0), axis=0)
return mode
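

# A minimal usage sketch (illustrative; it assumes `distrax` is installed and
# that the Normal/Normal KL is registered, in which case
# KL(N(0,1) || N(1,1)) = 0.5):
#
#   import distrax
#   import jax
#
#   key = jax.random.PRNGKey(0)
#   dist_a = distrax.Normal(loc=0.0, scale=1.0)
#   dist_b = distrax.Normal(loc=1.0, scale=1.0)
#
#   exact = estimate_kl_best_effort(dist_a, dist_b, key, num_samples=1000)
#   approx = mc_estimate_kl(dist_a, dist_b, key, num_samples=1000)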
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# -----------------------------------
# pylint: disable=line-too-long
# This is to allow complete package description on PyPI
"""
Core component of the Microsoft Graph Python SDK consisting of HTTP/Graph Client and a configurable middleware pipeline (Preview).
"""
from .core import SDK_VERSION
__version__ = SDK_VERSION
|