"""Test suites for MetaGenScope."""
|
from rest_framework import serializers
from accounts.models import User
class UserSerializer(serializers.ModelSerializer):
"""Serializer class for User object"""
class Meta:
model = User
fields = ('username', 'password', 'phone', 'address', 'gender',
'age', 'description', 'first_name', 'last_name', 'email')
extra_kwargs = {"password": {'write_only': True}}
def create(self, validated_data):
"""Create user with hashed password"""
return User.objects.create_user(**validated_data)
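# --- Usage sketch (illustrative, not part of the original module) ---
# Assumes a standard DRF setup and that the remaining profile fields
# (phone, address, gender, age, description, ...) are optional on the
# custom User model; the data keys mirror the `fields` tuple above.
def _example_create_user():
    serializer = UserSerializer(data={
        'username': 'alice',
        'password': 's3cret-pass',
        'email': 'alice@example.com',
    })
    if serializer.is_valid():
        # save() dispatches to create(), which hashes the password
        # via User.objects.create_user().
        return serializer.save()
    return serializer.errors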
|
from .icosweepnet import IcoSweepNet
|
from django.db import models
from django.utils import timezone
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class Category(models.Model):
title = models.CharField(max_length=100,default="")
overview = models.CharField(max_length=100,default="")
coverImage = models.ImageField(upload_to="uploads/categories")
def __str__(self):
return self.title
class BlogPost(models.Model):
author = models.CharField(default="Sean Peart", max_length=50)
author_photo = models.ImageField(upload_to="uploads/author_photos", default="")
published = models.DateTimeField(default=timezone.now)
title = models.CharField(max_length=100)
overview = models.TextField(max_length=100)
body = RichTextUploadingField(blank=True,null=True)
body_2 = RichTextUploadingField(blank=True,null=True)
main_coverImage = models.ImageField(upload_to="uploads/blogImages", default="")
    category = models.ForeignKey(Category, on_delete=models.CASCADE)  # CASCADE deletes a post when its category is deleted
def __str__(self):
return self.title
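# --- Illustrative helper (not part of the original app) ---
# Sketch of fetching the posts that belong to a category via the reverse
# ForeignKey relation, newest first by the `published` timestamp.
def posts_in_category(category_title):
    return BlogPost.objects.filter(
        category__title=category_title
    ).order_by('-published')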
|
import os
import math
import tqdm
import torch
import itertools
import traceback
import numpy as np
# The classes are called directly below, so import them from their modules
# (assuming each class lives in a module of the same name).
from model.ModifiedGenerator import ModifiedGenerator
from model.MultiScaleDiscriminator import MultiScaleDiscriminator
from stft_loss.MultiResolutionSTFTLoss import MultiResolutionSTFTLoss
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def num_params(model, print_out=True):
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
if print_out:
print('Trainable Parameters: %.3fM' % parameters)
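# `hp` is referenced inside train() below but never defined in this file.
# Minimal placeholder sketch, assuming a namespace-style hyperparameter
# object; the real project presumably loads these values from a config
# file, and the numbers below are placeholders only.
from types import SimpleNamespace
hp = SimpleNamespace(
    model=SimpleNamespace(feat_loss=False, feat_match=10.0, lambda_adv=1.0),
    train=SimpleNamespace(rep_discriminator=1),
)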
def train(pt_dir, chkpt_path, trainloader, valloader):
model_g = ModifiedGenerator(80, 4).to(device)
print("Generator : \n")
num_params(model_g)
model_d = MultiScaleDiscriminator().to(device)
print("Discriminator : \n")
num_params(model_d)
optim_g = torch.optim.Adam(model_g.parameters(),
lr=0.0001, betas=(0.5, 0.9))
optim_d = torch.optim.Adam(model_d.parameters(),
lr=0.0001, betas=(0.5, 0.9))
init_epoch = -1
step = 0
if chkpt_path is not None:
print("Resuming from checkpoint: %s" % chkpt_path)
checkpoint = torch.load(chkpt_path)
model_g.load_state_dict(checkpoint['model_g'])
model_d.load_state_dict(checkpoint['model_d'])
optim_g.load_state_dict(checkpoint['optim_g'])
optim_d.load_state_dict(checkpoint['optim_d'])
step = checkpoint['step']
init_epoch = checkpoint['epoch']
else:
print("Starting new training run.")
# this accelerates training when the size of minibatch is always consistent.
# if not consistent, it'll horribly slow down.
torch.backends.cudnn.benchmark = True
try:
model_g.train()
model_d.train()
stft_loss = MultiResolutionSTFTLoss()
criterion = torch.nn.MSELoss().to(device)
for epoch in itertools.count(init_epoch+1):
trainloader.dataset.shuffle_mapping()
loader = tqdm.tqdm(trainloader, desc='Loading train data')
avg_g_loss = []
avg_d_loss = []
avg_adv_loss = []
for (melG, audioG), \
(melD, audioD) in loader:
melG = melG.to(device) # torch.Size([16, 80, 64])
audioG = audioG.to(device) # torch.Size([16, 1, 16000])
melD = melD.to(device) # torch.Size([16, 80, 64])
audioD = audioD.to(device) #torch.Size([16, 1, 16000]
# generator
optim_g.zero_grad()
fake_audio = model_g(melG) # torch.Size([16, 1, 12800])
fake_audio = fake_audio[:, :, :16000]
sc_loss, mag_loss = stft_loss(fake_audio[:, :, :audioG.size(2)].squeeze(1), audioG.squeeze(1))
loss_g = sc_loss + mag_loss
adv_loss = 0.0
if step > 100000:
disc_real = model_d(audioG)
disc_fake = model_d(fake_audio)
# for multi-scale discriminator
for feats_fake, score_fake in disc_fake:
# adv_loss += torch.mean(torch.sum(torch.pow(score_fake - 1.0, 2), dim=[1, 2]))
adv_loss += criterion(score_fake, torch.ones_like(score_fake))
adv_loss = adv_loss / len(disc_fake) # len(disc_fake) = 3
# adv_loss = 0.5 * adv_loss
# loss_feat = 0
# feat_weights = 4.0 / (2 + 1) # Number of downsample layer in discriminator = 2
# D_weights = 1.0 / 7.0 # number of discriminator = 7
# wt = D_weights * feat_weights
if hp.model.feat_loss:
for (feats_fake, score_fake), (feats_real, _) in zip(disc_fake, disc_real):
for feat_f, feat_r in zip(feats_fake, feats_real):
adv_loss += hp.model.feat_match * torch.mean(torch.abs(feat_f - feat_r))
loss_g += hp.model.lambda_adv * adv_loss
loss_g.backward()
optim_g.step()
# discriminator
loss_d_avg = 0.0
if step > 100000:
fake_audio = model_g(melD)[:, :, :16000]
fake_audio = fake_audio.detach()
loss_d_sum = 0.0
for _ in range(1):
optim_d.zero_grad()
disc_fake = model_d(fake_audio)
disc_real = model_d(audioD)
loss_d = 0.0
loss_d_real = 0.0
loss_d_fake = 0.0
for (_, score_fake), (_, score_real) in zip(disc_fake, disc_real):
loss_d_real += criterion(score_real, torch.ones_like(score_real))
loss_d_fake += criterion(score_fake, torch.zeros_like(score_fake))
loss_d_real = loss_d_real / len(disc_real) # len(disc_real) = 3
loss_d_fake = loss_d_fake / len(disc_fake) # len(disc_fake) = 3
loss_d = loss_d_real + loss_d_fake
loss_d.backward()
optim_d.step()
loss_d_sum += loss_d
loss_d_avg = loss_d_sum / hp.train.rep_discriminator
loss_d_avg = loss_d_avg.item()
step += 1
# logging
loss_g = loss_g.item()
avg_g_loss.append(loss_g)
avg_d_loss.append(loss_d_avg)
avg_adv_loss.append(adv_loss)
if any([loss_g > 1e8, math.isnan(loss_g), loss_d_avg > 1e8, math.isnan(loss_d_avg)]):
print("loss_g %.01f loss_d_avg %.01f at step %d!" % (loss_g, loss_d_avg, step))
raise Exception("Loss exploded")
if step % 1 == 0:
print("Avg : g %.04f d %.04f ad %.04f| step %d" % (sum(avg_g_loss) / len(avg_g_loss),
sum(avg_d_loss) / len(avg_d_loss),
sum(avg_adv_loss) / len(avg_adv_loss),
step))
if epoch % 20 == 0:
save_path = os.path.join(pt_dir, '%04d.pt'
% (epoch))
torch.save({
'model_g': model_g.state_dict(),
'model_d': model_d.state_dict(),
'optim_g': optim_g.state_dict(),
'optim_d': optim_d.state_dict(),
'step': step,
'epoch': epoch,
}, save_path)
print("Saved checkpoint to: %s" % save_path)
except Exception as e:
print("Exiting due to exception: %s" % e)
traceback.print_exc()
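# --- Invocation sketch (illustrative) ---
# The original file does not show how train() is called; this assumes the
# project builds its train/validation DataLoaders elsewhere (hypothetical
# `build_dataloaders` helper) and takes paths from the command line.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("pt_dir", help="directory for checkpoint .pt files")
    parser.add_argument("--checkpoint", default=None, help="checkpoint to resume from")
    args = parser.parse_args()
    # trainloader, valloader = build_dataloaders(...)  # hypothetical helper
    # train(args.pt_dir, args.checkpoint, trainloader, valloader)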
|
# Generated by Django 2.0 on 2018-03-02 03:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('trade', '0004_item_verified'),
]
operations = [
migrations.CreateModel(
name='PasswordResetRequest',
fields=[
('key', models.CharField(default=b'd0b0eb7e4cb6c9d2b0f730a0de8590', max_length=30, primary_key=True, serialize=False)),
('valid', models.BooleanField(default=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trade.OneshirtUser')),
],
),
]
|
"""
Useful functions to compute various estimation errors
Copyright @donelef, @jbrouill on GitHub
"""
from dataclasses import dataclass
import numpy as np
def fro_error(y: np.array, y_hat: np.array) -> float:
"""
Computes the Frobenius error of an estimation.
:param y: true parameters as numpy array
:param y_hat: estimated parameters as numpy array
:return: Frobenius norm of the estimation error
"""
return np.linalg.norm(y - y_hat, ('fro' if len(y.shape) > 1 else 2))
def max_error(y: np.array, y_hat: np.array) -> float:
"""
Computes the max error of an estimation.
:param y: true parameters as numpy array
:param y_hat: estimated parameters as numpy array
:return: infinity norm of the estimation error
"""
return np.max(np.abs(y - y_hat))
def rrms_error(y: np.array, y_hat: np.array) -> float:
"""
Computes the RRMS error of an estimation.
:param y: true parameters as numpy array
:param y_hat: estimated parameters as numpy array
:return: Frobenius norm of the relative estimation error, as percentage
"""
return fro_error(y, y_hat) / np.linalg.norm(y, ('fro' if len(y.shape) > 1 else 2)) * 100
def map_error(y: np.array, y_hat: np.array) -> float:
"""
Computes the average relative error of an estimation with known topology.
This looks only at the error on non-zero values.
:param y: true parameters as numpy array
:param y_hat: estimated parameters as numpy array
:return: MAP estimation error, as percentage
"""
y_non_zero = y[y != 0]
y_hat_non_zero = y_hat[y != 0]
return np.linalg.norm(np.abs(y_non_zero - y_hat_non_zero)) / np.linalg.norm(np.abs(y_non_zero)) * 100
@dataclass
class ErrorMetrics:
fro_error: float
max_error: float
rrms_error: float
map_error: float
def error_metrics(y: np.array, y_hat: np.array) -> ErrorMetrics:
return ErrorMetrics(
fro_error=fro_error(y, y_hat),
max_error=max_error(y, y_hat),
rrms_error=rrms_error(y, y_hat),
map_error=map_error(y, y_hat)
)
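# --- Usage sketch (illustrative) ---
# Compares a true parameter matrix against a slightly perturbed estimate.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    y_true = rng.normal(size=(4, 4))
    y_est = y_true + 0.01 * rng.normal(size=(4, 4))
    metrics = error_metrics(y_true, y_est)
    print(metrics)  # ErrorMetrics(fro_error=..., max_error=..., rrms_error=..., map_error=...)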
|
from setuptools import setup, find_packages
setup(
name='doctools',
version='0.2.2',
description='docblock manipulation utilities',
long_description=open('README.rst').read(),
py_modules=['doctools'],
install_requires=['pytest', 'pytest-cov'],
    author='Adam Wagner',
    author_email='awagner83@gmail.com',
    url='https://github.com/awagner83/doctools',
    license='BSD3',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Software Development',
'Intended Audience :: Developers',
'Programming Language :: Python',
]
)
|
import pandas as pd
import os
test = os.listdir('test_labels')
train = os.listdir('train_labels')
test_labels = pd.concat([pd.read_csv('test_labels/' + i, index_col=0)
                         for i in test]).drop_duplicates()
print('Created the test label set')
train_labels = pd.concat([pd.read_csv('train_labels/' + i, index_col=0)
                          for i in train]).drop_duplicates()
print('Created the train label set')
f = set(open('t.txt', 'r').read().split('\n'))
test_labels = test_labels[test_labels.index.isin(f)]
print('Filtered test labels')
train_labels = train_labels[train_labels.index.isin(f)]
print('Filtered train labels')
test_labels.to_csv('test_labels.csv')
train_labels.to_csv('train_labels.csv')
|
# LyricsGenius
# Copyright 2018 John W. Miller
# See LICENSE for details.
import json
import os
class Artist(object):
"""An artist with songs from the Genius.com database."""
def __init__(self, json_dict):
""" Artist Constructor
Properties:
name: Artist name.
image_url: URL to the artist image on Genius.com
songs: List of the artist's Song objects
num_songs: Number of songs in the Artist object
Methods:
add_song: Add a song to the Artist object
save_lyrics: Save the lyrics to a JSON or TXT file
"""
self._body = json_dict['artist']
self._url = self._body['url']
self._api_path = self._body['api_path']
self._id = self._body['id']
self._songs = []
self._num_songs = len(self._songs)
self._songs_dropped = 0
def __len__(self):
return 1
@property
def name(self):
return self._body['name']
@property
def image_url(self):
if 'image_url' in self._body:
return self._body['image_url']
@property
def songs(self):
return self._songs
@property
def num_songs(self):
return self._num_songs
def add_song(self, newsong, verbose=True):
"""Add a Song object to the Artist object"""
if any([song.title == newsong.title for song in self._songs]):
if verbose:
print('{s} already in {a}, not adding song.'.format(s=newsong.title,
a=self.name))
return 1 # Failure
if newsong.artist == self.name:
self._songs.append(newsong)
self._num_songs += 1
return 0 # Success
if verbose:
print("Can't add song by {b}, artist must be {a}.".format(b=newsong.artist,
a=self.name))
return 1 # Failure
def get_song(self, song_name):
"""Search Genius.com for *song_name* and add it to artist"""
raise NotImplementedError("I need to figure out how to allow Artist() to access Genius.search_song().")
# song = Genius.search_song(song_name, self.name)
# self.add_song(song)
# return
# TODO: define an export_to_json() method
def save_lyrics(self, filename=None, format_='json', overwrite=False,
skip_duplicates=True, verbose=True, binary_encoding=False):
"""Allows user to save all lyrics within an Artist obejct"""
format_ = format_.lstrip(".")
assert (format_ == 'json') or (format_ == 'txt'), "format_ must be JSON or TXT"
# We want to reject songs that have already been added to artist collection
def songsAreSame(s1, s2):
from difflib import SequenceMatcher as sm
# Idea credit: https://bigishdata.com/2016/10/25/
seqA = sm(None, s1.lyrics, s2['lyrics'])
if seqA.ratio() > 0.4:
seqB = sm(None, s2['lyrics'], s1.lyrics)
return seqA.ratio() > 0.5 or seqB.ratio() > 0.5
return False
def songInArtist(new_song):
# artist_lyrics is global (works in Jupyter notebook)
for song in lyrics_to_write['songs']:
if songsAreSame(new_song, song):
return True
return False
# Determine the filename
if filename:
# Remove format suffix if supplied by user
for ext in ["txt", "TXT", "json", "JSON"]:
filename = filename.replace("." + ext, "")
filename += "." + format_
else:
filename = "Lyrics_{}.{}".format(self.name.replace(" ", ""), format_)
# Check if file already exists
write_file = False
if not os.path.isfile(filename):
write_file = True
elif overwrite:
write_file = True
else:
if input("{} already exists. Overwrite?\n(y/n): ".format(filename)).lower() == 'y':
write_file = True
# Format lyrics as either .txt or .json
if format_ == 'json':
lyrics_to_write = {'songs': [], 'artist': self.name}
for song in self.songs:
# This takes way too long! It's basically O(n^2), can I do better?
if skip_duplicates is False or not songInArtist(song):
lyrics_to_write['songs'].append({})
lyrics_to_write['songs'][-1]['title'] = song.title
lyrics_to_write['songs'][-1]['album'] = song.album
lyrics_to_write['songs'][-1]['year'] = song.year
lyrics_to_write['songs'][-1]['lyrics'] = song.lyrics
lyrics_to_write['songs'][-1]['image'] = song.song_art_image_url
lyrics_to_write['songs'][-1]['artist'] = self.name
lyrics_to_write['songs'][-1]['raw'] = song._body
else:
self._songs_dropped += 1
if verbose:
print("SKIPPING \"{}\" (already found in artist collection)".format(song.title))
else:
lyrics_to_write = " ".join([s.lyrics + 5*'\n' for s in self.songs])
if binary_encoding:
lyrics_to_write = lyrics_to_write.encode('utf8')
# Write the lyrics to either a .json or .txt file
if write_file:
with open(filename, 'wb' if binary_encoding else 'w') as lyrics_file:
if format_ == 'json':
json.dump(lyrics_to_write, lyrics_file)
else:
lyrics_file.write(lyrics_to_write)
if verbose:
print('Wrote {} songs to {}.'.format(self.num_songs - self._songs_dropped, filename))
else:
if verbose:
print('Skipping file save.\n')
return lyrics_to_write
def __str__(self):
"""Return a string representation of the Artist object."""
msg = "{name}, {num} songs".format(name=self.name, num=self._num_songs)
msg = msg[:-1] if self._num_songs == 1 else msg
return msg
def __repr__(self):
msg = "{num} songs".format(num=self._num_songs)
msg = repr((self.name, msg[:-1])) if self._num_songs == 1 else repr((self.name, msg))
return msg
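# --- Usage sketch (illustrative, not part of the library) ---
# Artist expects the raw JSON dict returned by the Genius API; only the
# fields accessed in __init__ and the properties are filled in here, and
# all values are placeholders.
def _example_artist():
    payload = {
        'artist': {
            'name': 'Example Artist',
            'url': 'https://genius.com/artists/Example-artist',
            'api_path': '/artists/0',
            'id': 0,
            'image_url': 'https://example.com/artist.jpg',
        }
    }
    artist = Artist(payload)
    print(artist.name, artist.num_songs)
    return artist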
|
def hybrid(max_duration: float) -> None:
pass
|
from random import randint
from django.core.cache import cache
from conf_site.proposals.tests import ProposalTestCase
from conf_site.reviews.models import ProposalVote
from conf_site.reviews.tests.factories import ProposalVoteFactory
class ProposalVoteCountRefreshTestCase(ProposalTestCase):
def setUp(self):
super(ProposalVoteCountRefreshTestCase, self).setUp()
self.vote_cache_keys = [
"proposal_{}_plus_one".format(self.proposal.pk),
"proposal_{}_plus_zero".format(self.proposal.pk),
"proposal_{}_minus_zero".format(self.proposal.pk),
"proposal_{}_minus_one".format(self.proposal.pk),
]
def test_no_votes(self):
"""Verify that refreshing counts returns zero if there are no votes."""
# Make sure that this proposal has no votes.
self.assertEqual(self.proposal.review_votes.count(), 0)
self.proposal._refresh_vote_counts()
for cache_key in self.vote_cache_keys:
self.assertEqual(cache.get(cache_key), 0)
self.assertEqual(self.proposal.total_votes, 0)
def test_setting_votes(self):
# Create a random assortment of votes on our proposal.
plus_one_votes = ProposalVoteFactory.create_batch(
size=randint(0, 3),
proposal=self.proposal,
score=ProposalVote.PLUS_ONE,
)
plus_zero_votes = ProposalVoteFactory.create_batch(
size=randint(0, 3),
proposal=self.proposal,
score=ProposalVote.PLUS_ZERO,
)
minus_zero_votes = ProposalVoteFactory.create_batch(
size=randint(0, 3),
proposal=self.proposal,
score=ProposalVote.MINUS_ZERO,
)
minus_one_votes = ProposalVoteFactory.create_batch(
size=randint(0, 3),
proposal=self.proposal,
score=ProposalVote.MINUS_ONE,
)
self.proposal._refresh_vote_counts()
# Tally up vote counts and verify that the cached value is correct.
vote_counts = [
len(plus_one_votes),
len(plus_zero_votes),
len(minus_zero_votes),
len(minus_one_votes),
]
for index, cache_key in enumerate(self.vote_cache_keys):
self.assertEqual(cache.get(cache_key), vote_counts[index])
self.assertEqual(self.proposal.total_votes, sum(vote_counts))
|
import time
import json
import sys
import hashlib
import operator
import numpy as np
import os
from datetime import datetime
import re
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import matplotlib.dates as mdates
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams.update({'errorbar.capsize': 2})
# increase font
font = {'weight' : 'medium',
'size' : 16}
matplotlib.rc('font', **font)
colors = [
'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',
'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', 'black'
]
def plot_regular_proportion(summary, output_dir):
print('plot_regular_proportion', output_dir)
# fig, ax = plt.subplots(figsize=(24, 12))
MODES = ['Classic', 'PortLess', 'SubnetLess8', 'SubnetLess16', 'NSLookup', 'DomainNoDigit']
percentages = {
MODE: {
'reguarl1': {'count': [], 'volume': [], 'max_int': []},
'reguarl2': {'count': [], 'volume': [], 'max_int': []},
'reguarl3': {'count': [], 'volume': []},
}
for MODE in MODES
}
for trace_id, trace in summary.items():
for device_id, device in trace.items():
# if 'noport' in trace_id:
for MODE in MODES:
if MODE in trace_id:
percentages[MODE]['reguarl1']['count'].append(
100 * device['regular_flows1']['count'] / float(device['regular_flows1']['count'] + device['nonrg_flows1']['count'])
)
percentages[MODE]['reguarl1']['volume'].append(
100 * device['regular_flows1']['vol'] / float(device['regular_flows1']['vol'] + device['nonrg_flows1']['vol'])
)
percentages[MODE]['reguarl1']['max_int'].append(
device['regular_flows1']['max_int']
)
percentages[MODE]['reguarl2']['count'].append(
100 * device['regular_flows2']['count'] / float(device['regular_flows2']['count'] + device['nonrg_flows2']['count'])
)
percentages[MODE]['reguarl2']['volume'].append(
100 * device['regular_flows2']['vol'] / float(device['regular_flows2']['vol'] + device['nonrg_flows2']['vol'])
)
percentages[MODE]['reguarl2']['max_int'].append(
device['regular_flows2']['max_int']
)
percentages[MODE]['reguarl3']['count'].append(
100 * device['regular_flows3']['count'] / float(device['regular_flows3']['count'] + device['nonrg_flows3']['count'])
)
percentages[MODE]['reguarl3']['volume'].append(
100 * device['regular_flows3']['vol'] / float(device['regular_flows3']['vol'] + device['nonrg_flows3']['vol'])
)
    # The six CDF plots below differ only in which metric is plotted, the
    # x-axis label and the output file name, so draw them in one loop.
    # (The key spelling 'reguarlN' matches the dict built above.)
    plot_specs = [
        ('reguarl1', 'count', 'Predictable (%)', 'cdf_predict1_count.pdf'),
        ('reguarl2', 'count', 'Predictable (%)', 'cdf_predict2_count.pdf'),
        ('reguarl1', 'volume', 'Predictable (%)', 'cdf_predict1_volume.pdf'),
        ('reguarl2', 'volume', 'Predictable (%)', 'cdf_predict2_volume.pdf'),
        ('reguarl1', 'max_int', 'Maximal Interval For Predictable Traffic (s)', 'cdf_predict1_maxint.pdf'),
        ('reguarl2', 'max_int', 'Maximal Interval For Predictable Traffic (s)', 'cdf_predict2_maxint.pdf'),
    ]
    for key, metric, xlabel, filename in plot_specs:
        plt.clf()
        for i in range(len(MODES)):
            results = percentages[MODES[i]][key][metric]
            x, y = sorted(results), np.arange(1, 1 + len(results)) / len(results)
            plt.plot(x, y, label=MODES[i], color=colors[i], linewidth=2)
        plt.ylim(0, 1)
        plt.ylabel('CDF (0-1)')
        plt.xlabel(xlabel)
        plt.tight_layout()
        plt.grid(True)
        plt.legend()  # prop={'size': 10}
        plt.savefig(os.path.join(output_dir, filename))
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Input format: python plot_data_trans.py input_dir [output_dir]')
exit()
elif len(sys.argv) == 2:
input_dir = sys.argv[1]
output_dir = './'
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# details_file = os.path.join(input_dir, 'regular_details.json')
summary_file = os.path.join(input_dir, 'regular_summary.json')
# details = json.load(open(details_file, 'r'))
summary = json.load(open(summary_file, 'r'))
plot_regular_proportion(summary, output_dir)
# python3 plot_regular.py results/yourthing results/yourthing
|
import yaml
from pymongo import MongoClient, ASCENDING, DESCENDING
from repo.controllers.path_manager import MONGO_CONFIG_PATH
class MongoManager():
    def __init__(self, host=None, port=None):
        # Fall back to the YAML config for any value not passed in;
        # read the config file once instead of once per missing value.
        if host is None or port is None:
            with open(MONGO_CONFIG_PATH) as f:
                conf = yaml.safe_load(f.read())
            if host is None:
                host = conf['host']
            if port is None:
                port = conf['port']
self.host = host
self.port = port
self.db = None
        self.client = MongoClient(host, port)  # connect to MongoDB
self.collection = None
def set_db(self, db_name):
self.db = getattr(self.client, db_name)
    def set_collection(self, collection_name):
        self.collection = getattr(self.db, collection_name)
def get_latest_record(self):
return self.collection.find().sort("time", DESCENDING)[0]
def initialize_collection(self):
return self.collection.delete_many({})
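# --- Usage sketch (illustrative, not part of the original module) ---
# The database and collection names below are placeholders; the real names
# come from the surrounding project.
def _example_usage():
    manager = MongoManager()
    manager.set_db('example_db')            # placeholder name
    manager.set_collection('example_col')   # placeholder name
    return manager.get_latest_record()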
if __name__ == '__main__':
pass
|
import basic
tools = basic.tools()
crawler = basic.crawlerComponent()
token = ''
test = 'https://u19481746.pipipan.com/fs/19481746-372048608'
result = tools.solveCtLink(test,crawler)
if result != 'error':
link = tools.shortLink(result,token,crawler)
else:
link = 'error'
print(link)
|
from .page_all_requests import * # noqa
from .page_request_details import * # noqa
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from utils.utils import my_shuffle
from utils.emoji import Emoji
def job(bot):
print("Sending message...")
msg = "#bomdia Smininos! É hora de acordar... {}".format(
Emoji.SUN_BEHIND_CLOUD,
Emoji.SMILING_FACE_WITH_OPEN_MOUTH)
chat_id = '-58208727'
bot.sendMessage(chat_id, msg)
bot.sendChatAction(chat_id, 'upload_document')
gifs = [
'BQADBAADgAMAAnIdZAdIwXN1oJLFZwI',
'BQADBAAD5gMAAuMZZAer90C0u8hN9wI',
'BQADBAADSgMAAmAZZAc8QCM-wgy6gwI',
'BQADBAADlQMAAosbZAcK-YB68LLP1QI',
'BQADBAADIwMAAj8cZAfvVsu1eC6qqgI',
'BQADBAADiAMAAsEaZAfUliUupYKU5AI',
]
bot.sendDocument(chat_id, random.choice(my_shuffle(gifs)))
    print('Sent')
|
from os import path
from shorter.start.environment import BASE_DIR, MIGR_DIR, ROOT_DIR, THIS_DIR
TEST_DIR = path.abspath(path.dirname(path.dirname(__file__)))
def test_rootdir():
assert ROOT_DIR == path.dirname(TEST_DIR)
def test_basedir():
assert BASE_DIR == path.join(path.dirname(TEST_DIR), "shorter")
def test_thisdir():
assert THIS_DIR == path.join(path.dirname(TEST_DIR), "shorter", "start")
def test_migratedir():
assert MIGR_DIR == path.join(path.dirname(TEST_DIR), "migrate")
|
import pandas as pd
from os import remove
import re
from config import HOME_URL
import logging
from dbhelper import DBHelper
import io
from datetime import datetime, timezone, timedelta
from config import SALT
import hashlib
class ParkMap:
park_data = []
asc_columns = [4,7,10,13,16,19,22]
desc_columns = [5,8,11,14,17,20]
blank_columns = [3,6,9,12,15,18,21]
HTML_FILE = 'map.html'
parking = list(range(1,319,1))
last_num = 0
def _build_map(self):
self.parking = list(range(1,319,1))
self.park_data = []
for col in range(3,23,1):
for row in range(1,12,1):
if col in self.asc_columns:
mm_cell = {}
mm_cell['row'] = row
mm_cell['column'] = col
last_num = self.parking.pop(0)
mm_cell['parking'] = last_num
mm_cell['busy'] = 0
mm_cell['rent'] = 0
self.park_data.append(mm_cell)
if col in self.desc_columns:
mm_cell = {}
mm_cell['row'] = 12 - row
mm_cell['column'] = col
last_num = self.parking.pop(0)
mm_cell['parking'] = last_num
mm_cell['busy'] = 0
mm_cell['rent'] = 0
self.park_data.append(mm_cell)
for col in range(24,3,-1):
for row in range(14,25,1):
if col in self.asc_columns:
mm_cell = {}
mm_cell['row'] = row
mm_cell['column'] = col
last_num = self.parking.pop(0)
mm_cell['parking'] = last_num
mm_cell['busy'] = 0
mm_cell['rent'] = 0
self.park_data.append(mm_cell)
if col in self.desc_columns:
mm_cell = {}
mm_cell['row'] = 38 - row
mm_cell['column'] = col
last_num = self.parking.pop(0)
mm_cell['parking'] = last_num
mm_cell['busy'] = 0
mm_cell['rent'] = 0
self.park_data.append(mm_cell)
for row in range(24,6,-1):
for col in range(2,0,-1):
if row in [12,13]:
continue
mm_cell = {}
mm_cell['row'] = row
mm_cell['column'] = col
last_num = self.parking.pop(0)
mm_cell['parking'] = last_num
mm_cell['busy'] = 0
mm_cell['rent'] = 0
self.park_data.append(mm_cell)
for col in range(1,23,1):
for row in range(0,26,1):
mm_cell = {}
#first blanks
if row in [0]:
if col in [2,9,18]:
mm_cell['parking'] = '||||__||||'
else:
mm_cell['parking'] = '||||||||||'
mm_cell['column'] = col
mm_cell['row'] = row
self.park_data.insert(0, mm_cell)
elif (col in [1,2]) and (row in [1,2,3,4,5,6]):
if row in [1,2]:
mm_cell['parking'] = '.'
elif row in [3,4]:
mm_cell['parking'] = '<='
else:
mm_cell['parking'] = '=>'
mm_cell['column'] = col
mm_cell['row'] = row
self.park_data.append(mm_cell)
elif ((col in self.blank_columns) and row != 25):
mm_cell['parking'] = '.'
mm_cell['column'] = col
mm_cell['row'] = row
self.park_data.append(mm_cell)
elif row in [12,13]:
mm_cell['parking'] = ' . '
mm_cell['column'] = col
mm_cell['row'] = row
self.park_data.append(mm_cell)
elif row in [25]:
if col in [2,9,18]:
mm_cell['parking'] = '||||__||||'
else:
mm_cell['parking'] = '||||||||||'
mm_cell['column'] = col
mm_cell['row'] = row
self.park_data.append(mm_cell)
@staticmethod
def is_mm_number(row_string):
row_string = str(row_string).strip()
mm_pattern = re.compile(r'^<td.*background-color.*white.*>\d+<\/td>')
return mm_pattern.match(row_string)
@staticmethod
def __get_mm_number(row_string):
mm_pattern = re.compile(r'<td.*background-color.*white.*>(\d+)<\/td>')
found_num = mm_pattern.search(row_string).group(1)
return int(found_num)
@staticmethod
def highlight_mm_number(html_string):
found_num = ''
try:
mm_pattern = re.compile(r'<td.*background-color.*white.*>(\d+)<\/td>')
found_num = mm_pattern.search(html_string).group(1)
html_string = re.sub("white\">" + found_num + "</td>", "cyan\">" + found_num + "</td>", html_string)
except AttributeError:
found_num = ''
return html_string
async def draw_map(self, dataset):
self._build_map()
logging.info("MAP WAS BUILDED")
df = pd.DataFrame(data=self.park_data)
str_io = io.StringIO()
df.pivot(index='row',columns='column', values='parking') \
.rename_axis(None, axis=1) \
.to_html(buf=str_io, header=False, index=False)
html_str = str_io.getvalue()
html_str_format_io = io.StringIO()
for line in html_str.split('\n'):
line = line.replace('border="1"','border="0"')
line = line.replace('<td>','<td style="width:30;background-color: white">')
if ParkMap.is_mm_number(line):
if ParkMap.__get_mm_number(line) in dataset:
line = ParkMap.highlight_mm_number(line)
html_str_format_io.write(line + '\n')
html_str_format = html_str_format_io.getvalue()
if html_str_format:
db = DBHelper()
html_db_data = await db.save_html(html_str_format)
logging.info("saved to db")
del db
logging.info("map file was created")
return html_db_data
@staticmethod
async def show_map():
logging.info("show_map")
db = DBHelper()
map_key = await db.get_map_key()
logging.info(map_key)
offset = timedelta(hours=3)
tz = timezone(offset, name='МСК')
now = datetime.now(tz=tz)
date_today = now.strftime("%d/%m/%Y")
str_hash = date_today + " " + SALT
map_key_new = hashlib.md5(str_hash.encode('utf-8')).hexdigest()
if (str(map_key_new).lower() != str(map_key).lower()):
await db.update_map_key(map_key_new)
map_key = await db.get_map_key()
logging.info(map_key)
del db
return HOME_URL + "index.php?key=" + map_key
|
# aln_stats has functions
# Should raise warning when there are empty comparisons
# should avoid capitalizing sequences
#------------------------------------------------------------
import sys, os, re
import pandas as pd
from itertools import combinations
from utils import get_file_paths_from_dir, substring_delim, cds_to_dict, map_dict_vals
from stats import compare_aln_seqs, count_gap_block_in_sequence, count_aln_gaps, count_str_chars, get_longest_aligned_blocks_between_aligned_seqs
#------------------------------------------------------------
def get_aln_stats_from_path(path_to_aln, gap_char = '-', amb_char = 'N', verbose = 0):
"""takes an input path to an FASTA-formatted alignment of a pair of sequences
and returns a list containing the names of the alignment file,
samples names, and alignment statistics.
statistics are:
- length of the alignment
- number of aligned sites
- sequence length of sequence 1 and 2
- number of mismatches between sequence 1 and 2
- number of gap blocks (an uninterrupted stretch of gaps)
- number of gaps
parameters
----------
path_to_aln: str
path to the input alignment file
gap_char: str
character specifying a gap in the sequence
verbose: int, 0 or 1
set to 1 for debugging output
returns
-------
list"""
assert os.path.isfile(path_to_aln) == True, 'your input path was not a valid path'
assert isinstance(gap_char, str), 'your gap character was not a string'
assert isinstance(verbose, int), 'your input for verbose was not an integer'
assert verbose in [0,1], 'your input for verbose must be 0 or 1'
pairwise_comparisons = []
# reads the alignment into a dictionary, sample names are mapped to sample sequences
# converts the dictionary into a dataframe
aln_dict = cds_to_dict(path_to_aln, delim2 = 'lastpos')
    cds = map_dict_vals(aln_dict, str.upper)  # capitalizes sequence characters (currently unused below)
# get combinations
sample_pairs = list(combinations(aln_dict.keys(), 2))
for pair in sample_pairs:
# extracts the names and sequences of first and second samples
id1 = pair[0]
id2 = pair[1]
seq1 = aln_dict[id1]
seq2 = aln_dict[id2]
if verbose == 1:
print('current ids: {}, {}'.format(id1, id2))
print('current seqs: {}, {}'.format(seq1, seq2))
# extract statistics: alignment length, number of aligned sites and number of mismatches
aln_len, aln_site_num, site_match, mismatch_num = compare_aln_seqs(seq1, seq2, gap_char = gap_char)
longest_aln_block = get_longest_aligned_blocks_between_aligned_seqs(seq1, seq2, gap_char = gap_char)
# listing for pair
pairwise_comparisons.append([os.path.basename(path_to_aln),id1, id2, aln_len,
aln_site_num, count_str_chars(seq1, [gap_char]), count_str_chars(seq2, [gap_char]), count_str_chars(seq1, [gap_char, amb_char]), count_str_chars(seq2, [gap_char, amb_char]),
site_match, mismatch_num, longest_aln_block, count_gap_block_in_sequence(seq1, gap_char), count_gap_block_in_sequence(seq2, gap_char),
count_aln_gaps(seq1, gap_char), count_aln_gaps(seq2, gap_char)])
# outputs list in order described in description
return pairwise_comparisons
#---------------------------------------------------------------------------------
def get_aln_stats_from_aln_dir(aln_dir_path, gap_char = '-', amb_char = 'N', aln_suffix = '.aln', verbose = 0):
"""takes an path to a directory and a tring that specifies
a suffixes of alignment files in the directory. returns a table
containing the names of each alignment file, its sample names
and alignment statistics.
parameters
----------
aln_dir_path: str
        path to a directory containing FASTA-formatted alignments
aln_suffix: str
a string that specifies the file extension name of each alignment file
gap_char: str
character specifying a gap in the sequence
verbose: int, 0 or 1
set to 1 for debugging output
returns
-------
pandas.DataFrame"""
assert isinstance(aln_dir_path, str) == True, 'your alignment directory was not a string'
assert isinstance(aln_suffix, str) == True, 'your input for aln_suffix was not a string'
assert os.path.isdir(aln_dir_path) == True, 'your input path was not a valid path'
assert isinstance(gap_char, str), 'your gap character was not a string'
assert isinstance(verbose, int), 'your input for verbose was not an integer'
assert verbose in [0,1], 'your input for verbose must be 0 or 1'
alns = []
aln_paths = get_file_paths_from_dir(aln_dir_path, ext = [aln_suffix])
if verbose == 1:
pass
# loop over alignment paths
# compute statistics and append to a list
for path in aln_paths:
alns.append(pd.DataFrame(get_aln_stats_from_path(path, gap_char = gap_char)))
#return alns
# convert list of aln statistics into a table
alns = pd.concat(alns)
alns.index = range(1, len(alns) + 1)
alns.columns = ['aln_name', 'sp1_ID', 'sp2_ID', 'aln_len', 'aln_sites', 'sp1_seqlen_w_N', 'sp2_seqlen_w_N','sp1_seqlen_wo_N', 'sp2_seqlen_wo_N', 'matches', 'mismatches', 'longest_aligned_block',
'sp1_gap_blocks', 'sp2_gap_blocks', 'sp1_gaps', 'sp2_gaps']
#alns['sp1_5p_exon_endpos'] = alns['sp1_ID'].apply(lambda x: x.split(':')[1])
#alns['sp2_5p_exon_endpos'] = alns['sp2_ID'].apply(lambda x: x.split(':')[1])
#alns['sp1_ID'] = alns['sp1_ID'].apply(lambda x: x.split(':')[0])
#alns['sp2_ID'] = alns['sp2_ID'].apply(lambda x: x.split(':')[0])
return alns.loc[:, ['aln_name', 'sp1_ID', 'sp2_ID', 'aln_len', 'aln_sites', 'sp1_seqlen_w_N', 'sp2_seqlen_w_N','sp1_seqlen_wo_N', 'sp2_seqlen_wo_N', 'matches',
'mismatches', 'longest_aligned_block', 'sp1_gap_blocks', 'sp2_gap_blocks', 'sp1_gaps',
'sp2_gaps']]
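#------------------------------------------------------------
# Usage sketch (illustrative): the directory path and output file name are
# placeholders; assumes the local utils/stats modules are importable.
if __name__ == '__main__':
    stats_table = get_aln_stats_from_aln_dir('alignments/', aln_suffix='.aln')
    stats_table.to_csv('aln_stats.csv', index=False)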
|
import os
class NCEOptions(object):
""" Default options for the noise contrastive estimation loss criterion. Modify as needed. """
def __init__(self):
self.num_sampled = 25
self.remove_accidental_hits = True
self.subtract_log_q = True
self.unique = True
self.array_path = os.path.join(os.curdir, 'sampling_array.p')
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: settings
Description :
Author : cat
date: 2018/1/22
-------------------------------------------------
Change Activity:
2018/1/22:
-------------------------------------------------
# (57,48,90) --> dark_night
"""
class Settings(object):
"""
存储《外星人入侵》的所有设置类
"""
def __init__(self):
"""初始化游戏的设置"""
# 屏幕设置 (1200,800)
self.screen_width = 800
self.screen_height = 600
self.bg_color = (57, 48, 90)
        # Ship movement speed
self.ship_speed = 5.5
|
from redbot.core.bot import Red
from .banmessage import BanMessage
async def setup(bot: Red) -> None:
cog = BanMessage(bot)
bot.add_cog(cog)
|
import os
import cv2
from helper.pre_processing import *
video_info = video_files[0]
data = generate_pickle(video_info, 'a')
for i in range(0, 100):
img_path = os.path.join(r'F:\DataSet\Aggression_Out\mv1', str(i).zfill(6) + '.jpg')
test_path = os.path.join(r'F:\DataSet\Aggression_Out\test', str(i).zfill(6) + '.jpg')
img = cv2.imread(img_path)
temp = data[0][i]
bbox = cal_bbox(temp)
cv2.rectangle(img, (round(bbox[0]), round(bbox[1])), (round(bbox[2]), round(bbox[3])), (255, 0, 0))
temp = data[1][i]
bbox = cal_bbox(temp)
cv2.rectangle(img, (round(bbox[0]), round(bbox[1])), (round(bbox[2]), round(bbox[3])), (0, 255, 0))
cv2.imwrite(test_path, img)
print(1)
|
from math import radians, cos, sin
moves = []
def parse_line(line):
d, v = line[0], line[1:]
moves.append((d, int(v)))
with open('input', 'r') as f:
for line in f:
line = line.strip()
parse_line(line)
def rotate(waypoint, degrees):
r = radians(degrees)
x, y = waypoint
x_prime = int(round(x * cos(r) - y * sin(r)))
y_prime = int(round(x * sin(r) + y * cos(r)))
return (x_prime, y_prime)
def handle_move(d, v, pos):
x, y = pos
if d == 0:
x += v
elif d == 90:
y += v
elif d == 270:
y -= v
elif d == 180:
x -= v
return (x, y)
def move_ship(ship, way, v):
xs, ys = ship
xw, yw = way
xs += xw * v
ys += yw * v
return xs, ys
def go():
way = (10, 1)
ship = (0, 0)
for d, v in moves:
if d == "F":
ship = move_ship(ship, way, v)
elif d == "L":
way = rotate(way, v)
elif d == "R":
way = rotate(way, -v)
elif d == "E":
way = handle_move(0, v, way)
elif d == "N":
way = handle_move(90, v, way)
elif d == "W":
way = handle_move(180, v, way)
elif d == "S":
way = handle_move(270, v, way)
return ship
ship = go()
print(abs(ship[0]) + abs(ship[1]))
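# Quick sanity check of the waypoint rotation (illustrative):
# rotating the starting waypoint (10, 1) left by 90 degrees gives (-1, 10),
# since x' = 10*cos(90) - 1*sin(90) = -1 and y' = 10*sin(90) + 1*cos(90) = 10.
assert rotate((10, 1), 90) == (-1, 10)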
|
import os
os.system("ls -l; pip install discordpy-slash")
import webserver
import io
import re
import json
import time
import base64
import asyncio
import discord
import inspect
import aiohttp
import datetime
import textwrap
import traceback
import contextlib
from discordpy_slash import slash
from random import choice
from datetime import date
from utils import default
from discord.utils import get
from contextlib import redirect_stdout
from discord.ext import commands, tasks
from discord_webhook import DiscordWebhook, DiscordEmbed
from discord.ext.commands import has_permissions, MissingPermissions, errors
ESCAPE_REGEX = re.compile("[`\u202E\u200B]{3,}")
SIGKILL = 9
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix=["!", "<@!780320679886454784>", "<@!780320679886454784> "], intents=intents)
config = default.get("./config.json")
bot.remove_command('help')
logo = 'https://cdn.discordapp.com/icons/780278916173791232/9dbc0f39d731c76be13b4ed9fa471570.webp?size=1024'
# Status
@tasks.loop(seconds=10)
async def status_task():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f'{len(bot.users)} Members'))
await asyncio.sleep(10)
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='DiscordPython.tk'))
await asyncio.sleep(10)
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name='Command: !help'))
await asyncio.sleep(10)
# Bot Events Below:
# Bot Inizialization/ready event
@bot.event
async def on_ready():
bot.session = aiohttp.ClientSession(loop=bot.loop)
bot.owner_id = (await bot.application_info()).owner.id
print('')
count = 0
for guild in bot.guilds:
print("Connected to {}".format(guild))
count +=1
print('')
print('Coded by Benitz Original#1317')
print('')
status_task.start()
await slash.sync_all_commands(bot)
# Member Join Event
@bot.event
async def on_member_join(member):
if member.bot:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780400771975086090/1aG9XbOqyGwRnEdvYie3lvUYAWYyiGkhU_y29TABVHy9_tG5wZd73Fe5TLG1ozG_MlFM')
embed = DiscordEmbed(title='<:a:780407857093935124> Bot Added', description=f'**Bot Name:**\n{member.name} \n\n**Bot ID:**\n{member.id} \n\n**Bot Invite:**\nhttps://discord.com/oauth2/authorize?client_id={member.id}&scope=bot&permissions=0', color=0x33cf25)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
else:
responces = [f'{member.name} Has Joined!', f'Welcome {member.name}!', f'{member.name} joined **Discord.py For Beginner**!', f'{member.name} Joined', f'{member.name} Welcome!']
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780322258824200203/UGR3Yi6727QrvzAbBbr-UOy5T-tSeOvpTYcdEJR2lktSnrFK79LLbrw4d7MKjtBaA2e-')
embed = DiscordEmbed(title=f'{choice(responces)}', description=f'Hey <@{member.id}> Welcome to **Discord.py For Beginners**! \n\n Make sure you read <#780281370508394516> and you know the rules and all that good stuff!\n So you want help with your code? No worries! Simply head over to <#780278916173791235> to get help with code related to the discord.py library, or head on over to <#845963584106266654> for general Python help! \n Happy Coding and we hope you enjoy your stay!', color=0x33cf25)
embed.set_thumbnail(url=f'{member.avatar_url}')
embed.set_footer(text=f'New Discord.py Developer', icon_url=f'{logo}')
webhook.add_embed(embed)
webhook.execute()
fm = discord.Embed(description="This is a server for Discord.py Beginners. You may ask for support and help others, If are not a Beginner and you know a lot of Discord.py, you can get the helper rank! (There is mo process for getting the helper rank, you'll be given it manually if staff see you are decent and actively helping others)\n You can ask a staff member to receive the @help-me role, anyone can ping this role if they need help regarding Discord.py, or any help in general.\n\n Make sure you read and fully understand this channel <#780281370508394516>. You might not know why you were punished If you don't read or understand <#780281370508394516> \n\n If you get kicked or you leave the server [here is a link to join back](https://discord.gg/C8zFM3D2rn)!", color=0x2288c7)
fm.set_author(name="Discord.py For Beginners", icon_url=f"{logo}")
fm.set_footer(text='Discord.py Official Bot', icon_url=logo)
await member.send(embed=fm)
# Member Remove Event
@bot.event
async def on_member_remove(member):
if member.bot:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780400771975086090/1aG9XbOqyGwRnEdvYie3lvUYAWYyiGkhU_y29TABVHy9_tG5wZd73Fe5TLG1ozG_MlFM')
embed = DiscordEmbed(title='<:r:780734645779693568> Bot Removed', description=f'**Bot Name:**\n{member.name} \n\n **Bot ID:**\n{member.id}', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
else:
return
# Error Handler
@bot.event
async def on_command_error(ctx, err):
if isinstance(err, errors.CommandOnCooldown):
await ctx.send(f":stopwatch: Command is on Cooldown, please try again in {err.retry_after:.2f} seconds.")
elif isinstance(err, errors.MissingPermissions):
await ctx.send(f"<:F:780326063120318465> You can't use that command.")
elif isinstance(err, errors.CommandNotFound):
pass
else:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780323537507713035/2sOcxGJQvsSc3_UBGcBIX9bX7OdtmCjUqngeAIJG7hluCG8ZQ4m-YZafB_AARJ_RqzS9')
embed = DiscordEmbed(title='An Error has occurred', description=f'Error: \n ```py\n{err}```', color=0x2F3136)
embed.set_timestamp()
embed.set_thumbnail(url=f'{logo}')
webhook.add_embed(embed)
webhook.execute()
print(err)
# Deletion Log
@bot.event
async def on_message_delete(message):
if message.author.bot:
return
else:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780323537507713035/2sOcxGJQvsSc3_UBGcBIX9bX7OdtmCjUqngeAIJG7hluCG8ZQ4m-YZafB_AARJ_RqzS9')
embed = DiscordEmbed(title='Message Deleted', description=f'**Message Author:** \n <@!{message.author.id}>({message.author.name}#{message.author.discriminator}) \n\n **Message Channel:**\n<#{message.channel.id}> \n\n **Message Content:** \n ```{message.content}```', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
# Bot Commands Below:
# ModMail End
@bot.command(name="modclose", description="Closes Modmail Conversation")
async def modclose(ctx, user: discord.Member):
await ctx.respond()
if ctx.author.guild_permissions.ban_members:
if ctx.channel.category_id == 781002010744979516:
notification = discord.Embed(title='ModMail Ended', description='This Modmail conversation has been ended, the Staff has been disconnected from the conversation.', color=0x2F3136)
notification.set_footer(text='Discord.py For Beginners', icon_url=f'{logo}')
await user.send(embed=notification)
await ctx.send('<:S:790882958574616616> ModMail Ended. Deleting Channel in 5 seconds')
await asyncio.sleep(5)
await ctx.channel.delete(reason='ModMail Support Ended.')
else:
await ctx.message.delete()
await ctx.send('<:F:780326063120318465> This channel is not a ModMail channel.', delete_after=3)
else:
await ctx.message.delete()
        await ctx.send('<:F:780326063120318465> You are not an Administrator, and this is not a ModMail Channel.', delete_after=5)
# Source Command
@bot.command(name="source", description="Shows the bot's source code.", aliases = ["sourcecode", "source-code"])
async def source(ctx):
embed = discord.Embed(title='Discord.py Beginner Source-Code', description="Here is the source Code for Discord.py Beginner's Official Bot.\n https://github.com/BenitzCoding/Discord.py-Beginners", color=0x2F3136)
embed.set_image(url='https://media.discordapp.net/attachments/715492844768591945/783944318133600266/source.png?width=961&height=541')
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
# Reminder
@bot.command(name="reminder", description="reminds you something after said amount of time.", case_insensitive = True, aliases = ["remind", "remindme", "remind_me"])
@commands.bot_has_permissions(attach_files = True, embed_links = True)
async def reminder(ctx, time, *, reminder):
await ctx.respond()
print(time)
print(reminder)
user = ctx.message.author
embed = discord.Embed(color=0x2F3136)
embed.set_footer(text="Discord.py For Beginners", icon_url=f"{logo}")
seconds = 0
if reminder is None:
embed.add_field(name='Warning', value=' Run the command again but specify what do you want me to remind you about.') # Error message
if time.lower().endswith("d"):
seconds += int(time[:-1]) * 60 * 60 * 24
counter = f"{seconds // 60 // 60 // 24} days"
if time.lower().endswith("h"):
seconds += int(time[:-1]) * 60 * 60
counter = f"{seconds // 60 // 60} hours"
elif time.lower().endswith("m"):
seconds += int(time[:-1]) * 60
counter = f"{seconds // 60} minutes"
elif time.lower().endswith("s"):
seconds += int(time[:-1])
counter = f"{seconds} seconds"
if seconds == 0:
embed.add_field(name='Warning',
value='Please specify a proper duration, do `!help reminder` for more information.')
elif seconds < 300:
embed.add_field(name='Warning',
value='You have specified a too short duration!\nMinimum duration is 5 minutes.')
elif seconds > 7776000:
embed.add_field(name='Warning', value='You have specified a too long duration!\nMaximum duration is 90 days.')
else:
beforermd = discord.Embed(title='Reminder Set', description=f'You will be reminded in {counter}', color=0x2F3136)
beforermd.set_footer(text='Discord.py For Beginners', icon_url=logo)
afterrmd = discord.Embed(title='Reminder', description=f'**Your reminder:** \n {reminder} \n\n *reminder set {counter} ago*', color=0x2F3136)
afterrmd.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=beforermd)
await asyncio.sleep(seconds)
await ctx.send(embed=afterrmd)
return
await ctx.send(embed=embed)
# Tempban
@bot.command(name="tempban", description="Temps bans a user.", case_insensitive = True, aliases = ["temp-ban", "temp_ban"])
@commands.bot_has_permissions(ban_members = True)
async def tempban(ctx, user: discord.Member, time, *, reason):
await ctx.respond()
    print(time)
    print(reason)
    # keep `user` as the member passed to the command; do not overwrite it with the author
    embed = discord.Embed(color=0x55a7f7, timestamp=datetime.datetime.utcnow())
embed.set_footer(text="Discord.py For Beginners", icon_url=f"{logo}")
seconds = 0
if reason is None:
await ctx.send('<:F:780326063120318465> User not banned, because no reason was specified.') # Error message
if time.lower().endswith("y"):
seconds += int(time[:-1]) * 60 * 60 * 24 * 365
counter = f"{seconds // 60 // 60 // 24 // 365} years"
if time.lower().endswith("d"):
seconds += int(time[:-1]) * 60 * 60 * 24
counter = f"{seconds // 60 // 60 // 24} days"
if time.lower().endswith("h"):
seconds += int(time[:-1]) * 60 * 60
counter = f"{seconds // 60 // 60} hours"
elif time.lower().endswith("m"):
seconds += int(time[:-1]) * 60 * 60 * 24 * 30
counter = f"{seconds // 60 // 60 // 24 // 30} months"
elif time.lower().endswith("s"):
seconds += int(time[:-1])
counter = f"{seconds} seconds"
if seconds == 0:
await ctx.send('<:F:780326063120318465> User not banned, because no time was specified.')
else:
        audit = get(ctx.guild.channels, id=780323115360190484)
beforermd = discord.Embed(title='Banned User', description=f'User has been banned for {counter} \n\n **reason:**\n{reason}', color=0x2F3136)
beforermd.set_footer(text='Discord.py For Beginners', icon_url=logo)
log = discord.Embed(title='User Temp-Banned', description=f'**User:**\n<@!{user.id}>({user.name}#{user.discriminator}) \n\n **Moderator:**\n<@!{ctx.author.id}>({ctx.author.name}#{ctx.author.discriminator}) \n\n **Reason:**\n{reason}', color=0x2F3136)
log.set_footer(text='Discord.py For Beginners', icon_url=logo)
afterrmd = discord.Embed(title='User Unbanned', description=f'**User:**\n{user} \n\n **Unbanned after:**\n{counter}', color=0x2F3136)
        afterrmd.set_footer(text='Discord.py For Beginners', icon_url=logo)
banned = discord.Embed(title='Discord.py For Beginners', description=f'You have been banned on **Discord.py For Beginners** for **{counter}**', color=0x2F3136)
banned.set_footer(text='Discord.py For Beginners', icon_url=logo)
await audit.send(embed=log)
await ctx.send(embed=beforermd)
await user.send(embed=banned)
        await ctx.guild.ban(user, reason=reason)
await asyncio.sleep(seconds)
await ctx.guild.unban(user)
await audit.send(embed=afterrmd)
return
await ctx.send(embed=embed)
# ModMail Reply
@bot.command(name="modrep", description="Replies to ModMail.")
@commands.has_permissions(manage_messages=True)
async def modrep(ctx, user: discord.Member, *, message: str):
await ctx.respond()
try:
embed = discord.Embed(title='Modmail Support', description=f'{message}', color=0x2F3136)
embed.set_footer(text=f'Discord.py For Beginners', icon_url=logo)
await user.send(embed=embed)
await ctx.send(f"Notified User!")
except discord.Forbidden:
await ctx.send("Unable to notify user.")
# Add Bot Command
@bot.command(name="addbot", description="Gives a request to admins about adding a bot.")
async def addbot(ctx, url, *, reason):
await ctx.respond()
if reason is None:
reply = discord.Embed(title='Bot was not Requested', description='Your Bot was not requested, please specify a reason for your bot to be added.', color=0x2F3136)
reply.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.message.delete()
msg = await ctx.send(embed=reply, delete_after=5)
await asyncio.sleep(5)
await msg.delete()
else:
webhook = DiscordWebhook(url='webhook url')
embed = DiscordEmbed(title='New Bot Request', description=f'Bot Requested by <@!{ctx.author.id}> \n\n**Reason:**\n{reason}\n\n:link: [Bot Invite](https://discord.com/oauth2/authorize?client_id={url}&scope=bot&permissions=0)', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
webhook2 = DiscordWebhook(url='webhook url')
embed2 = DiscordEmbed(title='New Bot Request', description=f'Bot Requested by <@!{ctx.author.id}> \n\n**Reason to add bot:** \n{reason}', color=0x2F3136)
embed2.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook2.add_embed(embed2)
webhook2.execute()
reply = discord.Embed(title='Bot has been Requested', description='Your Bot has been requested, if this was a troll, or a prank, you will be punished.', color=0x2F3136)
reply.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.message.delete()
msg = await ctx.send(embed=reply, delete_after=5)
await asyncio.sleep(5)
await msg.delete()
#Bot Approve Command
@bot.command(name="approve", description="approves a bot request")
@commands.has_permissions(administrator=True)
async def approve(ctx, user: discord.Member, *, reason: commands.clean_content):
if reason is None:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780400771975086090/1aG9XbOqyGwRnEdvYie3lvUYAWYyiGkhU_y29TABVHy9_tG5wZd73Fe5TLG1ozG_MlFM')
embed = DiscordEmbed(title='<:D:780326506366500864> Bot Request Approved', description=f'**Approved By:** {ctx.author.mention}({ctx.author.name}#{ctx.author.discriminator}) \n\n**Bot Owner:** {user.mention}({user.name}#{user.discriminator}) \n\n**Reason:**\n**NOT SPECIFIED**', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
await ctx.send('<:D:780326506366500864> Bot Approved')
else:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780400771975086090/1aG9XbOqyGwRnEdvYie3lvUYAWYyiGkhU_y29TABVHy9_tG5wZd73Fe5TLG1ozG_MlFM')
embed = DiscordEmbed(title='<:D:780326506366500864> Bot Request Approved', description=f'**Approved By:** {ctx.author.mention}({ctx.author.name}#{ctx.author.discriminator}) \n\n**Bot Owner:** {user.mention}({user.name}#{user.discriminator}) \n\n**Reason:**\n{reason}', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
await ctx.send('<:D:780326506366500864> bot Approved')
# Bot Disapprove Command
@bot.command(name="disapprove", description="disapproves a bot request")
@commands.has_permissions(administrator=True)
async def disapprove(ctx, user: discord.Member, *, reason: commands.clean_content):
if reason is None:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780400771975086090/1aG9XbOqyGwRnEdvYie3lvUYAWYyiGkhU_y29TABVHy9_tG5wZd73Fe5TLG1ozG_MlFM')
embed = DiscordEmbed(title='<:F:780326063120318465> Bot Request Disapproved', description=f'**Disapproved By:** {ctx.author.mention}({ctx.author.name}#{ctx.author.discriminator}) \n\n**Bot Owner:** {user.mention}({user.name}#{user.discriminator}) \n\n**Reason:**\n**NOT SPECIFIED**', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
await ctx.send('<:F:780326063120318465> Bot Disapproved!')
else:
webhook = DiscordWebhook(url='https://discord.com/api/webhooks/780400771975086090/1aG9XbOqyGwRnEdvYie3lvUYAWYyiGkhU_y29TABVHy9_tG5wZd73Fe5TLG1ozG_MlFM')
embed = DiscordEmbed(title='<:F:780326063120318465> Bot Request Disapproved', description=f'**Disapproved By:** {ctx.author.mention}({ctx.author.name}#{ctx.author.discriminator}) \n\n**Bot Owner:** {user.mention}({user.name}#{user.discriminator}) \n\n**Reason:**\n{reason}', color=0x2F3136)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
webhook.add_embed(embed)
webhook.execute()
await ctx.send('<:F:780326063120318465> Bot Disapproved')
# Help Group
@bot.command(name="help", description="Shows all commands")
async def help(ctx, command=None):
if command is None:
embed = discord.Embed(timestamp=ctx.message.created_at, title='Discord Python Official Bot', description='You can do `!help <command>` to get more info about the command.', color=0x2F3136)
embed.add_field(name='<:D:780326506366500864> Staff Commands', value='```approve, disapprove, modrep, modclose, check, shutdown```')
embed.add_field(name='<:C:780327572847853628> User Commands', value='```addbot, eval, ping, avatar, about, report, ticket, close```')
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "modrep":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='ModRep Command')
embed.add_field(name='Command Description:', value='This command is used to reply to the user on a modmail.', inline=False)
embed.add_field(name='Command Permissions:', value='`MANAGE_MESSAGES`', inline=False)
embed.add_field(name='Usage:', value='```py\n!modrep <@!userID> <messsage>```', inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "ping":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Ping Command')
        embed.add_field(name='Command Description:', value="This command checks the bot's WebSocket and REST ping.", inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value='```py\n!ping```', inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "report":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Report Command')
        embed.add_field(name='Command Description:', value='This command sends an alert to the Discord.py Staff members with your report reason.', inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value='```py\n!report <@!userID> <reason>```', inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "eval":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Eval Command')
embed.add_field(name='Command Description:', value='This command gets the given code and executes and gives the results of the given code, this is a way of testing your code.', inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value="```py\n-eval \nprint('test')```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "about":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='About Command')
embed.add_field(name='Command Description:', value='This command gives you general information about the Server.', inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value="```py\n!about```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "check":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Check Command')
embed.add_field(name='Command Description:', value='This command gets the user info of a mentioned user.', inline=False)
embed.add_field(name='Command Permissions:', value='`MANAGE_MESSAGES`', inline=False)
embed.add_field(name='Usage:', value="```py\n!check <@!userID>```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "avatar":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Avatar Command')
embed.add_field(name='Command Description:', value='This command shows the avatar of a mentioned user.', inline=False)
        embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value="```py\n!avatar <@!userID>``` or ```py\n!avatar```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "shutdown":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Shutdown Command')
embed.add_field(name='Command Description:', value='This command Shuts Down the bot.', inline=False)
embed.add_field(name='Command Permissions:', value='`OWNER_ONLY`', inline=False)
embed.add_field(name='Usage:', value="```py\n!shutdown```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "addbot":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Add Bot Command')
        embed.add_field(name='Command Description:', value='This command sends the bot request to the staff with a generated invite so the staff members can review the bot.', inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value="```py\n!addbot <BotID> <reason>```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "approve":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Approve Command')
embed.add_field(name='Command Description:', value="This command Approves a user's requested bot and notifies the user that the bot has been approved.", inline=False)
embed.add_field(name='Command Permissions:', value='`ADMINISTRATOR`', inline=False)
embed.add_field(name='Usage:', value="```py\n!approve <@userID> <reason>```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "disapprove":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Disapprove Command')
embed.add_field(name='Command Description:', value="This command Disapproves a user's requested bot and notifies the user that the bot has been disapproved.", inline=False)
embed.add_field(name='Command Permissions:', value='`ADMINISTRATOR`', inline=False)
embed.add_field(name='Usage:', value="```py\n!disapprove <@userID> <reason>```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "ticket":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Ticket Command')
embed.add_field(name='Command Description:', value="This command will create a ticket with a provided reason.", inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value="```py\n!ticket <reason>```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "close":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Close Command')
embed.add_field(name='Command Description:', value="This command will Delete your ticket after you're done.", inline=False)
embed.add_field(name='Command Permissions:', value='`@everyone`', inline=False)
embed.add_field(name='Usage:', value="```py\n!close```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
elif command == "modclose":
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.set_author(name='Mod Close Command')
embed.add_field(name='Command Description:', value="This command will End the ModMail support.", inline=False)
embed.add_field(name='Command Permissions:', value='`BAN_MEMBERS`', inline=False)
embed.add_field(name='Usage:', value="```py\n!modclose <@userID>```", inline=False)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
# Report Command
@bot.command(name="report", description="Report someone who broke a rule.")
@commands.cooldown(1, 300, commands.BucketType.user)
async def report(ctx, suspect: discord.Member, *, crime: commands.clean_content):
    if crime is None:
        embed = discord.Embed(title='<:F:780326063120318465> No Report Sent', description="No report was sent because you didn't specify a reason for your report.", color=0x2F3136)
        await ctx.send(embed=embed)
else:
guild = get(bot.guilds, id=780278916173791232)
channel = get(guild.text_channels, id=781370368702808064)
report = discord.Embed(title='New Report', color=0x2F3136)
report.add_field(name='Reporter:', value=f'<@!{ctx.author.id}>({ctx.author.name}#{ctx.author.discriminator})', inline=False)
report.add_field(name="Reporter's ID:", value=f'{ctx.author.id}', inline=False)
report.add_field(name='Reported User:', value=f'<@!{suspect.id}>({suspect.name}#{suspect.discriminator})', inline=False)
report.add_field(name="Reported User's ID:", value=suspect.id, inline=False)
report.add_field(name='Reason:', value=f'{crime}', inline=False)
report.set_thumbnail(url=ctx.author.avatar_url)
report.set_footer(text='Discord.py For Beginners', icon_url=logo)
await channel.send(embed=report)
        response = discord.Embed(title='<:D:780326506366500864> Report Sent', description='Your report has been sent to the **Discord.py For Beginners** Staff.\nOur staff will review your report and take action accordingly.', color=0x2F3136)
        response.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=response, delete_after=5)
# Ticket Close
@bot.command(name="close", description="close your ticket")
async def close(ctx):
if ctx.channel.category_id == 780420074719936534:
if ctx.channel.name == f'ticket-{ctx.author.discriminator}':
await ctx.send('<:S:790882958574616616> Closing Ticket in 5 seconds.')
await asyncio.sleep(5)
await ctx.channel.delete(reason="Author of this ticket decided to close it.")
elif ctx.author.guild_permissions.administrator:
await ctx.send('<:S:790882958574616616> Closing Ticket in 5 seconds.')
await asyncio.sleep(5)
await ctx.channel.delete(reason="Author of this ticket decided to close it.")
else:
await ctx.send("<:F:780326063120318465> You can't close this ticket")
else:
await ctx.send(f"<:F:780326063120318465> This Channel is not a Ticket.")
# Ticket Command
@bot.command(name="ticket", description="Open a ticket.")
async def ticket(ctx, *, reason=None):
if ctx.channel.id == 780418954236788737:
        if reason is None:
            await ctx.send("<:F:780326063120318465> Your Ticket was not created because you didn't specify a reason.")
else:
guild = get(bot.guilds, id=780278916173791232)
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
category = bot.get_channel(780420074719936534)
chnl = await guild.create_text_channel(name=f'ticket-{ctx.author.discriminator}', overwrites=overwrites, reason='New Ticket', category=category)
await chnl.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
chan = discord.utils.get(guild.text_channels, name=f'ticket-{ctx.author.discriminator}')
embed = discord.Embed(title=f"{ctx.author.name}'s Ticket", description=f"This Ticket has been created in **Discord.py For Beginners** Server\n\n**Reason:**\n{reason}", color=0x2F3136)
embed.set_thumbnail(url=ctx.author.avatar_url)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await chan.send(embed=embed)
await ctx.send(f'<:S:790882958574616616> Your Ticket has been created! <#{chan.id}>')
else:
await ctx.send('<:F:780326063120318465> You can only create tickets in <#780418954236788737>.')
# About Command
@bot.command(name="about", description="Shows information about the server and the bot.")
@commands.guild_only()
async def about(ctx):
    asd = get(bot.guilds, id=780278916173791232)
    if ctx.guild.id == asd.id:
embed = discord.Embed(timestamp=ctx.message.created_at, title='About')
embed.add_field(name='Developer:', value='`Benitz Original#1317`')
embed.add_field(name='Server Members:', value=f'{len(bot.users)}')
embed.add_field(name='Server ID:', value='`780278916173791232`')
embed.add_field(name='Server Owner:', value='`Benitz Original#1317`')
        embed.add_field(name='Server Creation Date:', value=asd.created_at.__format__('%A, %d. %B %Y'))
embed.add_field(name='Server Region:', value='`US Central`')
embed.set_thumbnail(url=logo)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
else:
return
# Check Command
@bot.command(name="check", description="checks the user's info.")
@commands.has_permissions(manage_messages=True)
async def check(ctx, user: discord.Member = None):
if user is None:
user = ctx.message.author
if user.activity is not None:
game = user.activity.name
else:
game = None
voice_state = None if not user.voice else user.voice.channel
embed = discord.Embed(timestamp=ctx.message.created_at, color=0x2F3136)
embed.add_field(name='User ID:', value=user.id, inline=False)
embed.add_field(name='Nick:', value=user.nick, inline=False)
embed.add_field(name='Status:', value=user.status, inline=False)
embed.add_field(name='On Mobile:', value=user.is_on_mobile(), inline=False)
embed.add_field(name='In Voice:', value=voice_state, inline=True)
embed.add_field(name='Game / Custom Status:', value=game, inline=False)
embed.add_field(name='Highest Role:', value=user.top_role.name, inline=False)
embed.add_field(name='Account Created Date:', value=user.created_at.__format__('%A, %d. %B %Y'))
embed.add_field(name='Account Creation Time:', value=user.created_at.__format__('%H:%M:%S'))
embed.add_field(name='Join Date:', value=user.joined_at.__format__('%A, %d. %B %Y'), inline=False)
embed.add_field(name='Joined Time:', value=user.joined_at.__format__('%H:%M:%S'), inline=True)
embed.set_thumbnail(url=user.avatar_url)
embed.set_author(name=user.name, icon_url=user.avatar_url)
embed.set_footer(text='Discord.py For Beginners', icon_url=logo)
await ctx.send(embed=embed)
# Ping Command
@bot.command(name="ping", description="shows the bot's ping.")
async def ping(ctx):
before = time.monotonic()
before_ws = int(round(bot.latency * 1000, 1))
message = await ctx.send("🏓 Pong", delete_after=0)
ping = (time.monotonic() - before) * 1000
    p = discord.Embed(title="Discord Python's Ping", description=f'WebSocket Ping: `{before_ws}ms` | REST Ping: `{int(ping)}ms`', color=0x2F3136)
    p.set_footer(text='Discord.py For Beginners', icon_url=logo)
p.timestamp = datetime.utcnow()
await ctx.send(embed=p)
# Shutdown Command
@bot.command(name="shutdown", description="shuts down the bot offline.")
async def shutdown(ctx):
access = [529499034495483926, 635838945862746113]
    if ctx.author.id in access:
        await ctx.send('<:S:790882958574616616> Bot is shutting down.')
        await ctx.message.delete()
await bot.change_presence(status=discord.Status.offline)
await bot.logout()
else:
await ctx.send("<:F:780326063120318465> You don't have access to that command.")
# Load Cog
@bot.command(name="load", description="Loads a cog")
@commands.is_owner()
async def load(ctx, *, name: str):
try:
bot.load_extension(f"cogs.{name}")
except Exception as e:
return await ctx.send(default.traceback_maker(e))
em = discord.Embed(title="Cog Loaded", description=f"**{name}** cog has been loaded.", color=0x00FF00)
await ctx.send(embed=em)
# Unload Cog
@bot.command(name="unload", description="unloads a cog")
@commands.is_owner()
async def unload(ctx, *, name: str):
try:
bot.unload_extension(f"cogs.{name}")
except Exception as e:
return await ctx.send(default.traceback_maker(e))
em = discord.Embed(title="Cog Unloaded", description=f"**{name}** cog has been unloaded.", color=0x00FF00)
await ctx.send(embed=em)
# Reload Cog
@bot.command(name="reload", description="reloads a cog")
@commands.is_owner()
async def reload(ctx, *, name: str):
try:
bot.reload_extension(f"cogs.{name}")
except Exception as e:
return await ctx.send(default.traceback_maker(e))
em = discord.Embed(title="Cog Reloaded", description=f"**{name}** cog has been reloaded.", color=0x00FF00)
await ctx.send(embed=em)
# Read Cogs
for file in os.listdir("./cogs"):
if file.endswith(".py"):
name = file[:-3]
bot.load_extension(f"cogs.{name}")
# Run Bot
try:
with open('./config.json') as f:
token = json.load(f).get('token') or os.environ.get('token')
bot.run(token, reconnect=True)
except Exception as e:
print(e)
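# Illustrative shape of the config.json read above (the token value is a placeholder):
# {
#     "token": "YOUR_BOT_TOKEN"
# }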
|
from typing import Optional
from pydantic import BaseModel
class AuditLogDetails(BaseModel):
parameter_name: str
time: int
action: str
user: str
value: Optional[str]
decrypted_value: Optional[str]
type: Optional[str]
description: Optional[str]
version: Optional[int]
key_id: Optional[str]
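# Minimal usage sketch (field values are illustrative, not real audit data): constructing
# the model from keyword arguments validates the required fields, and .dict() serializes
# it back to a plain dict; optional fields default to None.
if __name__ == "__main__":
    example = AuditLogDetails(
        parameter_name="/app/db_password",  # hypothetical parameter name
        time=1609459200,
        action="PutParameter",
        user="alice",
        type="SecureString",
        version=1,
    )
    print(example.dict())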
|
import numpy as np
import time
import cv2
INPUT_FILE='e.png'
OUTPUT_FILE='predicted.jpg'
LABELS_FILE='yolo-coco/coco.names'
CONFIG_FILE='yolo-coco/yolov3.cfg'
WEIGHTS_FILE='best.pt'  # note: cv2.dnn.readNetFromDarknet below expects a Darknet .weights file; a PyTorch .pt checkpoint will not load with it
CONFIDENCE_THRESHOLD=0.7
def obj_det():
LABELS = open(LABELS_FILE).read().strip().split("\n")
np.random.seed(4)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
net = cv2.dnn.readNetFromDarknet(CONFIG_FILE, WEIGHTS_FILE)
image = cv2.imread(INPUT_FILE)
image = cv2.resize(image, (840, 640))
(H, W) = image.shape[:2]
ln = net.getLayerNames()
    # note: newer OpenCV releases return a flat array from getUnconnectedOutLayers(),
    # in which case this should be ln[i - 1] rather than ln[i[0] - 1]
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# print("[INFO] YOLO took {:.6f} seconds".format(end - start))
# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONFIDENCE_THRESHOLD:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD,
CONFIDENCE_THRESHOLD)
label_pred = []
if len(idxs) > 0:
for i in idxs.flatten():
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(image, (x, y), (x+w, y+h), color, 2)
text = "{}:{:.4f}".format(LABELS[classIDs[i]], confidences[i])
cv2.putText(image, text, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
# if (LABELS[classIDs[i]] != 'person'):
# (x_object, y_object) = (boxes[i][0], boxes[i][1])
# (w_object, h_object) = (boxes[i][2], boxes[i][3])
# cv2.rectangle(image, (x_object, y_object), (x_object+w_object, y_object+h_object), color, 2)
# text = "{}:{:.4f}".format(LABELS[classIDs[i]], confidences[i])
# cv2.putText(image, text, (x_object, y_object-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
name = LABELS[classIDs[i]]
label_pred.append(name)
print('d' in label_pred)
cv2.imshow("Image", image)
cv2.waitKey()
obj_det()
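# Note (sketch, not part of the original script): OUTPUT_FILE is defined above but never
# written. To persist the annotated frame instead of only displaying it, obj_det() could
# be changed to `return image` and the caller could do:
#
#     image = obj_det()
#     cv2.imwrite(OUTPUT_FILE, image)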
|
#!python
import sys
import os
import glob
import argparse
import atexit
import shutil
import tempfile
import subprocess
temp_dir = tempfile.mkdtemp(prefix='vpview-tmp')
atexit.register(lambda: shutil.rmtree(temp_dir))
if os.name == 'nt':
div = '\\'
else:
div = '/'
# Helper class to list files with a given extension in a directory
def list_files_in_dir( folder ):
if os.path.exists( folder ):
return [
os.path.join( folder, f ) for f in sorted( os.listdir( folder ) )
if not f.startswith('.')
]
return []
def list_files_in_dir_w_ext( folder, extension ):
return [f for f in list_files_in_dir(folder) if f.endswith(extension)]
def glob_files_in_folder( folder, prefix, extension ):
return glob.glob( os.path.join( folder, prefix ) + "*" + extension )
def multi_glob_files_in_folder( folder, prefixes, extensions ):
output = []
for prefix in prefixes:
for extension in extensions:
output.extend( glob.glob( os.path.join( folder, prefix ) + "*" + extension ) )
return output
def get_script_path():
return os.path.dirname( os.path.realpath( sys.argv[0] ) )
def create_dir( dirname ):
if not os.path.exists( dirname ):
print( "Creating " + dirname )
os.makedirs( dirname )
if not os.path.exists( dirname ):
print( "Unable to create " + dirname )
sys.exit( 0 )
def get_gui_cmd( debug=False ):
if os.name == 'nt':
return ['vpView.exe']
else:
if debug:
return [ 'gdb', '--args', 'vpView' ]
else:
return ['vpView']
def execute_command( cmd, stdout=None, stderr=None ):
return subprocess.call(cmd, stdout=stdout, stderr=stderr)
def find_file( filename ):
if( os.path.exists( filename ) ):
return os.path.abspath( filename )
elif os.path.exists( get_script_path() + div + filename ):
return get_script_path() + div + filename
else:
print( "Unable to find " + filename )
sys.exit( 0 )
def create_pipelines_list( glob_str ):
(fd, name) = tempfile.mkstemp(prefix='vpview-pipelines-',
suffix='.ini',
text=True, dir=temp_dir)
search_str = os.path.join( get_script_path(), glob_str )
pipeline_files = sorted( glob.glob( search_str ) )
total_entries = len( pipeline_files )
f = os.fdopen(fd, 'w')
f.write("[EmbeddedPipelines]\n")
f.write("size=" + str( total_entries ) + "\n")
for ind, full_path in enumerate( pipeline_files ):
name_id = os.path.splitext( os.path.basename( full_path ) )[0]
f.write("%s\\Name=\"%s\"\n" % (ind+1, name_id) )
f.write("%s\\Path=\"%s\"\n" % (ind+1, full_path.replace("\\","\\\\") ) )
f.close()
return name
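# Illustrative content of the generated .ini (pipeline names below are made up), given
# two files detect.pipe and track.pipe matched by the glob:
#
#   [EmbeddedPipelines]
#   size=2
#   1\Name="detect"
#   1\Path="/path/to/detect.pipe"
#   2\Name="track"
#   2\Path="/path/to/track.pipe"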
def default_annotator_args( args ):
command_args = []
if len( args.gui_theme ) > 0:
command_args += [ "--theme", find_file( args.gui_theme ) ]
if len( args.pipelines ) > 0:
command_args += [ "--import-config", create_pipelines_list( args.pipelines ) ]
return command_args
def get_pipeline_cmd( debug=False ):
if os.name == 'nt':
if debug:
return ['pipeline_runner.exe']
else:
return ['pipeline_runner.exe']
else:
if debug:
return ['gdb', '--args', 'pipeline_runner']
else:
return ['pipeline_runner']
def generate_index_for_video( args, file_path, basename ):
if not os.path.isfile( file_path ):
print( "Unable to find file: " + file_path )
sys.exit( 0 )
cmd = get_pipeline_cmd() + \
["-p", find_file( args.cache_pipeline ) ] + \
["-s", 'input:video_filename=' + file_path ] + \
["-s", 'input:video_reader:type=vidl_ffmpeg' ] + \
["-s", 'kwa_writer:output_directory=' + args.cache_dir ] + \
["-s", 'kwa_writer:base_filename=' + basename ] + \
["-s", 'kwa_writer:stream_id=' + basename ]
if len( args.frame_rate ) > 0:
cmd += ["-s", 'downsampler:target_frame_rate=' + args.frame_rate ]
execute_command( cmd )
return args.cache_dir + div + basename + ".index"
def select_option( option_list, display_str="Select Option:" ):
sys.stdout.write( "\n" )
counter = 1
for option in option_list:
print( "(" + str(counter) + ") " + option )
counter = counter + 1
sys.stdout.write( "\n" + display_str + " " )
sys.stdout.flush()
if sys.version_info[0] < 3:
choice = raw_input().lower()
else:
choice = input().lower()
if int( choice ) < 1 or int( choice ) > len( option_list ):
print( "Invalid selection, must be a valid number" )
sys.exit(0)
return int( choice ) - 1
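# Illustrative interaction with select_option() (entries are examples):
#
#   (1) With No Imagery Loaded
#   (2) my_video.mp4 (cached in: database)
#
#   Select Option: 2      -> returns 1 (zero-based index into option_list)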
def process_video( args ):
print( "Function not yet implemented" )
sys.exit(0)
def process_list( args ):
print( "Function not yet implemented" )
sys.exit(0)
def process_video_dir( args ):
video_files = list_files_in_dir( args.video_dir )
index_files = list_files_in_dir_w_ext( args.cache_dir, "index" )
video_files.sort()
index_files.sort()
video_files_no_ext_no_path = [os.path.splitext(os.path.basename(f))[0] for f in video_files]
index_files_no_ext_no_path = [os.path.splitext(os.path.basename(f))[0] for f in index_files]
net_files = video_files_no_ext_no_path
net_full_paths = video_files
total_video_count = len( video_files_no_ext_no_path )
total_index_count = len( index_files_no_ext_no_path )
has_index = [False] * total_video_count
for fpath, fname in zip( index_files, index_files_no_ext_no_path ):
if fname in net_files:
index = net_files.index( fname )
has_index[ index ] = True
net_full_paths[ index ] = fpath
else:
net_files.append( fname )
has_index.append( True )
net_full_paths.append( fpath )
if len( net_files ) == 0:
print( "\nError: No videos found in input directory: " + args.video_dir + "\n" )
print( "If you want to load videos, not just images, make sure it is non-empty" )
# Have user select video file
file_list = []
for fname, is_cached in zip( net_files, has_index ):
file_list.append( fname + ( " (cached in: " + args.cache_dir + ")" if is_cached else "" ) )
if len( file_list ) > 0 and file_list[0].islower():
no_file = "with_no_imagery_loaded"
else:
no_file = "With No Imagery Loaded"
file_list = [no_file] + sorted( file_list )
special_list_option = "input_list.txt"
has_special_list_option = False
if os.path.exists( special_list_option ):
file_list = file_list + [ special_list_option ]
has_special_list_option = True
file_id = select_option( file_list )
if file_id == 0:
execute_command( get_gui_cmd( args.debug ) + default_annotator_args( args ) )
sys.exit(0)
elif has_special_list_option and file_id == len( file_list ) - 1:
file_no_ext = special_list_option
file_has_index = True
file_path = special_list_option
else:
file_id = file_id - 1
file_no_ext = net_files[ file_id ]
file_has_index = has_index[ file_id ]
file_path = net_full_paths[ file_id ]
# Scan for possible detection file
detection_list = []
detection_file = ""
search_prefix = [ file_no_ext + ".", file_no_ext + "_detections", file_no_ext + "_tracks" ]
detection_search = multi_glob_files_in_folder( '.', file_no_ext, ["csv"] )
if len( detection_search ) > 0:
detection_list.extend( detection_search )
if len( args.video_dir ) > 0 and args.video_dir != '.':
detection_search = glob_files_in_folder( args.video_dir, file_no_ext, "csv" )
detection_list.extend( detection_search )
if len( args.cache_dir ) > 0 and args.cache_dir != '.' and args.cache_dir != args.video_dir:
detection_search = glob_files_in_folder( args.cache_dir, file_no_ext, "csv" )
detection_list.extend( detection_search )
detection_list = sorted( detection_list )
if len( detection_list ) > 0:
if len( detection_list ) > 0 and detection_list[0].islower():
no_file = "with_no_detections"
else:
no_file = "Launch Without Loading Detections"
detection_list = [ no_file ] + detection_list
detection_id = select_option( detection_list )
if detection_id != 0:
detection_file = detection_list[ detection_id ]
# Launch GUI with required options
if not file_has_index:
create_dir( args.cache_dir )
if not os.path.isdir( file_path ):
print( "Generating cache for video file, this may take up to a few minutes.\n" )
file_path = generate_index_for_video( args, file_path, file_no_ext )
else:
from process_video import make_filelist_for_image_dir
file_path = make_filelist_for_image_dir( file_path, args.cache_dir,
file_no_ext )
(fd, name) = tempfile.mkstemp(prefix='vpview-project-',
suffix='.prj',
text=True, dir=temp_dir)
ftmp = os.fdopen(fd, 'w')
ftmp.write( "DataSetSpecifier=" + os.path.abspath( file_path ).replace("\\","\\\\") + "\n" )
if len( detection_file ) > 0:
ftmp.write( "TracksFile=" + os.path.abspath( detection_file ).replace("\\","\\\\") + "\n" )
ftmp.close()
execute_command( get_gui_cmd( args.debug ) + [ "-p", name ] + default_annotator_args( args ) )
# Main Function
if __name__ == "__main__" :
parser = argparse.ArgumentParser(description="Launch annotation GUI",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", dest="video_dir", default="",
help="Input directory containing videos to run annotator on")
parser.add_argument("-c", dest="cache_dir", default="",
help="Input directory containing cached video .index files")
parser.add_argument("-o", dest="output_directory", default="database",
help="Output directory to store files in")
parser.add_argument("-v", dest="input_video", default="",
help="Input video file to run annotator on")
parser.add_argument("-l", dest="input_list", default="",
help="Input image list file to run annotator on")
parser.add_argument("-theme", dest="gui_theme",
default="gui-params" + div + "dark_gui_settings.ini",
help="Predefined query directory, if present")
parser.add_argument("-pipelines", dest="pipelines",
default="pipelines" + div + "embedded_single_stream" + div + "*.pipe",
help="Glob pattern for runable processing pipelines")
parser.add_argument("-cache-pipe", dest="cache_pipeline",
default="pipelines" + div + "filter_to_kwa.pipe",
help="Pipeline used for generative video .index files")
parser.add_argument("-frate", dest="frame_rate", default="",
help="Frame rate over-ride to process videos at")
parser.add_argument("--debug", dest="debug", action="store_true",
help="Run with debugger attached to process")
parser.set_defaults( debug=False )
args = parser.parse_args()
if len( args.video_dir ) > 0 or len( args.cache_dir ) > 0:
process_video_dir( args )
elif len( args.input_video ) > 0:
process_video( args )
elif len( args.input_list ) > 0:
process_list( args )
else:
execute_command( get_gui_cmd( args.debug ) + default_annotator_args( args ) )
|
import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
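# Illustrative sketch (not the actual .models module) of the kind of composite key these
# tests exercise: Membership joins to Person on both the person id and the country id,
# so a membership whose country differs from the person's country does not resolve.
#
#   class Membership(models.Model):
#       membership_country = models.ForeignKey(Country, models.CASCADE)
#       person_id = models.IntegerField()
#       group_id = models.IntegerField(blank=True, null=True)
#       person = ForeignObject(
#           Person,
#           on_delete=models.CASCADE,
#           from_fields=['person_id', 'membership_country'],
#           to_fields=['id', 'person_country_id'],
#       )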
class MultiColumnFKTests(TestCase):
def setUp(self):
# Creating countries
self.usa = Country.objects.create(name="United States of America")
self.soviet_union = Country.objects.create(name="Soviet Union")
# Creating People
self.bob = Person()
self.bob.name = 'Bob'
self.bob.person_country = self.usa
self.bob.save()
self.jim = Person.objects.create(name='Jim', person_country=self.usa)
self.george = Person.objects.create(name='George', person_country=self.usa)
self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
# Creating Groups
self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
self.cia = Group.objects.create(name='CIA', group_country=self.usa)
self.republican = Group.objects.create(name='Republican', group_country=self.usa)
self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
        # Membership objects raise DoesNotExist when there is no
        # Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
with self.assertRaises(Person.DoesNotExist):
getattr(membership, 'person')
def test_reverse_query_returns_correct_result(self):
        # Creating a valid membership because it has the same country as the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
        # Creating an invalid membership because it has a different country than the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
with self.assertNumQueries(1):
membership = self.bob.membership_set.get()
self.assertEqual(membership.group_id, self.cia.id)
self.assertIs(membership.person, self.bob)
def test_query_filters_correctly(self):
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(membership_country_id=self.soviet_union.id,
person_id=self.george.id, group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__name__contains='o'), [
self.bob.id
],
attrgetter("person_id")
)
def test_reverse_query_filters_correctly(self):
timemark = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=1)
        # Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id, date_joined=timemark - timedelta)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gte=timemark), [
'Jim'
],
attrgetter('name')
)
def test_forward_in_lookup_filters_correctly(self):
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=[self.george, self.jim]), [
self.jim.id,
],
attrgetter('person_id')
)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
self.jim.id,
],
attrgetter('person_id')
)
def test_double_nested_query(self):
m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
self.assertSequenceEqual(
Membership.objects.filter(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
)
),
[m1]
)
self.assertSequenceEqual(
Membership.objects.exclude(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(to_friend__in=Person.objects.all())
)
),
[m2]
)
def test_select_related_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(1):
people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
people = [
m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
membership_sets = [
list(p.membership_set.all())
for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
with self.assertNumQueries(7):
normal_membership_sets = [
list(p.membership_set.all())
for p in Person.objects.order_by('pk')
]
self.assertEqual(membership_sets, normal_membership_sets)
def test_m2m_through_forward_returns_valid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
# Let's check to make sure that it worked. Bob and Jim should be members of the CIA.
self.assertQuerysetEqual(
self.cia.members.all(), [
'Bob',
'Jim'
], attrgetter("name")
)
def test_m2m_through_reverse_returns_valid_members(self):
# We start out by making sure that Bob is in no groups.
self.assertQuerysetEqual(
self.bob.groups.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.bob,
group=self.republican)
# Bob should be in the CIA and a Republican
self.assertQuerysetEqual(
self.bob.groups.all(), [
'CIA',
'Republican'
], attrgetter("name")
)
def test_m2m_through_forward_ignores_invalid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
# Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# There should still be no members in CIA
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
def test_m2m_through_reverse_ignores_invalid_members(self):
# We start out by making sure that Jane has no groups.
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
# Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# Jane should still not be in any groups
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
def test_m2m_through_on_self_works(self):
self.assertQuerysetEqual(
self.jane.friends.all(),
[]
)
Friendship.objects.create(
from_friend_country=self.jane.person_country, from_friend=self.jane,
to_friend_country=self.george.person_country, to_friend=self.george)
self.assertQuerysetEqual(
self.jane.friends.all(),
['George'], attrgetter("name")
)
def test_m2m_through_on_self_ignores_mismatch_columns(self):
self.assertQuerysetEqual(self.jane.friends.all(), [])
        # Note that we use ids instead of instances. This is because instances on ForeignObject
        # properties will set all related fields from the given instance
Friendship.objects.create(
from_friend_id=self.jane.id, to_friend_id=self.george.id,
to_friend_country_id=self.jane.person_country_id,
from_friend_country_id=self.george.person_country_id)
self.assertQuerysetEqual(self.jane.friends.all(), [])
def test_prefetch_related_m2m_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
members_lists = [list(g.members.all())
for g in Group.objects.prefetch_related('members')]
normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
self.assertEqual(members_lists, normal_members_lists)
def test_prefetch_related_m2m_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
self.assertEqual(groups_lists, normal_groups_lists)
@translation.override('fi')
def test_translations(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
at1_fi.save()
at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
at2_en.save()
self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)
with self.assertNumQueries(1):
fetched = Article.objects.select_related('active_translation').get(
active_translation__title='Otsikko')
self.assertEqual(fetched.active_translation.title, 'Otsikko')
a2 = Article.objects.create(pub_date=datetime.date.today())
at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
abstract='dipad')
at2_fi.save()
a3 = Article.objects.create(pub_date=datetime.date.today())
at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
abstract='lala')
at3_en.save()
# Test model initialization with active_translation field.
a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
a3.save()
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a3])
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None,
active_translation__pk__isnull=False)),
[a1])
with translation.override('en'):
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a2])
def test_foreign_key_raises_informative_does_not_exist(self):
referrer = ArticleTranslation()
with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
referrer.article
def test_foreign_key_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
ArticleTag.objects.create(article=a1, name="foo")
self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
msg = (
"Cannot resolve keyword 'tags' into field. Choices are: "
"active_translation, active_translation_q, articletranslation, "
"id, idea_things, newsarticle, pub_date, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(tags__name="foo")
def test_many_to_many_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
i1 = ArticleIdea.objects.create(name="idea1")
a1.ideas.add(i1)
self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
msg = (
"Cannot resolve keyword 'ideas' into field. Choices are: "
"active_translation, active_translation_q, articletranslation, "
"id, idea_things, newsarticle, pub_date, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(ideas__name="idea1")
@translation.override('fi')
def test_inheritance(self):
na = NewsArticle.objects.create(pub_date=datetime.date.today())
ArticleTranslation.objects.create(
article=na, lang="fi", title="foo", body="bar")
self.assertSequenceEqual(
NewsArticle.objects.select_related('active_translation'),
[na]
)
with self.assertNumQueries(1):
self.assertEqual(
NewsArticle.objects.select_related(
'active_translation')[0].active_translation.title,
"foo")
@skipUnlessDBFeature('has_bulk_insert')
def test_batch_create_foreign_object(self):
objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
Person.objects.bulk_create(objs, 10)
def test_isnull_lookup(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group_id=None)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
self.assertQuerysetEqual(
Membership.objects.filter(group__isnull=True),
['<Membership: Bob is a member of NULL>']
)
self.assertQuerysetEqual(
Membership.objects.filter(group__isnull=False),
['<Membership: Bob is a member of CIA>']
)
class TestModelCheckTests(SimpleTestCase):
@isolate_apps('foreign_object')
def test_check_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
@isolate_apps('foreign_object')
def test_check_subset_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'c'),
to_fields=('a', 'b', 'c'),
related_name='children',
)
self.assertEqual(Child._meta.get_field('parent').check(from_model=Child), [])
class TestExtraJoinFilterQ(TestCase):
@translation.override('fi')
def test_extra_join_filter_q(self):
a = Article.objects.create(pub_date=datetime.datetime.today())
ArticleTranslation.objects.create(article=a, lang='fi', title='title', body='body')
qs = Article.objects.all()
with self.assertNumQueries(2):
self.assertEqual(qs[0].active_translation_q.title, 'title')
qs = qs.select_related('active_translation_q')
with self.assertNumQueries(1):
self.assertEqual(qs[0].active_translation_q.title, 'title')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
import pyrotein as pr
from display import plot_rmsd_dmat
import pprint
# Set job name...
job_name = "xfam"
# load the data...
rmsd_dmat = np.load("rmsd_dmat.seq.npy")
len_res = np.load("rmsd_len.seq.npy")
nseqi = np.load("nseqi.seq.npy")
cseqi = np.load("cseqi.seq.npy")
fl_rmsd_dmat = f"{job_name}.rmsd"
# Define a colorscheme...
# Colorscheme is inspired by this publication (DOI: 10.1093/nar/gkw555) by Zhong Ren
pal = "set palette defined ( 0 'seagreen', 0.1 'white', 0.5 'blue', 1 'navy' )"
# Create labels...
# +1 to make it a right-end closed
labels = {'H8': [620, 633],
'TM1': [0, 33],
'TM2': [47, 79],
'TM3': [115, 151],
'TM4': [182, 209],
'TM5': [277, 317],
'TM6': [503, 542],
'TM7': [584, 612]}
for k, v in labels.items(): labels[k] = [ (i) * len_res for i in v ]
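# Illustrative effect of the scaling above: with len_res = 4, 'TM1': [0, 33] becomes
# [0, 132], i.e. residue ranges are mapped onto distance-matrix coordinates (assuming
# len_res is the number of matrix entries per residue).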
plot_rmsd_dmat( rmsd_dmat,
fl_rmsd_dmat,
pop_bin_cap = 50,
fwk_mid = 2.072870590257889,
fwk_tol = 0.5,
fwk_minsize = 10,
fwk_linewidth = 3,
curve_linewidth = 3,
lbl = labels,
width = 10,
height = 12,
fontsize = 29,
lbl_fontsize = 29,
linewidth = 1.0,
palette = pal,
intst_max = "*",
)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import logging
import onnx
import os
import pathlib
import tempfile
from collections import deque
from enum import IntEnum
from ..onnx_model_utils import get_producer_consumer_maps, optimize_model, \
iterate_graph_per_graph_func, iterate_graph_per_node_func, is_fixed_size_tensor
class _SupportedOpsChecker:
'''
Class to process the md file with list of supported ops and caveats for an execution provider.
e.g. /tools/ci_build/github/android/nnapi_supported_ops.md
/tools/ci_build/github/apple/coreml_supported_ops.md
'''
def __init__(self, filename):
self._filename = filename
self._ops = {} # op to caveats
self._ops_seen = set()
with open(filename, 'r') as f:
for line in f.readlines():
# we're looking for a markdown table with 2 columns. first is op name. second is caveats
# op name is domain:op
if line.startswith('|'):
pieces = line.strip().split('|')
if len(pieces) == 4: # pre-first '|'. op, caveat, post-last '|'
domain_op = pieces[1]
caveat = pieces[2]
caveat = caveat.replace('<br/>', ' ') # remove some HTML tags
# skip lines that don't have the ':' which separates the domain and op
# e.g. the table header will fail this check
if ':' in domain_op:
self._ops[domain_op] = caveat
def is_op_supported(self, node):
domain = node.domain if node.domain else 'ai.onnx'
domain_op = domain + ':' + node.op_type
is_supported = domain_op in self._ops
if is_supported:
self._ops_seen.add(domain_op)
return is_supported
def get_caveats(self):
caveats = []
for op in sorted(self._ops_seen):
caveat = self._ops[op]
if caveat:
caveats.append(f'{op}:{caveat}')
return caveats
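# Illustrative row from a supported-ops markdown table (op and caveat are made up):
#
#   |ai.onnx:Conv|Only 2D Conv is supported|
#
# line.strip().split('|') yields ['', 'ai.onnx:Conv', 'Only 2D Conv is supported', ''],
# so the checker stores self._ops['ai.onnx:Conv'] = 'Only 2D Conv is supported'.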
class PartitioningInfo:
class TryWithEP(IntEnum):
        NO = 0
        MAYBE = 1
        YES = 2
def __init__(self):
self.num_nodes = -1 # main graph only
self.num_supported_nodes = -1
self.num_partitions = -1
self.num_nodes_in_subgraphs = -1 # nodes not covered as we don't currently handle subgraphs in nnapi/coreml
self.supported_ops_checker = None
self.supported_groups = []
self.unsupported_ops = set()
self.nodes_unsupported_due_to_op = -1
self.nodes_unsupported_due_to_dynamic_input = -1
def suitability(self):
# for now add up all the nodes. if there are subgraphs, the percentage of covered nodes will be reduced by all
# nodes in the subgraphs.
num_nodes = self.num_nodes + self.num_nodes_in_subgraphs
# semi-arbitrary choices that err on the side of MAYBE.
# having 1 partition is always preferred, but if that is small it may not be useful.
# having 2 partitions may be okay if they cover most nodes
        # more than 2 partitions and the device copy cost is almost guaranteed to outweigh the benefit of using the NPU
# NOTE: This assumes the EP is not CPU based and there is device copy overhead to consider
pct_supported = self.num_supported_nodes / num_nodes * 100
if self.num_partitions == 1:
if pct_supported > 75:
return PartitioningInfo.TryWithEP.YES
elif pct_supported > 50:
return PartitioningInfo.TryWithEP.MAYBE
else:
return PartitioningInfo.TryWithEP.NO
if self.num_partitions == 2:
if pct_supported > 75:
return PartitioningInfo.TryWithEP.MAYBE
else:
return PartitioningInfo.TryWithEP.NO
return PartitioningInfo.TryWithEP.NO
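    # Summary of the heuristic above:
    #   1 partition   -> YES if >75% of nodes are supported, MAYBE if >50%, otherwise NO
    #   2 partitions  -> MAYBE if >75% of nodes are supported, otherwise NO
    #   3+ partitions -> NO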
def dump_analysis(self, logger: logging.Logger, ep_name: str):
'''
Analyze the partitioning information and log the analysis
:param logger: Logger to use
:param ep_name: Execution provider name to use in the log messages
'''
num_nodes = self.num_nodes + self.num_nodes_in_subgraphs
logger.info(f'{self.num_partitions} partitions with a total of {self.num_supported_nodes}/{num_nodes} '
f'nodes can be handled by the {ep_name} EP.')
if self.num_nodes_in_subgraphs:
logger.info(f'{self.num_nodes_in_subgraphs} nodes are in subgraphs, which are currently not handled.')
if self.supported_groups:
logger.info(f'Partition sizes: [{", ".join([str(len(partition)) for partition in self.supported_groups])}]')
logger.info(f'Unsupported nodes due to operator={self.nodes_unsupported_due_to_op}')
if self.nodes_unsupported_due_to_dynamic_input:
logger.info('Unsupported nodes due to input having a dynamic shape=%d',
self.nodes_unsupported_due_to_dynamic_input)
if logger.getEffectiveLevel() <= logging.DEBUG:
# Enable this manually if you need to look at specific partitions.
# for group in supported_groups:
# logger.debug(f'Nodes in group: {",".join([f"{node.name}:{node.op_type}" for node in group])}')
if self.unsupported_ops:
logger.info(f'Unsupported ops: {",".join(sorted(self.unsupported_ops))}')
caveats = self.supported_ops_checker.get_caveats()
if caveats:
indent = ' ' * 5
logger.debug('Caveats that have not been checked and may result in a node not being supported: '
f'{"".join([os.linesep + indent + caveat for caveat in caveats])}')
pct_nodes_using_ep = self.num_supported_nodes / num_nodes * 100
if self.num_partitions == 0:
logger.info(f"{ep_name} cannot run any nodes in this model.")
elif self.num_partitions == 1:
if pct_nodes_using_ep > 75:
logger.info(f"{ep_name} should work well for this model as there is one partition "
f"covering {pct_nodes_using_ep:.1f}% of the nodes in the model.")
elif pct_nodes_using_ep > 50:
logger.info(
f"{ep_name} may work well for this model, however only {pct_nodes_using_ep:.1f}% of nodes "
"will use it. Performance testing is required to validate.")
else:
logger.info(
f"{ep_name} will probably not work will for this model as only {pct_nodes_using_ep:.2f}% "
"of nodes will use it.")
elif self.num_partitions == 2 and pct_nodes_using_ep > 75:
logger.info(f"{ep_name} can be considered for this model as there are two partitions "
f"covering {pct_nodes_using_ep:.1f}% of the nodes. "
"Performance testing is required to validate.")
else:
logger.info(f"{ep_name} is not recommended with this model as there are {self.num_partitions} partitions "
f"covering {pct_nodes_using_ep:.1f}% of the nodes in the model. "
"This will most likely result in worse performance than just using the CPU EP.")
def check_partitioning(graph: onnx.GraphProto, supported_ops_checker: _SupportedOpsChecker,
require_fixed_input_sizes: bool = False, value_info: dict = None):
'''
    Estimate the partitions the graph will be split into for nodes that the supported ops checker reports as supported.
The check on whether a node is supported is purely based on the operator type. Additional limitations
(e.g. NNAPI EP only supports 2D Conv) are not checked, so partitions may not be 100% accurate. The limitations
for operators in the partitions are printed so the user can manually check.
:param graph: Graph to process
:param supported_ops_checker: Checker with info on supported ops.
:param require_fixed_input_sizes: If True, require that the inputs to a potentially supported node are
fixed size tensors for it to be considered as supported.
If True, onnx.shape_inference.infer_shapes should have been run on the model
to populate the shape information.
:param value_info: Map of value name to ValueInfoProto. Required if require_fixed_input_sizes is True to lookup
the shape of a value.
:return PartitioningInfo instance with details
'''
if require_fixed_input_sizes and not value_info:
raise ValueError("value_info must be provided if require_fixed_input_sizes is True.")
node_to_producers, node_to_consumers = get_producer_consumer_maps(graph)
# initializers have fixed sizes.
    # TODO: when adding subgraph support we also need to match against initializers in ancestor graphs as they are
    # accessible from the outer scope (unless shadowed locally)
initializers = [i.name for i in graph.initializer]
def _is_fixed_shape_value(value):
if value in value_info:
return is_fixed_size_tensor(value_info[value])
if value in initializers:
return True
# if something has an unknown shape (e.g. something downstream of a Reshape with dynamic input for the shape)
# it won't have an entry in value_info
return False
#
# Replicate logic from /onnxruntime/core/providers/partitioning_utils.cc:CreateSupportedPartitionNodeGroups
# to roughly estimate number of partitions for nodes that is_node_supported_fn returns true for.
#
# We keep the structure and variable names as close as possible to the C++ implementation to simplify keeping them
# in sync if future updates are needed.
#
# we don't currently support a callback for additional group closure checks in the python implementation
on_group_closed_fn = None
supported_groups = []
# number of inputs from unprocessed nodes (in-degree) per node
in_degree = {}
# nodes that are ready to process
nodes_to_process = deque() # deque of Node instances
# nodes that will be processed when considering the next partition node group
nodes_to_process_with_next_group = deque()
# initialize in-degrees and find root nodes
for node in graph.node:
node_input_edge_count = len(node_to_producers[node]) if node in node_to_producers else 0
in_degree[node] = node_input_edge_count
if node_input_edge_count == 0:
# node is only dependent on graph input or initializers
nodes_to_process.append(node)
# currently we don't support checking subgraphs in the partitioning as they're not handled by NNAPI/CoreML.
# check how many nodes are in that blind spot so we can adjust the recommendation accordingly.
# note: need to pass count in an array so that it's by reference
def _count_subgraph_nodes(cur_graph: onnx.GraphProto, original_graph: onnx.GraphProto, count: [int]):
if cur_graph != original_graph:
count[0] += len(cur_graph.node)
nodes_in_subgraphs = [0] # array with single value
iterate_graph_per_graph_func(graph, _count_subgraph_nodes, original_graph=graph, count=nodes_in_subgraphs)
supported_group = []
# the partition node group's border is the aggregate of its nodes' output nodes
supported_group_border = set()
num_supported_nodes = 0
num_unsupported_nodes_due_to_op = 0
num_unsupported_nodes_due_to_dynamic_input = 0
unsupported_ops = set()
def close_group():
if supported_group:
keep_partition = not on_group_closed_fn or on_group_closed_fn(supported_group)
if keep_partition:
supported_groups.append(supported_group.copy())
supported_group.clear()
supported_group_border.clear()
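    # Topological traversal (Kahn's algorithm): pop nodes whose inputs have all been processed, grow the current
    # partition with supported nodes, and defer unsupported nodes that sit on the partition border until the
    # current group has been closed, so each partition only contains nodes that can be executed together.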
while nodes_to_process or nodes_to_process_with_next_group:
if not nodes_to_process:
close_group()
nodes_to_process = nodes_to_process_with_next_group
nodes_to_process_with_next_group = deque()
continue
node = nodes_to_process.popleft()
is_op_supported = supported_ops_checker.is_op_supported(node)
is_input_shape_supported = \
not require_fixed_input_sizes or all(_is_fixed_shape_value(i) for i in node.input)
is_node_supported = is_op_supported and is_input_shape_supported
if not is_node_supported:
if node in supported_group_border:
# an unsupported node on the border will be processed after the current partition node group
# so skip any additional processing/counting here
nodes_to_process_with_next_group.append(node)
continue
if not is_op_supported:
unsupported_ops.add(f'{node.domain if node.domain else "ai.onnx"}:{node.op_type}')
num_unsupported_nodes_due_to_op += 1
else:
num_unsupported_nodes_due_to_dynamic_input += 1
if is_node_supported:
num_supported_nodes += 1
# add node to the partition node group
supported_group.append(node)
# remove node from the border and add its outputs to the border
if node in supported_group_border:
supported_group_border.remove(node)
# for each consumer node add to supported_group_border
if node in node_to_consumers:
for consumer in node_to_consumers[node]:
supported_group_border.add(consumer)
# adjust in-degrees of the node outputs and add any new nodes to process
if node in node_to_consumers:
for consumer in node_to_consumers[node]:
consumer_node_in_degree = in_degree[consumer]
consumer_node_in_degree -= 1
if consumer_node_in_degree == 0:
nodes_to_process.append(consumer)
in_degree[consumer] = consumer_node_in_degree
close_group()
# find any subgraphs and check supported for nodes in the subgraphs. this won't change the partitioning as we skip
# Scan/Loop/If nodes, but will provide additional info on operators that are not supported if we changed that.
iterate_graph_per_node_func(graph, supported_ops_checker.is_op_supported)
num_nodes = len(graph.node)
num_partitions = len(supported_groups)
info = PartitioningInfo()
info.num_nodes = num_nodes
info.num_supported_nodes = num_supported_nodes
info.num_partitions = num_partitions
info.num_nodes_in_subgraphs = nodes_in_subgraphs[0]
info.supported_ops_checker = supported_ops_checker
info.supported_groups = supported_groups
info.unsupported_ops = unsupported_ops
info.nodes_unsupported_due_to_op = num_unsupported_nodes_due_to_op
info.nodes_unsupported_due_to_dynamic_input = num_unsupported_nodes_due_to_dynamic_input
return info
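# Hypothetical usage sketch (not part of the original tooling): assuming `model` is an onnx.ModelProto that
# onnx.shape_inference.infer_shapes has been run on, and `checker` is a _SupportedOpsChecker instance,
# check_partitioning could be driven directly like this:
#
#   value_info = {}
#   for vi in list(model.graph.input) + list(model.graph.output) + list(model.graph.value_info):
#       value_info[vi.name] = vi
#   info = check_partitioning(model.graph, checker, True, value_info)
#   print(info.num_partitions, info.num_supported_nodes)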
def _check_ep_partitioning(model, supported_ops_config, value_info: dict = None):
supported_ops = _SupportedOpsChecker(supported_ops_config)
partition_info = check_partitioning(model.graph, supported_ops, value_info is not None, value_info)
return partition_info
def check_nnapi_partitions(model, value_info: dict = None):
# if we're running in the ORT python package the file should be local. otherwise assume we're running from the
# ORT repo
script_dir = pathlib.Path(__file__).parent
local_config = script_dir / 'nnapi_supported_ops.md'
if local_config.exists():
config_path = local_config
else:
ort_root = script_dir.parents[3]
config_path = \
ort_root / 'tools' / 'ci_build' / 'github' / 'android' / 'nnapi_supported_ops.md'
return _check_ep_partitioning(model, config_path, value_info)
def check_coreml_partitions(model, value_info: dict = None):
# if we're running in the ORT python package the file should be local. otherwise assume we're running from the
# ORT repo
script_dir = pathlib.Path(__file__).parent
local_config = script_dir / 'coreml_supported_ops.md'
if local_config.exists():
config_path = local_config
else:
ort_root = script_dir.parents[3]
config_path = \
ort_root / 'tools' / 'ci_build' / 'github' / 'apple' / 'coreml_supported_ops.md'
return _check_ep_partitioning(model, config_path, value_info)
def check_shapes(graph: onnx.GraphProto, logger: logging.Logger = None):
'''
Check the shapes of graph inputs, values and graph outputs to determine if they have static or dynamic sizes.
NNAPI and CoreML do not support dynamically sized values.
:param graph: Graph to check. If shape inferencing has been run the checks on values will be meaningful.
:param logger: Optional logger for diagnostic information.
:return: Tuple of List of inputs with dynamic shapes, Number of dynamic values found
'''
# it's OK if the input is dynamically sized and we do a Resize early to a fixed size.
# it's not good if lots of ops have dynamic inputs
num_fixed_values = 0
num_dynamic_values = 0
dynamic_inputs = []
for i in graph.input:
if not is_fixed_size_tensor(i):
dynamic_inputs.append(i)
# split/join to remove repeated whitespace and newlines from str(i)
if logger:
logger.info(f"Input is not a fixed size tensor: {' '.join(str(i).split())}")
num_dynamic_values += 1
else:
num_fixed_values += 1
dynamic_outputs = []
for o in graph.output:
if not is_fixed_size_tensor(o):
dynamic_outputs.append(o)
if logger:
logger.info(f"Output is not a fixed size tensor: {' '.join(str(o).split())}")
num_dynamic_values += 1
else:
num_fixed_values += 1
# check we have value info.
# special case some test graphs with a single node which only have graph input and output values, and
# a model where all inputs are dynamic (results in no value_info)
    if logger and not graph.value_info and not (len(graph.node) == 1 or len(dynamic_inputs) == len(graph.input)):
logger.warning("Unable to check shapes within model. "
"ONNX shape inferencing should be run on the model prior to checking.")
for vi in graph.value_info:
if is_fixed_size_tensor(vi):
num_fixed_values += 1
else:
num_dynamic_values += 1
if logger:
logger.info(f"Num values with fixed shape={num_fixed_values}. "
f"Num values with dynamic shape={num_dynamic_values}")
    if dynamic_inputs and logger:
if dynamic_outputs:
logger.info("Model has dynamic inputs and outputs. Consider re-exporting model with fixed sizes "
"if NNAPI or CoreML can be used with this model.")
else:
logger.info(
'''Model has dynamically sized inputs but fixed sized outputs.
If the sizes become fixed early in the model (e.g. pre-processing of a dynamic input size
results in a fixed input size for the majority of the model) performance with NNAPI and CoreML,
if applicable, should not be significantly impacted.''')
return dynamic_inputs, num_dynamic_values
def checker(model_path, logger: logging.Logger):
model = onnx.load(model_path)
model_with_shape_info = onnx.shape_inference.infer_shapes(model)
# create lookup map for efficiency
value_to_shape = {}
for v in model_with_shape_info.graph.input:
value_to_shape[v.name] = v
for v in model_with_shape_info.graph.output:
value_to_shape[v.name] = v
for v in model_with_shape_info.graph.value_info:
value_to_shape[v.name] = v
    dynamic_inputs, num_dynamic_values = check_shapes(model_with_shape_info.graph, logger)
def check_ep(ep_name, checker_func):
logger.info(f"Checking {ep_name}")
# check with shape info first so supported nodes takes into account values with dynamic shapes
partition_info = checker_func(model_with_shape_info, value_to_shape)
if logger.getEffectiveLevel() <= logging.DEBUG:
partition_info.dump_analysis(logger, ep_name)
suitability = partition_info.suitability()
logger.info(f"Model should perform well with {ep_name} as is: {suitability.name}")
if suitability != PartitioningInfo.TryWithEP.YES and dynamic_inputs:
logger.info("Checking if model will perform better if the dynamic shapes are fixed...")
partition_info_with_fixed_shapes = checker_func(model_with_shape_info)
if logger.getEffectiveLevel() <= logging.DEBUG:
# analyze and log detailed info
logger.info('Partition information if the model was updated to make the shapes fixed:')
partition_info_with_fixed_shapes.dump_analysis(logger, ep_name)
fixed_shape_suitability = partition_info_with_fixed_shapes.suitability()
logger.info(f"Model should perform well with {ep_name} if modified to have fixed input shapes: "
f"{fixed_shape_suitability.name}")
if fixed_shape_suitability != PartitioningInfo.TryWithEP.NO:
logger.info('Shapes can be altered using python -m onnxruntime.tools.make_dynamic_shape_fixed')
if fixed_shape_suitability.value > suitability.value:
suitability = fixed_shape_suitability
return suitability
nnapi_suitability = check_ep("NNAPI", check_nnapi_partitions)
coreml_suitability = check_ep("CoreML", check_coreml_partitions)
if (nnapi_suitability != PartitioningInfo.TryWithEP.YES or coreml_suitability != PartitioningInfo.TryWithEP.YES) \
and logger.getEffectiveLevel() > logging.DEBUG:
logger.info('Re-run with log level of DEBUG for more details on the NNAPI/CoreML issues.')
logger.info('---------------')
return nnapi_suitability != PartitioningInfo.TryWithEP.NO or coreml_suitability != PartitioningInfo.TryWithEP.NO
def analyze_model(model_path: pathlib.Path, skip_optimize: bool = False, logger: logging.Logger = None):
'''
Analyze the provided model to determine if it's likely to work well with the NNAPI or CoreML Execution Providers
:param model_path: Model to analyze.
:param skip_optimize: Skip optimizing to BASIC level before checking. When exporting to ORT format we will do this
                          optimization.
:param logger: Logger for output
:return: True if either the NNAPI or CoreML Execution Providers may work well with this model.
'''
if not logger:
logger = logging.getLogger('usability_checker')
logger.setLevel(logging.INFO)
logger.info(f'Checking {model_path} for usability with ORT Mobile.')
with tempfile.TemporaryDirectory() as tmp:
if not skip_optimize:
tmp_path = pathlib.Path(tmp) / model_path.name
optimize_model(model_path, tmp_path)
model_path = tmp_path
try_eps = checker(str(model_path.resolve(strict=True)), logger)
return try_eps
def parse_args():
parser = argparse.ArgumentParser(
os.path.basename(__file__),
        description='''Analyze an ONNX model for usage with ORT Mobile.'''
)
parser.add_argument('--log_level', choices=['debug', 'info', 'warning', 'error'],
default='info', help='Logging level')
parser.add_argument('--skip_optimize', action='store_true',
help="Don't optimize the model to BASIC level prior to analyzing. "
"Optimization will occur when exporting the model to ORT format, so in general "
"should not be skipped unless you have a specific reason to do so.")
parser.add_argument('model_path', type=pathlib.Path, help='Provide path to ONNX model')
return parser.parse_args()
def run_analyze_model():
args = parse_args()
logger = logging.getLogger('default')
if args.log_level == 'debug':
logger.setLevel(logging.DEBUG)
elif args.log_level == 'info':
logger.setLevel(logging.INFO)
elif args.log_level == 'warning':
logger.setLevel(logging.WARNING)
else:
logger.setLevel(logging.ERROR)
model_path = args.model_path.resolve()
analyze_model(model_path, args.skip_optimize, logger)
if __name__ == '__main__':
run_analyze_model()
|
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime
import six
from celery.exceptions import MaxRetriesExceededError
from celery.schedules import crontab
from celery.task import task
from celery.task.base import periodic_task
from celery.utils.log import get_task_logger
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from couchdbkit import ResourceConflict, BulkSaveError
from casexml.apps.case.mock import CaseBlock
from corehq import toggles
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors
from corehq.form_processor.models import UserArchivedRebuild
from couchforms.exceptions import UnexpectedDeletedXForm
from corehq.apps.domain.models import Domain
from django.utils.html import format_html
from dimagi.utils.couch.bulk import BulkFetchException
from dimagi.utils.logging import notify_exception
from soil import DownloadBase
from casexml.apps.case.xform import get_case_ids_from_form
from six.moves import map
logger = get_task_logger(__name__)
@task(serializer='pickle')
def bulk_upload_async(domain, user_specs, group_specs):
from corehq.apps.users.bulkupload import create_or_update_users_and_groups
task = bulk_upload_async
DownloadBase.set_progress(task, 0, 100)
results = create_or_update_users_and_groups(
domain,
user_specs,
group_specs,
task=task,
)
DownloadBase.set_progress(task, 100, 100)
return {
'messages': results
}
@task(serializer='pickle')
def bulk_download_users_async(domain, download_id, user_filters):
from corehq.apps.users.bulkupload import dump_users_and_groups, GroupNameError
errors = []
try:
dump_users_and_groups(
domain,
download_id,
user_filters,
bulk_download_users_async,
)
except GroupNameError as e:
group_urls = [
reverse('group_members', args=[domain, group.get_id])
for group in e.blank_groups
]
def make_link(url, i):
return format_html(
'<a href="{}" target="_blank">{}</a>',
url,
_('Blank Group %s') % i
)
group_links = [
make_link(url, i + 1)
for i, url in enumerate(group_urls)
]
errors.append(format_html(
_(
'The following groups have no name. '
'Please name them before continuing: {}'
),
mark_safe(', '.join(group_links))
))
except BulkFetchException:
errors.append(_('Error exporting data. Please try again later.'))
return {
'errors': errors
}
@task(serializer='pickle', rate_limit=2, queue='background_queue', ignore_result=True) # limit this to two bulk saves a second so cloudant has time to reindex
def tag_cases_as_deleted_and_remove_indices(domain, case_ids, deletion_id, deletion_date):
from corehq.apps.sms.tasks import delete_phone_numbers_for_owners
from corehq.messaging.scheduling.tasks import delete_schedule_instances_for_cases
CaseAccessors(domain).soft_delete_cases(list(case_ids), deletion_date, deletion_id)
_remove_indices_from_deleted_cases_task.delay(domain, case_ids)
delete_phone_numbers_for_owners.delay(case_ids)
delete_schedule_instances_for_cases.delay(domain, case_ids)
@task(serializer='pickle', rate_limit=2, queue='background_queue', ignore_result=True, acks_late=True)
def tag_forms_as_deleted_rebuild_associated_cases(user_id, domain, form_id_list, deletion_id,
deletion_date, deleted_cases=None):
"""
Upon user deletion, mark associated forms as deleted and prep cases
for a rebuild.
- 2 saves/sec for cloudant slowness (rate_limit)
"""
deleted_cases = deleted_cases or set()
cases_to_rebuild = set()
for form in FormAccessors(domain).iter_forms(form_id_list):
if form.domain != domain or not form.is_normal:
continue
# rebuild all cases anyways since we don't know if this has run or not if the task was killed
cases_to_rebuild.update(get_case_ids_from_form(form))
# do this after getting case_id's since iter_forms won't return deleted forms
FormAccessors(domain).soft_delete_forms(list(form_id_list), deletion_date, deletion_id)
detail = UserArchivedRebuild(user_id=user_id)
for case_id in cases_to_rebuild - deleted_cases:
_rebuild_case_with_retries.delay(domain, case_id, detail)
def _get_forms_to_modify(domain, modified_forms, modified_cases, is_deletion):
"""Used on user.retire() and user.unretire()
Returns a list of IDs of forms which only modify the cases passed in and
which aren't already listed in `modified_forms`.
"""
form_ids_to_modify = set()
for case_id in modified_cases:
try:
xform_ids = CaseAccessors(domain).get_case(case_id).xform_ids
except CaseNotFound:
xform_ids = []
form_ids_to_modify |= set(xform_ids) - modified_forms
def _is_safe_to_modify(form):
if form.domain != domain:
return False
case_ids = get_case_ids_from_form(form)
# all cases touched by the form and not already modified
for case in CaseAccessors(domain).iter_cases(case_ids - modified_cases):
if case.is_deleted != is_deletion:
# we can't delete/undelete this form - this would change the state of `case`
return False
# all cases touched by this form are deleted
return True
if is_deletion or Domain.get_by_name(domain).use_sql_backend:
all_forms = FormAccessors(domain).iter_forms(form_ids_to_modify)
else:
# accessor.iter_forms doesn't include deleted forms on the couch backend
all_forms = list(map(FormAccessors(domain).get_form, form_ids_to_modify))
return [form.form_id for form in all_forms if _is_safe_to_modify(form)]
@task(serializer='pickle', queue='background_queue', ignore_result=True, acks_late=True)
def tag_system_forms_as_deleted(domain, deleted_forms, deleted_cases, deletion_id, deletion_date):
to_delete = _get_forms_to_modify(domain, deleted_forms, deleted_cases, is_deletion=True)
FormAccessors(domain).soft_delete_forms(to_delete, deletion_date, deletion_id)
@task(serializer='pickle', queue='background_queue', ignore_result=True, acks_late=True)
def undelete_system_forms(domain, deleted_forms, deleted_cases):
"""The reverse of tag_system_forms_as_deleted; called on user.unretire()"""
to_undelete = _get_forms_to_modify(domain, deleted_forms, deleted_cases, is_deletion=False)
FormAccessors(domain).soft_undelete_forms(to_undelete)
@task(serializer='pickle', queue='background_queue', ignore_result=True, acks_late=True)
def _remove_indices_from_deleted_cases_task(domain, case_ids):
if toggles.SKIP_REMOVE_INDICES.enabled(domain):
return
# todo: we may need to add retry logic here but will wait to see
# what errors we should be catching
try:
remove_indices_from_deleted_cases(domain, case_ids)
except BulkSaveError as e:
notify_exception(
None,
"_remove_indices_from_deleted_cases_task "
"experienced a BulkSaveError. errors: {!r}".format(e.errors)
)
raise
def remove_indices_from_deleted_cases(domain, case_ids):
from corehq.apps.hqcase.utils import submit_case_blocks
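    # Find cases that still hold a reverse index pointing at one of the deleted cases and submit case blocks
    # that blank out those indices, so the deleted cases are no longer referenced.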
deleted_ids = set(case_ids)
indexes_referencing_deleted_cases = CaseAccessors(domain).get_all_reverse_indices_info(list(case_ids))
case_updates = [
CaseBlock(
case_id=index_info.case_id,
index={
index_info.identifier: (index_info.referenced_type, '') # blank string = delete index
}
).as_string().decode('utf-8')
for index_info in indexes_referencing_deleted_cases
if index_info.case_id not in deleted_ids
]
device_id = __name__ + ".remove_indices_from_deleted_cases"
submit_case_blocks(case_updates, domain, device_id=device_id)
@task(serializer='pickle', bind=True, queue='background_queue', ignore_result=True,
default_retry_delay=5 * 60, max_retries=3, acks_late=True)
def _rebuild_case_with_retries(self, domain, case_id, detail):
"""
Rebuild a case with retries
- retry in 5 min if failure occurs after (default_retry_delay)
- retry a total of 3 times
"""
from casexml.apps.case.cleanup import rebuild_case_from_forms
try:
rebuild_case_from_forms(domain, case_id, detail)
except (UnexpectedDeletedXForm, ResourceConflict) as exc:
try:
self.retry(exc=exc)
except MaxRetriesExceededError:
            notify_exception(
                None,
                "Maximum Retries Exceeded while rebuilding case {} during deletion.".format(case_id)
            )
@periodic_task(
run_every=crontab(hour=23, minute=55),
queue='background_queue',
)
def resend_pending_invitations():
from corehq.apps.users.models import Invitation
days_to_resend = (15, 29)
days_to_expire = 30
domains = Domain.get_all()
for domain_obj in domains:
invitations = Invitation.by_domain(domain_obj.name)
for invitation in invitations:
days = (datetime.utcnow() - invitation.invited_on).days
if days in days_to_resend:
invitation.send_activation_email(days_to_expire - days)
@task(serializer='pickle')
def turn_on_demo_mode_task(commcare_user_id, domain):
from corehq.apps.ota.utils import turn_on_demo_mode
from corehq.apps.users.models import CommCareUser
user = CommCareUser.get(commcare_user_id)
DownloadBase.set_progress(turn_on_demo_mode_task, 0, 100)
results = turn_on_demo_mode(user, domain)
DownloadBase.set_progress(turn_on_demo_mode_task, 100, 100)
return {
'messages': results
}
@task(serializer='pickle')
def reset_demo_user_restore_task(commcare_user_id, domain):
from corehq.apps.ota.utils import reset_demo_user_restore
from corehq.apps.users.models import CommCareUser
user = CommCareUser.get(commcare_user_id)
DownloadBase.set_progress(reset_demo_user_restore_task, 0, 100)
try:
reset_demo_user_restore(user, domain)
results = {'errors': []}
except Exception as e:
notify_exception(None, message=six.text_type(e))
results = {'errors': [
_("Something went wrong in creating restore for the user. Please try again or report an issue")
]}
DownloadBase.set_progress(reset_demo_user_restore_task, 100, 100)
return {'messages': results}
@task(serializer='pickle')
def remove_unused_custom_fields_from_users_task(domain):
from corehq.apps.users.custom_data import remove_unused_custom_fields_from_users
remove_unused_custom_fields_from_users(domain)
@task()
def update_domain_date(user_id, domain):
from corehq.apps.users.models import WebUser
user = WebUser.get_by_user_id(user_id, domain)
domain_membership = user.get_domain_membership(domain)
if domain_membership:
domain_membership.last_accessed = datetime.today().date()
user.save()
else:
logger.error("DomainMembership does not exist for user %s in domain %s" % (user.name, domain))
|
import unittest
import random
import multiprocessing
import numpy as np
import warnings
from sklearn.metrics import accuracy_score, explained_variance_score
from sklearn.datasets import make_classification
from sklearn.feature_selection import f_classif, f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from pipecaster.multichannel_pipeline import MultichannelPipeline
from pipecaster.channel_selection import SelectKBestScores, SelectKBestProbes
from pipecaster.testing_utils import make_multi_input_classification, make_multi_input_regression
import pipecaster.parallel as parallel
n_cpus = multiprocessing.cpu_count()
class TestChannelSelectors(unittest.TestCase):
### TEST CHANNEL SELECTORS USING SYNTHETIC CLASSIFICATION DATA ###
def setUp(self):
warnings.filterwarnings("ignore")
def tearDown(self):
warnings.resetwarnings()
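    # The helpers below build a MultichannelPipeline (StandardScaler followed by the channel selector under
    # test), fit it on synthetic multi-channel data, and count how many informative, weak, and random input
    # matrices survive selection, so the tests can assert on the selector's discrimination behaviour.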
@staticmethod
def _select_synthetic_classification(channel_selector, n_informative_Xs=3,
n_weak_Xs=0, n_random_Xs=0,
weak_noise_sd=1.0, verbose = 0,
seed=None, **sklearn_params):
n_Xs = n_informative_Xs + n_weak_Xs + n_random_Xs
Xs, y, X_types = make_multi_input_classification(
n_informative_Xs, n_weak_Xs, n_random_Xs,
weak_noise_sd, seed, **sklearn_params)
clf = MultichannelPipeline(n_channels=n_Xs)
clf.add_layer(StandardScaler())
clf.add_layer(channel_selector)
clf.fit(Xs, y)
Xs_t = clf.transform(Xs)
Xs_selected = ['selected' if X is not None
else 'not selected' for X in Xs_t]
n_informative_hits, n_random_hits, n_weak_hits = 0, 0, 0
for X, t in zip(Xs_selected, X_types):
if X == 'selected' and t == 'informative':
n_informative_hits +=1
if X == 'not selected' and t == 'random':
n_random_hits +=1
if X == 'selected' and t == 'weak':
n_weak_hits +=1
if verbose > 0:
print('InputSelector selected {} out of {} informative inputs'
.format(n_informative_hits, n_informative_Xs))
print('InputSelector filtered out {} out of {} random inputs'
.format(n_random_hits, n_Xs - n_informative_Xs - n_weak_Xs))
            print('InputSelector selected {} out of {} weakly informative inputs'
.format(n_weak_hits, n_weak_Xs))
return n_informative_hits, n_random_hits, n_weak_hits
@staticmethod
def _test_weak_strong_cls_input_discrimination(channel_selector, n_weak = 5, n_strong = 5,
weak_noise_sd = 0.25, verbose=0, seed = None, **sklearn_params):
n_random = n_weak + n_strong
n_Xs = n_weak + n_strong + n_random
n_informative_hits, n_random_hits, n_weak_hits = TestChannelSelectors._select_synthetic_classification(channel_selector,
n_informative_Xs=n_strong,
n_weak_Xs=n_weak,
n_random_Xs=n_random,
weak_noise_sd=weak_noise_sd,
verbose=verbose, seed=seed, **sklearn_params)
passed = True
if n_informative_hits != n_strong:
passed = False
if n_weak_hits != 0:
passed = False
if n_random_hits != (n_Xs - n_weak - n_strong):
passed = False
return passed
@staticmethod
def _test_weak_cls_input_detection(channel_selector, n_weak = 5, n_strong = 5, weak_noise_sd = 0.25,
verbose=0, seed = None, **sklearn_params):
n_random = n_weak + n_strong
n_Xs = n_weak + n_strong + n_random
n_informative_hits, n_random_hits, n_weak_hits = TestChannelSelectors._select_synthetic_classification(channel_selector,
n_informative_Xs=n_strong,
n_weak_Xs=n_weak,
n_random_Xs=n_random,
weak_noise_sd=weak_noise_sd,
verbose=verbose, seed=seed, **sklearn_params)
passed = True
if n_informative_hits != n_strong:
passed = False
if n_weak_hits != n_weak:
passed = False
if n_random_hits != (n_Xs - n_weak - n_strong):
passed = False
return passed
def test_SelectKBestScores_weak_strong_cls_input_discrimination(self, verbose=0, seed=42):
k = 5
sklearn_params = {'n_classes':2,
'n_samples':1000,
'n_features':100,
'n_informative':20,
'n_redundant':0,
'n_repeated':0,
'class_sep':2.0}
channel_selector = SelectKBestScores(feature_scorer=f_classif, aggregator=np.mean, k=k)
passed = TestChannelSelectors._test_weak_strong_cls_input_discrimination(channel_selector, n_weak = k,
n_strong = k, weak_noise_sd = 30,
verbose=verbose, seed = seed, **sklearn_params)
self.assertTrue(passed, 'SelectKBestScores failed to discriminate between weak & strong classification input matrices')
def test_SelectKBestScores_weak_cls_input_detection(self, verbose=0, seed=42):
k = 10
sklearn_params = {'n_classes':2,
'n_samples':1000,
'n_features':100,
'n_informative':20,
'n_redundant':0,
'n_repeated':0,
'class_sep':2.0}
channel_selector = SelectKBestScores(feature_scorer=f_classif, aggregator=np.mean, k=k)
passed = TestChannelSelectors._test_weak_cls_input_detection(channel_selector, n_weak = int(k/2),
n_strong = k - int(k/2), weak_noise_sd = 0.2,
verbose=verbose, seed=seed, **sklearn_params)
        self.assertTrue(passed, 'SelectKBestScores failed to detect all weak classification input matrices')
def test_SelectKBestProbes_weak_strong_cls_input_discrimination(self,
verbose=0,
seed=42):
k = 5
sklearn_params = {'n_classes':2,
'n_samples':2000,
'n_features':20,
'n_informative':10,
'n_redundant':0,
'n_repeated':0,
'class_sep':2.0}
probe = KNeighborsClassifier(
n_neighbors=5, weights='uniform')
channel_selector = SelectKBestProbes(predictor_probe=probe, cv=3,
score_method='predict', scorer=accuracy_score, k=k,
channel_processes=n_cpus, cv_processes=1)
passed = TestChannelSelectors._test_weak_strong_cls_input_discrimination(channel_selector, n_weak=k,
n_strong=k, weak_noise_sd=50,
verbose=verbose, seed=seed, **sklearn_params)
self.assertTrue(passed, 'SelectKBestProbes failed to discriminate between weak & strong classification input matrices')
def test_SelectKBestProbes_weak_cls_input_detection(self, verbose=0, seed=42):
k = 10
sklearn_params = {'n_classes':2,
'n_samples':2000,
'n_features':20,
'n_informative':15,
'n_redundant':0,
'n_repeated':0,
'class_sep':2.0}
probe = KNeighborsClassifier(n_neighbors=5, weights='uniform')
channel_selector = SelectKBestProbes(
predictor_probe=probe, cv=3, score_method='predict',
scorer=accuracy_score, k=k, channel_processes=n_cpus,
cv_processes=1)
passed = TestChannelSelectors._test_weak_cls_input_detection(
channel_selector, n_weak = int(k/2), n_strong = k - int(k/2),
weak_noise_sd = 1, verbose=verbose, seed=seed, **sklearn_params)
        self.assertTrue(passed, 'SelectKBestProbes failed to detect all weak '
                                'classification input matrices')
### TEST CHANNEL SELECTORS USING SYNTHETIC REGRESSION DATA ###
@staticmethod
def _select_synthetic_regression(channel_selector, n_informative_Xs=5, n_weak_Xs=0, n_random_Xs=0,
weak_noise_sd=None, verbose = 0, seed = None, **rgr_params):
n_Xs = n_informative_Xs + n_weak_Xs + n_random_Xs
Xs, y, X_types = make_multi_input_regression(n_informative_Xs, n_weak_Xs,
n_random_Xs, weak_noise_sd,
seed, **rgr_params)
clf = MultichannelPipeline(n_channels=n_Xs)
clf.add_layer(StandardScaler())
clf.add_layer(channel_selector)
Xs_t = clf.fit_transform(Xs, y)
Xs_selected = ['selected' if X is not None else 'not selected' for X in Xs_t]
n_informative_hits, n_random_hits, n_weak_hits = 0, 0, 0
for X, t in zip(Xs_selected, X_types):
if X == 'selected' and t == 'informative':
n_informative_hits +=1
if X == 'not selected' and t == 'random':
n_random_hits +=1
if X == 'selected' and t == 'weak':
n_weak_hits +=1
if verbose > 0:
print('InputSelector selected {} out of {} informative inputs'
.format(n_informative_hits, n_informative_Xs))
print('InputSelector filtered out {} out of {} random inputs'
.format(n_random_hits, n_Xs - n_informative_Xs - n_weak_Xs))
            print('InputSelector selected {} out of {} weakly informative inputs'
.format(n_weak_hits, n_weak_Xs))
return n_informative_hits, n_random_hits, n_weak_hits
@staticmethod
def _test_weak_strong_rgr_input_discrimination(channel_selector, n_weak=5, n_strong=5,
weak_noise_sd=0.25, verbose=0, seed=None, **rgr_params):
n_random = n_weak + n_strong
n_Xs = n_weak + n_strong + n_random
n_informative_hits, n_random_hits, n_weak_hits = TestChannelSelectors._select_synthetic_regression(channel_selector,
n_informative_Xs=n_strong,
n_weak_Xs=n_weak,
n_random_Xs=n_random,
weak_noise_sd=weak_noise_sd,
verbose=verbose, seed=seed,
**rgr_params)
passed = True
if n_informative_hits != n_strong:
passed = False
if n_weak_hits != 0:
passed = False
if n_random_hits != (n_Xs - n_weak - n_strong):
passed = False
return passed
@staticmethod
def _test_weak_rgr_input_detection(channel_selector, n_weak=5, n_strong=5,
weak_noise_sd=0.25, verbose=0, seed = None, **rgr_params):
n_random = n_weak + n_strong
n_Xs = n_weak + n_strong + n_random
n_informative_hits, n_random_hits, n_weak_hits = TestChannelSelectors._select_synthetic_regression(channel_selector,
n_informative_Xs=n_strong,
n_weak_Xs=n_weak,
n_random_Xs=n_random,
weak_noise_sd=weak_noise_sd,
verbose=verbose, seed = seed,
**rgr_params)
passed = True
if n_informative_hits != n_strong:
passed = False
if n_weak_hits != n_weak:
passed = False
if n_random_hits != (n_Xs - n_weak - n_strong):
passed = False
return passed
def test_SelectKBestScores_weak_strong_rgr_input_discrimination(self, verbose=0, seed=42):
k = 5
rgr_params = {'n_samples':2000,
'n_features':30,
'n_informative':20
}
channel_selector = SelectKBestScores(feature_scorer=f_regression, aggregator=np.mean, k=k)
passed = TestChannelSelectors._test_weak_strong_rgr_input_discrimination(channel_selector, n_weak=k,
n_strong=k, weak_noise_sd=10,
verbose=verbose, seed=seed, **rgr_params)
self.assertTrue(passed, 'SelectKBestScores failed to discriminate between weak & strong regression input matrices')
def test_SelectKBestScores_weak_rgr_input_detection(self, verbose=0, seed=42):
k = 10
rgr_params = {'n_samples':2000,
'n_features':30,
'n_informative':20
}
channel_selector = SelectKBestScores(feature_scorer=f_regression, aggregator=np.mean, k=k)
passed = TestChannelSelectors._test_weak_rgr_input_detection(channel_selector, n_weak=int(k/2),
n_strong=k - int(k/2), weak_noise_sd=0.2,
verbose=verbose, seed=seed, **rgr_params)
        self.assertTrue(passed, 'SelectKBestScores failed to detect all weak regression input matrices')
def test_SelectKBestProbes_weak_strong_rgr_input_discrimination(self, verbose=0, seed=42):
k = 5
rgr_params = {'n_samples':2000,
'n_features':10,
'n_informative':5
}
channel_selector = SelectKBestProbes(predictor_probe=RandomForestRegressor(n_estimators=20, max_depth=2),
cv=3, scorer=explained_variance_score, k=k,
channel_processes=n_cpus, cv_processes=1)
passed = TestChannelSelectors._test_weak_strong_rgr_input_discrimination(channel_selector, n_weak=k,
n_strong=k, weak_noise_sd=30,
verbose=verbose, seed=seed, **rgr_params)
self.assertTrue(passed, 'SelectKBestProbes failed to discriminate between weak & strong regression input matrices')
def test_SelectKBestProbes_weak_rgr_input_detection(self, verbose=0, seed=42):
k = 10
rgr_params = {'n_samples':2000,
'n_features':10,
'n_informative':5
}
channel_selector = SelectKBestProbes(predictor_probe=RandomForestRegressor(n_estimators=25, max_depth=2),
cv=3, scorer=explained_variance_score, k=k, channel_processes=n_cpus, cv_processes=1)
passed = TestChannelSelectors._test_weak_rgr_input_detection(channel_selector, n_weak=int(k/2),
n_strong=k - int(k/2), weak_noise_sd=0.2,
verbose=verbose, seed=seed, **rgr_params)
        self.assertTrue(passed, 'SelectKBestProbes failed to detect all weak regression input matrices')
if __name__ == '__main__':
unittest.main()
|
from settings.settings import Settings
class AWSSettings(Settings):
def __init__(self):
super().__init__()
self.raw_bucket = 'data-pipeline-demo-raw'
self.enriched_bucket = 'data-pipeline-demo-enriched'
|
import fire
def hello(name):
"""
python3 fn.py Yazid
> Hello Yazid!
"""
return 'Hello {name}!'.format(name=name)
if __name__ == '__main__':
fire.Fire(hello)
|
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option
import logging
import os
import sys
import log_helper
from datetime import datetime
import uuid
import json
import splunk.rest as rest
import boto3
debug_logger = log_helper.setup(logging.INFO, 'GetAWSPriceListDebug', 'get_aws_price_list_debug.log')
pid = os.getpid()
tstart = datetime.now()
guid = str(uuid.uuid4().hex)
def simple_request_messages_to_str(messages):
"""
Returns a readable string from a simple request response message
Arguments
messages -- The simple request response message to parse
"""
entries = []
for message in messages:
entries.append(message.get('text'))
return ','.join(entries)
def simple_request_eai(url, action, method, session_key, params=None):
"""
Returns the payload response from a simpleRequest call
Arguments
url -- The REST handler endpoint to use in the simpleRequest
action -- The readable requested action used in logs
method -- The REST method to make the request with
session_key -- The valid session key which will be used in the request
params -- The parameters sent in the POST body of the simpleRequest
"""
if not params:
params = {}
debug_logger.info(
'action=http_internal_request state=start method=%s url=%s pid=%s guid=%s' % (method, url, pid, guid))
try:
response, content = rest.simpleRequest(
url,
getargs=dict(output_mode='json'),
postargs=params,
method=method,
sessionKey=session_key
)
    except Exception as e:
debug_logger.error('action=http_internal_request state=error error="%s" pid=%s guid=%s' % (e, pid, guid))
raise Exception('Unable to %s %s entry. %s' % (action, url, e))
debug_logger.info('action=http_internal_request state=end pid=%s guid=%s' % (pid, guid))
try:
payload = json.loads(content)
    except Exception as e:
debug_logger.error('action=http_internal_request state=error error="%s"' % e)
raise Exception('Unable to parse %s response payload.' % url)
if response.status not in [200, 201]:
message = simple_request_messages_to_str(response.messages)
debug_logger.error('action=http_internal_request state=error error="%s"' % message)
raise Exception(
'Unable to %s %s entry. %s' % (action, url, message))
return payload
@Configuration()
class GetAWSPriceListCommand(GeneratingCommand):
aws_master_account_id = Option(require=True)
service_code = Option(require=True)
def generate(self):
session_key = self._metadata.searchinfo.session_key
grand_central_aws_accounts_rest_path = '/servicesNS/%s/%s/grand_central_aws_accounts/%s' % ('nobody', 'grand_central', self.aws_master_account_id)
grand_central_aws_accounts_eai_response_payload = simple_request_eai(grand_central_aws_accounts_rest_path, 'read', 'GET', session_key)
grand_central_aws_account = grand_central_aws_accounts_eai_response_payload['entry'][0]
aws_secret_key_link_alternate = grand_central_aws_account['content']['aws_secret_key_link_alternate']
aws_access_key = grand_central_aws_account['content']['aws_access_key']
passwords_conf_payload = simple_request_eai(aws_secret_key_link_alternate, 'list', 'GET', session_key)
SECRET_KEY = passwords_conf_payload['entry'][0]['content']['clear_password']
client = boto3.client('pricing', aws_access_key_id=aws_access_key, aws_secret_access_key=SECRET_KEY, region_name='us-east-1')
response = client.get_products(ServiceCode=self.service_code)
for price in response['PriceList']:
yield {"_raw": price}
while 'NextToken' in response:
response = client.get_products(ServiceCode=self.service_code, NextToken=response['NextToken'])
for price in response['PriceList']:
yield {"_raw": price}
dispatch(GetAWSPriceListCommand, sys.argv, sys.stdin, sys.stdout, __name__)
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tim(MakefilePackage):
homepage = "https://github.com/buildsi/build-abi-test-tim"
git = "https://github.com/buildsi/build-abi-test-tim.git"
version('main', branch='main')
# Does not have releases yet!
depends_on("elfutils")
def install(self, spec, prefix):
mkdir(prefix.bin)
mkdir(prefix.lib)
install('test', prefix.bin)
install('reader', prefix.bin)
install('libfoo.so', prefix.lib)
|
"""Three-dimensional Cone-beam Tomography simulation.
The structure is adapted from Nikolay's st_sim.py.
Generate intensity frames based on Fresnel diffraction
theory. :class:`TomoSim` does the heavy lifting of calculating
the wavefront propagation to the detector plane.
:class:`STConverter` exports simulated data to a `CXI`_ format
file accordingly to the provided :class:`Protocol` object and
saves the protocol and experimental parameters to the same folder.
Logs of the simulation process are saved in `logs` folder.
.. _CXI: https://www.cxidb.org/cxi.html
"""
from __future__ import division
from __future__ import absolute_import
import os
import sys
import logging
import datetime
import argparse
import numpy as np
import scipy as sp
import h5py
from ..protocol import cxi_protocol, ROOT_PATH
from ..data_processing import TomoData
from .tomo_sim_param import TomoParams, parameters
from ..bin import aperture_wp, barcode_steps, barcode_profile, lens_wp
from ..bin import make_frames, make_whitefield
from ..bin import fraunhofer_1d, fraunhofer_1d_scan
class tomoSim:
"""Three-dimensional Cone-beam Tomography simulation class.
    Generates the circle's transmission profile and the lens' aberrated
    wavefront, then propagates the wavefront to the detector
    plane. Logs all the steps to the `logs` folder.
Parameters
----------
    tomo_sim_params : TomoParams
Experimental parameters.
bsteps : numpy.ndarray, optional (should change)
Array of barcode's bar coordinates. Generates the array
automatically if it's not provided.
See Also
--------
    tomo_sim_param : Full list of experimental parameters.
"""
# generate the log file during calculation
log_dir = os.path.join(ROOT_PATH, '../logs')
    def __init__(self, tomo_sim_params, bsteps=None):
self.parameters = tomo_sim_params
self._init_logging()
self._init_coord()
self._init_lens()
self._init_barcode(bsteps)
self._init_detector()
    def __getattr__(self, attr):
        """
        Look up missing attributes in the experimental parameters object.
        """
        if attr in self.parameters:
            return self.parameters.__getattr__(attr)
        raise AttributeError(attr)
def _init_logging(self):
"""
Initial the log file
"""
os.makedirs(self.log_dir, exist_ok=True)
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.level = logging.INFO
filename = os.path.join(
self.log_dir,
datetime.datetime.now().strftime('%d-%m-%Y_%H-%M-%S.log'))
self.logger.addHandler(logging.FileHandler(filename))
if self.verbose:
            self.logger.addHandler(logging.StreamHandler(sys.stdout))
for handler in self.logger.handlers:
handler.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self.logger.info('Initializing')
self.logger.info('Current parameters')
def _init_coord(self):
# Initializing coordinate parameters
xx_span = self.fs_size * self.pix_size
yy_span = self.ss_size * self.pix_size
x_span = 1.6 * self.ap_x / self.focus * self.defocus
y_span = 1.2 * self.ap_y
n_x = int(x_span * xx_span / self.wl / self.det_dist)
n_y = int(y_span * yy_span / self.wl / self.det_dist)
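        # The number of sample points scales with (sample-plane span x detector span) / (wl * det_dist), i.e.
        # roughly one point per diffraction fringe, which keeps the propagated wavefront adequately sampled.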
# Initializing coordinate arrays
self.logger.info(
"Initializing coordinate arrays at the sample's plane")
self.logger.info('Number of points in x axis: {:d}'.format(n_x))
self.logger.info('Number of points in y axis: {:d}'.format(n_y))
self.x_arr = np.linspace(-x_span / 2, x_span / 2, n_x)
self.y_arr = np.linspace(-y_span / 2, y_span / 2, n_y)
        # Detector plane coordinate arrays
self.xx_arr = np.linspace(-xx_span / 2,
xx_span / 2,
self.fs_size,
endpoint=False)
self.yy_arr = np.linspace(-yy_span / 2,
yy_span / 2,
self.ss_size,
endpoint=False)
def _init_lens(self):
#Initializing wavefields at the sample's plane
self.logger.info("Generating wavefields at the sample's plane")
self.wf0_x = lens_wp(x_arr=self.x_arr,
wl=self.wl,
ap=self.ap_x,
focus=self.focus,
defoc=self.defocus,
alpha=self.alpha,
xc=(self.x0 - 0.5) * self.ap_x)
self.wf0_y = aperture_wp(x_arr=self.y_arr,
z=self.focus + self.defocus,
wl=self.wl,
ap=self.ap_y)
self.i0 = self.p0 / self.ap_x / self.ap_y
self.smp_c = 1 / self.wl / (self.focus + self.defocus)
self.logger.info("The wavefields have been generated")
|
from assertpy import assert_that
from click.testing import CliRunner
from elementalcms.core import FlaskContext
from elementalcms.management import cli
from tests import EphemeralElementalFileSystem
class TestCreateCommandShould:
def test_fail_when_language_is_not_supported(self, default_elemental_fixture, default_settings_fixture):
runner = CliRunner()
with runner.isolated_filesystem():
with EphemeralElementalFileSystem(default_elemental_fixture, default_settings_fixture):
# noinspection PyTypeChecker
result = runner.invoke(cli, ['pages',
'create',
'-p', 'home', 'ru'])
assert_that(result.output).contains('language is not supported')
def test_fail_when_spec_file_already_exist(self, default_elemental_fixture, default_settings_fixture):
runner = CliRunner()
with runner.isolated_filesystem():
root_folder_path = FlaskContext(default_settings_fixture['cmsCoreContext']).PAGES_FOLDER
folder_path = f'{root_folder_path}/en'
with EphemeralElementalFileSystem(default_elemental_fixture, default_settings_fixture, [
(f'{folder_path}/home.json', '...')
]):
# noinspection PyTypeChecker
result = runner.invoke(cli, ['pages',
'create',
'-p', 'home', 'en'])
assert_that(result.output).contains('already exist')
def test_create_page_spec_file(self, default_elemental_fixture, default_settings_fixture):
runner = CliRunner()
with runner.isolated_filesystem():
with EphemeralElementalFileSystem(default_elemental_fixture, default_settings_fixture):
# noinspection PyTypeChecker
runner.invoke(cli, ['pages',
'create',
'--page', 'home', 'en'])
root_folder_path = FlaskContext(default_settings_fixture["cmsCoreContext"]).PAGES_FOLDER
folder_path = f'{root_folder_path}/en'
assert_that(f'{folder_path}/home.json').exists()
def test_create_page_content_file(self, default_elemental_fixture, default_settings_fixture):
runner = CliRunner()
with runner.isolated_filesystem():
with EphemeralElementalFileSystem(default_elemental_fixture, default_settings_fixture):
# noinspection PyTypeChecker
runner.invoke(cli, ['pages',
'create',
'--page', 'home', 'en'])
root_folder_path = FlaskContext(default_settings_fixture["cmsCoreContext"]).PAGES_FOLDER
folder_path = f'{root_folder_path}/en'
assert_that(f'{folder_path}/home.html').exists()
def test_display_success_feedback_message(self, default_elemental_fixture, default_settings_fixture):
runner = CliRunner()
with runner.isolated_filesystem():
with EphemeralElementalFileSystem(default_elemental_fixture, default_settings_fixture):
# noinspection PyTypeChecker
result = runner.invoke(cli, ['pages',
'create',
'--page', 'home', 'en'])
assert_that(result.output).contains('home.json|html files has been created successfully.')
|
# coding: utf8
'''
# -------------------------------------------------------------------------------------------------------
# CONVERTER GEODATABASE
# -------------------------------------------------------------------------------------------------------
# Michel Metran
# September 2017
# Python Script for ArcGIS
# Description: Generates APPs (Permanent Preservation Areas) from the IGC's vectorized hydrographic network
'''
# -------------------------------------------------------------------------------------------------------
# Modules and Encoding
import os
import sys
import arcpy
import arcpy.cartography as CA  # Required for the Smooth tools
reload(sys)
sys.setdefaultencoding('utf8')
# -------------------------------------------------------------------------------------------------------
# Input Variables
geodatabase = r'E:\SIG_MP_BasesCartograficas\SP_IGC_10k\Geodata\Geo_IGC.mdb'
in_FeatureDataSet = 'Vetorizacao'
out_FeatureDataSet = 'Resultados'
# APP buffer distances in meters
APP_Nascentes = 50  # Item I, Article 3, CONAMA Resolution 302/02
APP_CursoDagua = 30  # Subitem 'a', Item I, Article 3, CONAMA Resolution 302/02
APP_RioMaior10m = 0  # Subitems 'b', 'c', 'd' and 'e', Item I, Article 3, CONAMA Resolution 302/02
APP_Represa_NascentePerene = 50  # Item I, Article 3, CONAMA Resolution 302/02
APP_Represa_NascenteIntermitente = 50  # Item I, Article 3, CONAMA Resolution 302/02
APP_Represa_Menor20ha = 15  # Item I, Article 3, CONAMA Resolution 302/02
APP_Represa_Maior20ha = 100  # Item I, Article 3, CONAMA Resolution 302/02
APP_Lagoa_Menor20ha = 50  # Subitem 'b', Item I, Article 3, CONAMA Resolution 303/02
APP_Lagoa_Maior20ha = 100  # Subitem 'b', Item I, Article 3, CONAMA Resolution 303/02
APP_Varzea = 30
# -------------------------------------------------------------------------------------------------------
# Print Variables
in_pathFeatureDataSet = os.path.join(geodatabase, in_FeatureDataSet)
out_pathFeatureDataSet = os.path.join(geodatabase, out_FeatureDataSet)
temp_pathFeatureDataSet = os.path.join(geodatabase, in_FeatureDataSet + "_temp")
arcpy.Delete_management(temp_pathFeatureDataSet, 'FeatureDataset')
arcpy.CreateFeatureDataset_management(geodatabase, in_FeatureDataSet + "_temp", in_pathFeatureDataSet)
print 'Geodatabase: ' + geodatabase
print 'Feature DataSet Input: ' + in_pathFeatureDataSet
print 'Feature DataSet Output: ' + out_pathFeatureDataSet
print 'Feature DataSet Temp: ' + temp_pathFeatureDataSet
# -------------------------------------------------------------------------------------------------------
# ArcGIS Environment Settings
arcpy.ResetEnvironments()
arcpy.env.overwriteOutput = True
arcpy.env.workspace = temp_pathFeatureDataSet
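# The workflow below selects the hydrography features (lines, points and polygons), smooths them, buffers them
# by the APP distances defined above, merges and dissolves the buffers, and finally erases the water bodies
# themselves to produce the perennial and intermittent APP feature classes.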
# -------------------------------------------------------------------------------------------------------
# Input: Watercourses
print "## Step 1: Watercourse APPs"
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Linhas'),
'Hidro_Linhas_01_Select',
"[Tipo] = 'Perene'")
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Linhas'),
'Hidro_Linhas_02_Select',
"[Tipo] = 'Intermitente' OR [Tipo] = 'Perene'")
CA.SmoothLine('Hidro_Linhas_01_Select',
'Hidro_Linhas_03_Smooth',
'PAEK', '30 Meters', 'NO_FIXED', 'FLAG_ERRORS')
CA.SmoothLine('Hidro_Linhas_02_Select',
'Hidro_Linhas_04_Smooth',
'PAEK', '30 Meters', 'NO_FIXED', 'FLAG_ERRORS')
arcpy.Buffer_analysis('Hidro_Linhas_03_Smooth',
'Hidro_Linhas_05_Buffer',
str(APP_CursoDagua) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Buffer_analysis('Hidro_Linhas_04_Smooth',
'Hidro_Linhas_06_Buffer',
str(APP_CursoDagua) + ' Meters',
'FULL', 'ROUND', 'NONE')
# -------------------------------------------------------------------------------------------------------
# Input: Springs
print '## Step 2: Spring APPs'
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Pontos'),
'Hidro_Pontos_01_Select',
"[Tipo] = 'Perene'")
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Pontos'),
'Hidro_Pontos_02_Select',
"[Tipo] = 'Intermitente' OR [Tipo] = 'Perene'")
arcpy.Buffer_analysis('Hidro_Pontos_01_Select',
'Hidro_Pontos_03_Buffer',
str(APP_Nascentes) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Buffer_analysis('Hidro_Pontos_02_Select',
'Hidro_Pontos_04_Buffer',
str(APP_Nascentes) + ' Meters',
'FULL', 'ROUND', 'NONE')
# -------------------------------------------------------------------------------------------------------
# Input: Rivers wider than 10 meters
print '## Step 3: APPs for rivers wider than 10 meters'
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_01_Select', "[Tipo] = 'Rio'")
CA.SmoothPolygon('Hidro_Poligonos_01_Select',
'Hidro_Poligonos_02_Smooth',
'PAEK', '30 Meters', 'NO_FIXED', 'FLAG_ERRORS')
if APP_RioMaior10m == 0:
try:
arcpy.Buffer_analysis('Hidro_Poligonos_02_Smooth',
'Hidro_Poligonos_03_Buffer',
'APP', 'FULL', 'ROUND', 'NONE')
except arcpy.ExecuteError:
print arcpy.GetMessages()
elif APP_RioMaior10m > 0:
try:
arcpy.Buffer_analysis('Hidro_Poligonos_02_Smooth',
'Hidro_Poligonos_03_Buffer',
str(APP_RioMaior10m) + ' Meters',
'FULL', 'ROUND', 'NONE')
except arcpy.ExecuteError:
print arcpy.GetMessages()
else:
pass
# -------------------------------------------------------------------------------------------------------
# Input: Polygons
print '## Step 4: Reservoir APPs'
arcpy.CalculateField_management(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Area_ha', '!shape.area@hectares!', 'PYTHON_9.3')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_04_Select',
"[Tipo] = 'Nascente Perene'")
arcpy.Buffer_analysis('Hidro_Poligonos_04_Select',
'Hidro_Poligonos_05_Buffer',
str(APP_Represa_NascentePerene) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_06_Select',
"[Tipo] = 'Nascente Intermitente'")
arcpy.Buffer_analysis('Hidro_Poligonos_06_Select',
'Hidro_Poligonos_07_Buffer',
str(APP_Represa_NascenteIntermitente ) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_08_Select',
"[Tipo] = 'Represa' AND [Area_ha] < 20")
arcpy.Buffer_analysis('Hidro_Poligonos_08_Select',
'Hidro_Poligonos_09_Buffer',
str(APP_Represa_Menor20ha) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_10_Select',
"[Tipo] = 'Represa' AND [Area_ha] >= 20")
arcpy.Buffer_analysis('Hidro_Poligonos_10_Select',
'Hidro_Poligonos_11_Buffer',
str(APP_Represa_Maior20ha) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_12_Select',
"[Tipo] = 'Lagoa' AND [Area_ha] < 20")
arcpy.Buffer_analysis('Hidro_Poligonos_12_Select',
'Hidro_Poligonos_13_Buffer',
str(APP_Lagoa_Menor20ha) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_14_Select',
"[Tipo] = 'Lagoa' AND [Area_ha] >= 20")
arcpy.Buffer_analysis('Hidro_Poligonos_14_Select',
'Hidro_Poligonos_15_Buffer',
str(APP_Lagoa_Maior20ha) + ' Meters',
'FULL', 'ROUND', 'NONE')
arcpy.Select_analysis(os.path.join(in_pathFeatureDataSet, 'Hidro_Poligonos'),
'Hidro_Poligonos_16_Select',
"[Tipo] = 'Várzea'")
arcpy.Buffer_analysis('Hidro_Poligonos_16_Select',
'Hidro_Poligonos_17_Buffer',
str(APP_Varzea) + ' Meters',
'FULL', 'ROUND', 'NONE')
# -------------------------------------------------------------------------------------------------------
# Analysis: Merging the polygons that generate APPs
print '## Step 5: Merging the polygons that generate APPs'
arcpy.Merge_management(['Hidro_Poligonos_03_Buffer',
'Hidro_Poligonos_05_Buffer',
'Hidro_Poligonos_09_Buffer',
'Hidro_Poligonos_11_Buffer',
'Hidro_Poligonos_13_Buffer',
'Hidro_Poligonos_15_Buffer',
'Hidro_Poligonos_17_Buffer'],
'Hidro_Poligonos_18_Merge')
arcpy.Merge_management(['Hidro_Poligonos_03_Buffer',
'Hidro_Poligonos_05_Buffer',
'Hidro_Poligonos_07_Buffer',
'Hidro_Poligonos_09_Buffer',
'Hidro_Poligonos_11_Buffer',
'Hidro_Poligonos_13_Buffer',
'Hidro_Poligonos_15_Buffer',
'Hidro_Poligonos_17_Buffer'],
'Hidro_Poligonos_19_Merge')
# -------------------------------------------------------------------------------------------------------
# Analysis: Merging APPs
print '## Step 6: Merging APPs'
arcpy.Merge_management(['Hidro_Linhas_05_Buffer',
'Hidro_Pontos_03_Buffer',
'Hidro_Poligonos_03_Buffer',
'Hidro_Poligonos_18_Merge'],
'APP_01_Merge')
arcpy.Merge_management(['Hidro_Linhas_06_Buffer',
'Hidro_Pontos_04_Buffer',
'Hidro_Poligonos_03_Buffer',
'Hidro_Poligonos_19_Merge'],
'APP_02_Merge')
arcpy.Dissolve_management('APP_01_Merge',
'APP_03_Dissolve',
'', '', 'MULTI_PART',
'DISSOLVE_LINES')
arcpy.Dissolve_management('APP_02_Merge',
'APP_04_Dissolve',
'', '', 'MULTI_PART',
'DISSOLVE_LINES')
# -------------------------------------------------------------------------------------------------------
# Analysis: Removing polygons
print '## Step 7: Removing polygons'
arcpy.Merge_management(['Hidro_Poligonos_01_Select',
'Hidro_Poligonos_04_Select',
'Hidro_Poligonos_08_Select',
'Hidro_Poligonos_10_Select',
'Hidro_Poligonos_12_Select',
'Hidro_Poligonos_14_Select',
'Hidro_Poligonos_16_Select'],
'Hidro_Poligonos_20_Merge')
arcpy.Merge_management(['Hidro_Poligonos_01_Select',
'Hidro_Poligonos_06_Select',
'Hidro_Poligonos_08_Select',
'Hidro_Poligonos_10_Select',
'Hidro_Poligonos_12_Select',
'Hidro_Poligonos_14_Select',
'Hidro_Poligonos_16_Select'],
'Hidro_Poligonos_21_Merge')
arcpy.Erase_analysis('APP_03_Dissolve',
'Hidro_Poligonos_20_Merge',
'APP_05_Erase', '#')
arcpy.Erase_analysis('APP_04_Dissolve',
'Hidro_Poligonos_21_Merge',
'APP_06_Erase', '#')
# -------------------------------------------------------------------------------------------------------
# Analysis: Adjusting the attribute table
print '## Step 8: Adjusting the attribute table'
arcpy.AddField_management('APP_05_Erase', 'APP', 'TEXT', '', '', 20, '', 'NULLABLE', 'NON_REQUIRED')
arcpy.AddField_management('APP_06_Erase', 'APP', 'TEXT', '', '', 20, '', 'NULLABLE', 'NON_REQUIRED')
arcpy.CalculateField_management('APP_05_Erase','APP', repr('Sim'), 'PYTHON_9.3')
arcpy.CalculateField_management('APP_06_Erase','APP', repr('Sim'), 'PYTHON_9.3')
arcpy.AddField_management('APP_05_Erase', 'Area_ha', 'FLOAT')
arcpy.AddField_management('APP_06_Erase', 'Area_ha', 'FLOAT')
arcpy.CalculateField_management('APP_05_Erase','Area_ha', '!shape.area@hectares!', 'PYTHON_9.3')
arcpy.CalculateField_management('APP_06_Erase','Area_ha', '!shape.area@hectares!', 'PYTHON_9.3')
# -------------------------------------------------------------------------------------------------------
# Final Products
print '## Step 9: Final products'
arcpy.MultipartToSinglepart_management('APP_05_Erase', 'APP_07_Multipart')
arcpy.MultipartToSinglepart_management('APP_06_Erase', 'APP_08_Multipart')
arcpy.DeleteField_management('APP_07_Multipart', 'ORIG_FID')
arcpy.DeleteField_management('APP_08_Multipart', 'ORIG_FID')
arcpy.Copy_management ('APP_07_Multipart',
os.path.join(out_pathFeatureDataSet, 'APP_HidroPerene'))
arcpy.Copy_management ('APP_08_Multipart',
os.path.join(out_pathFeatureDataSet, 'APP_HidroIntermitente'))
# -------------------------------------------------------------------------------------------------------
# Cleaning up temporary data
print '## Step 10: Deleting the temporary feature dataset'
arcpy.Delete_management(temp_pathFeatureDataSet, 'FeatureDataset')
# -------------------------------------------------------------------------------------------------------
# Finishing up
arcpy.ResetEnvironments()
print '# ' + '-' * 100
print '# End'
|
from devices import LogicDevice
from devices.basic import Xor, Not, And
from utils.basevariable import BaseVariable
class Expression(BaseVariable):
def __init__(self, value: LogicDevice):
self.value = value
class Variable(BaseVariable):
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __xor__(self, other):
return Xor([self, other], Variable('xor_{0}_{1}'.format(self, other)))
def __invert__(self):
return Not(self, Variable('not_{0}'.format(self)))
def __and__(self, other):
return And([self, other], Variable('and_{0}_{1}'.format(self, other)))
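# Minimal usage sketch (hypothetical; assumes the Xor/Not/And devices take (inputs, output_variable) as wired
# up by the operator overloads above):
#   a, b = Variable('a'), Variable('b')
#   s = a ^ b    # Xor device whose output Variable is named 'xor_a_b'
#   c = a & b    # And device whose output Variable is named 'and_a_b'
#   n = ~a       # Not device whose output Variable is named 'not_a'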
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used in losses."""
from tf3d import standard_fields
def get_batch_size_1_input_images(inputs, b):
"""Returns input dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the image
tensors.
Args:
inputs: A dictionary of tf.Tensors with our input data.
b: Example index in the batch.
Returns:
inputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_inputs = {}
for field in standard_fields.get_input_image_fields():
if field in inputs:
b_1_inputs[field] = inputs[field][b:b + 1, Ellipsis]
return b_1_inputs
def get_batch_size_1_input_points(inputs, b):
"""Returns input dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the point
tensors.
Args:
inputs: A dictionary of tf.Tensors with our input data.
b: Example index in the batch.
Returns:
inputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_inputs = {}
for field in standard_fields.get_input_point_fields():
if field in inputs:
b_1_inputs[field] = inputs[field][b]
return b_1_inputs
def get_batch_size_1_input_voxels(inputs, b):
"""Returns input dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the voxel
tensors.
Args:
inputs: A dictionary of tf.Tensors with our input data.
b: Example index in the batch.
Returns:
inputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_inputs = {}
for field in standard_fields.get_input_voxel_fields():
if field in inputs:
b_1_inputs[field] = inputs[field][b]
return b_1_inputs
def get_batch_size_1_input_objects(inputs, b):
"""Returns input dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the object
tensors.
Args:
inputs: A dictionary of tf.Tensors with our input data.
b: Example index in the batch.
Returns:
inputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_inputs = {}
for field in standard_fields.get_input_object_fields():
if field in inputs:
b_1_inputs[field] = inputs[field][b]
return b_1_inputs
def get_batch_size_1_output_images(outputs, b):
"""Returns output dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the image
tensors.
Args:
outputs: A dictionary of tf.Tensors with the network output.
b: Example index in the batch.
Returns:
outputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_outputs = {}
for field in standard_fields.get_output_image_fields():
if field in outputs:
b_1_outputs[field] = outputs[field][b:b + 1, Ellipsis]
return b_1_outputs
def get_batch_size_1_output_points(outputs, b):
"""Returns output dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the point
tensors.
Args:
outputs: A dictionary of tf.Tensors with the network output.
b: Example index in the batch.
Returns:
outputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_outputs = {}
for field in standard_fields.get_output_point_fields():
if field in outputs and outputs[field] is not None:
b_1_outputs[field] = outputs[field][b]
return b_1_outputs
def get_batch_size_1_output_voxels(outputs, b):
"""Returns output dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the voxel
tensors.
Args:
outputs: A dictionary of tf.Tensors with the network output.
b: Example index in the batch.
Returns:
outputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_outputs = {}
for field in standard_fields.get_output_voxel_fields():
if field in outputs and outputs[field] is not None:
b_1_outputs[field] = outputs[field][b]
return b_1_outputs
def get_batch_size_1_output_objects(outputs, b):
"""Returns output dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the object
tensors.
Args:
outputs: A dictionary of tf.Tensors with the network output.
b: Example index in the batch.
Returns:
outputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_outputs = {}
for field in standard_fields.get_output_object_fields():
if field in outputs and outputs[field] is not None:
b_1_outputs[field] = outputs[field][b]
return b_1_outputs
def get_batch_size_1_output_anchors(outputs, b):
"""Returns output dictionary containing tensors with batch size of 1.
Note that this function only applies its example selection to the anchor
tensors.
Args:
outputs: A dictionary of tf.Tensors with the network output.
b: Example index in the batch.
Returns:
outputs_1: A dictionary of tf.Tensors with batch size of one.
"""
b_1_outputs = {}
for field in standard_fields.get_output_anchor_fields():
if field in outputs and outputs[field] is not None:
b_1_outputs[field] = outputs[field][b]
return b_1_outputs
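# Note (added comment): image tensors are sliced as [b:b + 1, ...] so they keep a
# leading batch dimension of one, while point, voxel, object and anchor tensors are
# indexed as [b] and therefore drop the batch dimension entirely.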
|
from game_map.models import Structure, Chunk
from websocket_controller.message_utils import round_down, error_message, require_message_content
from city_game_backend import CONSTANTS
from game_map.utils import struct_2_dict
# from django.core import serializers
import json
from websocket_controller.WebsocketRoutes import WebsocketRoutes
@WebsocketRoutes.route(CONSTANTS.MESSAGE_TYPE_DYNAMIC_CHUNK_DATA_REQUEST)
@require_message_content(
('lat', float),
('lon', float)
)
def handle_dynamic_chunk_data_request(message, websocket) -> str:
chunk_lat = round_down(
message['lat']
)
chunk_lon = round_down(
message['lon']
)
requested_chunk: Chunk = Chunk.objects.filter(
latitude_lower_bound=chunk_lat,
longitude_lower_bound=chunk_lon
).first()
if requested_chunk is None:
return error_message('No chunk at given coordinates')
requested_structures: [Structure] = Structure.objects.filter(
chunk=requested_chunk
)
# This returns way too much data and produces output that
# is incompatible with the Unity's deserialization system,
# so there is a replacement below
"""
return serializers.serialize('json', structures, fields=(
'latitude', 'longitude', 'taken_over', 'owner', 'tier', 'resource_type', 'resources_left'
))
"""
structures_to_send = [struct_2_dict(struct) for struct in requested_structures]
response = {
'structures': structures_to_send
}
return json.dumps(response)
|
__author__ = 'Jiafan Yu'
class SimpleAverage:
def __init__(self, step_ahead, window_size=168):
self.step_ahead = step_ahead
self.window_size = window_size
pass
def predict(self, X):
x_load = X[:, range(self.window_size, 2 * self.window_size)]
recent_load_indices = range(self.step_ahead, self.window_size, 24)
return x_load[:, recent_load_indices].mean(axis=1)
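# Minimal usage sketch (illustrative; the feature layout is an assumption inferred
# from the indexing above: columns window_size..2*window_size hold the hourly load
# history, so predict() averages the same hour of day over the previous week).
if __name__ == '__main__':
    import numpy as np
    X = np.arange(2 * 168, dtype=float).reshape(1, -1)  # one synthetic sample
    model = SimpleAverage(step_ahead=1)
    print(model.predict(X))  # mean of seven values taken at 24-hour strides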
|
from packit.actions import ActionName
SYNCED_FILES_SCHEMA = {
"anyOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"src": {
"anyOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}},
]
},
"dest": {"type": "string"},
},
},
]
}
JOB_CONFIG_SCHEMA = {
"type": "object",
"properties": {
"job": {
"enum": [
"propose_downstream",
"build",
"sync_from_downstream",
"copr_build",
"tests",
]
},
"trigger": {"enum": ["release", "pull_request", "commit"]},
"notify": {"type": "array", "items": {"enum": ["pull_request_status"]}},
"metadata": {"type": "object"},
},
"required": ["trigger", "job"],
}
PACKAGE_CONFIG_SCHEMA = {
"type": "object",
"properties": {
"specfile_path": {"type": "string"},
"downstream_package_name": {"type": "string"},
"upstream_project_name": {"type": "string"},
"upstream_ref": {"type": "string"},
"create_tarball_command": {"type": "array", "items": {"type": "string"}},
"current_version_command": {"type": "array", "items": {"type": "string"}},
"allowed_gpg_keys": {"type": "array", "items": {"type": "string"}},
"synced_files": {"type": "array", "items": SYNCED_FILES_SCHEMA},
"jobs": {"type": "array", "items": JOB_CONFIG_SCHEMA},
"actions": {
"type": "object",
"properties": {
a: {"type": ["string", "array"]}
for a in ActionName.get_possible_values()
},
"additionalProperties": False,
},
},
"required": ["specfile_path"],
}
USER_CONFIG_SCHEMA = {
"type": "object",
"properties": {
"debug": {"type": "boolean"},
"dry_run": {"type": "boolean"},
"fas_user": {"type": "string"},
"keytab_path": {"type": "string"},
"github_token": {"type": "string"},
"pagure_user_token": {"type": "string"},
"pagure_fork_token": {"type": "string"},
"github_app_installation_id": {"type": "string"},
"github_app_id": {"type": "string"},
"github_app_cert_path": {"type": "string"},
},
}
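# Minimal sketch (assumption: the jsonschema package is available; it is not
# imported by this module) showing how a parsed package config could be checked
# against the schema above.
if __name__ == '__main__':
    import jsonschema
    jsonschema.validate(
        {
            "specfile_path": "packit.spec",
            "jobs": [{"job": "copr_build", "trigger": "pull_request"}],
        },
        PACKAGE_CONFIG_SCHEMA,
    )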
|
# -*- coding: utf-8 -*-
import os # validate_path
import tkinter as tk # GuiC3D
def validate_path(path, isempty=False, metadata=False):
"""Check if the path exist. If a path is not provided, ask the user to type one.
:param path: path to validata
:type path: str
:param isempty: check if the folder is empty if True
:type isempty: bool
:param metadata: return also the metadata path if True
:return: path validated and metadata path if `metadata=True`
Example::
        project_path, metadata_path = validate_path(path='/home/romain/Downloads/irsst',
isempty=True, metadata=True)
"""
# ask project path it is not given
if path is None:
path = input('Enter the path of the project: ')
# add trailing slash if not already here
project_path = os.path.join(path, '')
assert os.path.exists(project_path), 'the directory {} does not exist'.format(project_path)
# raise error if dir is not empty
if isempty:
assert not os.listdir(project_path), 'the directory {} is not empty'.format(project_path)
# return metadata path
if metadata:
metadata_path = os.path.join(path, 'metadata', '')
else:
metadata_path = None
return project_path, metadata_path
def create_root_folders(project_path):
"""Create the root folders of the project (*inputs, ouputs and metadata*)
:param project_path: path to the project
:type project_path: str
"""
folders = ['inputs', 'outputs', 'metadata']
[os.mkdir(project_path + ifolder) for ifolder in folders]
def get_data_folders(project_path, conf_file):
"""Get data folders and associated type (*markers and/or emg and/or emg*).
:param project_path: path to the project
:type project_path: str
:param conf_file: json conf file load as dict
:type conf_file: dict
    :return: output: dict containing the data folder(s) as key and type (*markers and/or emg and/or force*) as value
"""
participants = list(conf_file['participants']['pseudo'].values())
blacklist = list(conf_file['participants']['process'].values())
folders = list(conf_file['trials']['folder'].values())
output = {}
emg_folders = list(conf_file['trials']['emg'].values())
markers_folders = list(conf_file['trials']['markers'].values())
force_folders = list(conf_file['trials']['force'].values())
for b, iparticipant in enumerate(participants):
if blacklist[b]:
for i, ifolder in enumerate(folders):
value = []
key = os.path.join(project_path, 'inputs', iparticipant, ifolder, '')
if emg_folders[i]:
value.append('emg')
if markers_folders[i]:
value.append('markers')
if force_folders[i]:
value.append('force')
output[key] = value
return output
class GuiC3D:
def __init__(self, targets, fields):
self.targets = targets
self.fields = fields
self.idx = 0
self.mwheel_count = 0
self.assign = []
self.FONTSIZE = 20
self.init_master()
self.keyboard()
self.target()
self.lists()
self.buttons()
self.run()
def init_master(self):
self.root = tk.Tk()
self.root.title('GUI - channels assignment')
def keyboard(self):
self.root.bind('1', self.action_add)
self.root.bind('2', self.action_nan)
def target(self):
self.label = tk.Label(self.root, text=self.targets[self.idx], font=(None, self.FONTSIZE))
self.label.grid(row=0)
def lists(self):
self.list_fields = tk.Listbox(self.root, font=(None, self.FONTSIZE))
self.list_fields.focus_set()
self.list_fields.insert(0, *self.fields)
self.list_fields.grid(row=1, column=0, rowspan=7, padx=10)
self.list_fields.config(height=0)
self.list_assigned = tk.Listbox(self.root, font=(None, self.FONTSIZE))
self.list_assigned.grid(row=1, column=2, rowspan=7, padx=10)
        self.list_assigned.config(height=0)
def buttons(self):
self.button_add = tk.Button(self.root, text='Add [1]', font=(None, self.FONTSIZE),
command=self.action_add)
self.button_add.grid(row=1, column=1, sticky='W')
self.button_nan = tk.Button(self.root, text='NaN [2]', font=(None, self.FONTSIZE),
command=self.action_nan)
self.button_nan.grid(row=2, column=1, sticky='W')
def action_add(self, event=None):
selection = self.list_fields.curselection()[0]
self.list_fields.delete(selection)
self.list_assigned.insert('end', f'{self.idx}_{self.fields[selection]}')
self.prepare_next(selection)
def action_nan(self, event=None):
selection = self.list_fields.curselection()[0]
self.list_assigned.insert('end', f'{self.idx}_nan')
self.prepare_next(selection)
def prepare_next(self, selection):
self.list_fields.select_set(selection)
self.idx += 1
if self.idx >= len(self.targets):
self.root.destroy()
else:
self.label.config(text=self.targets[self.idx])
self.assign.append(self.fields[selection])
del (self.fields[selection])
def run(self):
self.root.mainloop()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 22:43:19 2019
@author: soumi
"""
from bs4 import BeautifulSoup
from RectUtils.RectObj import RectObj
# set attribute for Icon
def styleIconAttribute(top, left,width, height, position="absolute", color = "white"):
styleDic = {}
styleDic["background-color"] = color
styleDic["position"] = position
styleDic["top"] = str(top)+ "px"
styleDic["left"] = str(left)+ "px"
styleDic["width"] = str(width)+ "px"
styleDic["height"] = str(height)+ "px"
styleString= ""
for key in styleDic:
styleString += key+": " + styleDic[key] +";"
return styleString
# set attribute for Container
def styleContainerAttribute(top, left,width, height,position="absolute"):
styleDic = {}
styleDic["position"] = position
styleDic["top"] = str(top)+ "px"
styleDic["left"] = str(left)+ "px"
styleDic["width"] = str(width)+ "px"
styleDic["height"] = str(height)+ "px"
styleDic["border"] = "1px solid #000"
styleString= ""
for key in styleDic:
styleString += key+": " + styleDic[key] +";"
return styleString
# set attribute for rest of the item
def styleOtherAttribute(top, left,width, height,position="absolute"):
styleDic = {}
styleDic["position"] = position
styleDic["top"] = str(top)+ "px"
styleDic["left"] = str(left)+ "px"
styleDic["width"] = str(width)+ "px"
styleDic["height"] = str(height)+ "px"
styleString= ""
for key in styleDic:
styleString += key+": " + styleDic[key] +";"
return styleString
# set attribute for text item
def styleTextAttribute(top, left,width, height,position="absolute"):
styleDic = {}
styleDic["position"] = position
styleDic["margin-top"] = str(top)+ "px"
styleDic["left"] = str(left)+ "px"
styleDic["width"] = str(width)+ "px"
styleDic["height"] = str(height)+ "px"
styleDic["background-color"] = str("#bfbdb6")
styleString= ""
for key in styleDic:
styleString += key+": " + styleDic[key] +";"
return styleString
# create an element from a RectObj
def newElement(soup, rectObj,parRect):
elementId = rectObj.elementId
left = rectObj.x
top = rectObj.y
if(rectObj.isContainer()):
styleAttr = styleContainerAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('div', id=elementID, style=styleAttr)
return new_node
elif(rectObj.isText()):
styleAttr = styleTextAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('p', id=elementID, style=styleAttr)
new_node.string = "Hello Text"
return new_node
elif(rectObj.isButtonText()):
styleAttr = styleOtherAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('Button', id=elementID, style=styleAttr)
new_node.string = "Button"
return new_node
elif(rectObj.isSlider()):
styleAttr = styleOtherAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('input',type="range",min=1,max=100,value=1, id=elementID, style=styleAttr)
return new_node
elif(rectObj.isToogle()):
styleAttr = styleOtherAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('label', id=elementID, style=styleAttr)
input_tag_val = soup.new_tag('input', type="checkbox")
span_tag_val = soup.new_tag('span', **{'class':'slider'})
new_node.append(input_tag_val)
new_node.append(span_tag_val)
return new_node
elif(rectObj.isCheckbox()):
styleAttr = styleOtherAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('input',type="checkbox", id=elementID, style=styleAttr)
return new_node
# create 1,2,3,4 dummy dropdown
elif(rectObj.isDropDown()):
styleAttr = styleOtherAttribute(top,left, rectObj.width, rectObj.height)
elementID = "element" + str(elementId)
new_node = soup.new_tag('select', id=elementID, style=styleAttr)
button_tag_val1 = soup.new_tag('option', value="1")
button_tag_val1.append("1")
button_tag_val2 = soup.new_tag('option', value="2")
button_tag_val2.append("2")
button_tag_val3 = soup.new_tag('option', value="3")
button_tag_val3.append("3")
button_tag_val4 = soup.new_tag('option', value="4")
button_tag_val4.append("4")
new_node.append(button_tag_val1)
new_node.append(button_tag_val2)
new_node.append(button_tag_val3)
new_node.append(button_tag_val4)
return new_node
else:
iconName = rectObj.getIconName()
styleAttr = styleIconAttribute(top,left,rectObj.width,rectObj.height)
imgSrc = "../images/"+iconName+".png"
elementID = "element" + str(elementId)
new_node = soup.new_tag('button', id=elementID, style=styleAttr)
styleImgAttr = styleIconAttribute(0,0,rectObj.width-4,rectObj.height-4)
button_tag_val = soup.new_tag('img',src=imgSrc, style=styleImgAttr )
# button_tag_val = soup.new_tag('img',src=imgSrc, height=str(rectObj.height)+"px", width=str(rectObj.width)+"px" )
new_node.append(button_tag_val)
return new_node
def clearElements(fileName, outFileName):
    with open(fileName, "r") as inFile:
        data = inFile.read()
    # print (data)
    soup = BeautifulSoup(data, 'html.parser')
    with open(outFileName, "w") as outFile:
        outFile.write(str(soup))
|
import os
import time
import argparse
import numpy as np
import pickle
# Custom Classes
import preprocess
def save_pickle(variable, fileName):
with open(fileName, 'wb') as f:
pickle.dump(variable, f)
def load_pickle_file(fileName):
with open(fileName, 'rb') as f:
return pickle.load(f)
def preprocess_for_training(train_A_dir, train_B_dir, cache_folder):
num_mcep = 24
sampling_rate = 16000
frame_period = 5.0
n_frames = 128
print("Starting to prepocess data.......")
start_time = time.time()
wavs_A = preprocess.load_wavs(wav_dir=train_A_dir, sr=sampling_rate)
wavs_B = preprocess.load_wavs(wav_dir=train_B_dir, sr=sampling_rate)
f0s_A, timeaxes_A, sps_A, aps_A, coded_sps_A = preprocess.world_encode_data(
wave=wavs_A, fs=sampling_rate, frame_period=frame_period, coded_dim=num_mcep)
f0s_B, timeaxes_B, sps_B, aps_B, coded_sps_B = preprocess.world_encode_data(
wave=wavs_B, fs=sampling_rate, frame_period=frame_period, coded_dim=num_mcep)
log_f0s_mean_A, log_f0s_std_A = preprocess.logf0_statistics(f0s=f0s_A)
log_f0s_mean_B, log_f0s_std_B = preprocess.logf0_statistics(f0s=f0s_B)
print("Log Pitch A")
print("Mean: {:.4f}, Std: {:.4f}".format(log_f0s_mean_A, log_f0s_std_A))
print("Log Pitch B")
print("Mean: {:.4f}, Std: {:.4f}".format(log_f0s_mean_B, log_f0s_std_B))
coded_sps_A_transposed = preprocess.transpose_in_list(lst=coded_sps_A)
coded_sps_B_transposed = preprocess.transpose_in_list(lst=coded_sps_B)
coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std = preprocess.coded_sps_normalization_fit_transform(
coded_sps=coded_sps_A_transposed)
coded_sps_B_norm, coded_sps_B_mean, coded_sps_B_std = preprocess.coded_sps_normalization_fit_transform(
coded_sps=coded_sps_B_transposed)
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
np.savez(os.path.join(cache_folder, 'logf0s_normalization.npz'),
mean_A=log_f0s_mean_A,
std_A=log_f0s_std_A,
mean_B=log_f0s_mean_B,
std_B=log_f0s_std_B)
np.savez(os.path.join(cache_folder, 'mcep_normalization.npz'),
mean_A=coded_sps_A_mean,
std_A=coded_sps_A_std,
mean_B=coded_sps_B_mean,
std_B=coded_sps_B_std)
save_pickle(variable=coded_sps_A_norm,
fileName=os.path.join(cache_folder, "coded_sps_A_norm.pickle"))
save_pickle(variable=coded_sps_B_norm,
fileName=os.path.join(cache_folder, "coded_sps_B_norm.pickle"))
end_time = time.time()
print("Preprocessing finsihed!! see your directory ../cache for cached preprocessed data")
print("Time taken for preprocessing {:.4f} seconds".format(
end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Prepare data for training Cycle GAN using PyTorch')
train_A_dir_default = '../data/vcc2016_training/SF1/'
train_B_dir_default = '../data/vcc2016_training/TF2/'
cache_folder_default = '../cache_check/'
parser.add_argument('--train_A_dir', type=str,
help="Directory for source voice sample", default=train_A_dir_default)
parser.add_argument('--train_B_dir', type=str,
help="Directory for target voice sample", default=train_B_dir_default)
parser.add_argument('--cache_folder', type=str,
help="Store preprocessed data in cache folders", default=cache_folder_default)
argv = parser.parse_args()
train_A_dir = argv.train_A_dir
train_B_dir = argv.train_B_dir
cache_folder = argv.cache_folder
preprocess_for_training(train_A_dir, train_B_dir, cache_folder)
|
# -*- coding: utf-8 -*-
# These classes and functions were implemented following the DOCs from the original glm (OpenGL Mathematics) lib.
# You can find it here: http://glm.g-truc.net/0.9.8/index.html
# Also, some parts of it were implemented following the Mack Stone's glm Python code in GitHub.
# His code can be found here: https://github.com/mackst/glm
from math import sqrt
class ivec2(object):
"""
ivec2 ( x = 0, y = 0 )
Creates a vector of two integers.
DEFAULT is (0, 0)
"""
def __init__(self, *args, **kwargs):
if kwargs:
self.x = kwargs.get('x', 0)
self.y = kwargs.get('y', 0)
elif args:
self.x, self.y = args
else:
self.x = 0
self.y = 0
class vec2(object):
"""
vec2 ( x = 0.0, y = 0.0 )
Creates a vector of two floats.
DEFAULT is (0.0, 0.0)
"""
def __init__(self, *args, **kwargs):
if kwargs:
self.x = kwargs.get('x', .0)
self.y = kwargs.get('y', .0)
elif args:
self.x, self.y = args
else:
self.x = .0
self.y = .0
class vec3(object):
"""
vec3 ( x = 0.0, y = 0.0, z = 0.0 )
Creates a vector of three floats.
Has some vector operations.
DEFAULT is (0.0, 0.0, 0.0)
"""
def __init__(self, *args, **kwargs):
if kwargs:
self.x = kwargs.get('x', .0)
self.y = kwargs.get('y', .0)
self.z = kwargs.get('z', .0)
elif args:
self.x, self.y, self.z = args
else:
self.x = .0
self.y = .0
self.z = .0
# Handling Negative Unary of vec3 Objects:
def __neg__(self):
return vec3(self.x * (-1), self.y * (-1), self.z * (-1))
# Handling Addition of vec3 Objects:
def __add__(self, value):
if isinstance(value, vec3):
return vec3(self.x + value.x, self.y + value.y, self.z + value.z)
else:
return vec3(self.x + value, self.y + value, self.z + value)
# Handling Reverse Addition of vec3 Objects:
def __radd__(self, value):
if isinstance(value, vec3):
return vec3(self.x + value.x, self.y + value.y, self.z + value.z)
else:
return vec3(self.x + value, self.y + value, self.z + value)
# Handling Subtraction of vec3 Objects:
def __sub__(self, value):
if isinstance(value, vec3):
return vec3(self.x - value.x, self.y - value.y, self.z - value.z)
else:
return vec3(self.x - value, self.y - value, self.z - value)
# Handling Reverse Subtraction of vec3 Objects:
def __rsub__(self, value):
if isinstance(value, vec3):
return vec3(value.x - self.x, value.y - self.y, value.z - self.z)
else:
return vec3(value - self.x, value - self.y, value - self.z)
# Handling Multiplication of vec3 Objects:
def __mul__(self, value):
if isinstance(value, vec3):
return vec3(self.x * value.x, self.y * value.y, self.z * value.z)
else:
return vec3(self.x * value, self.y * value, self.z * value)
# Handling Reverse Multiplication of vec3 Objects:
def __rmul__(self, value):
if isinstance(value, vec3):
return vec3(self.x * value.x, self.y * value.y, self.z * value.z)
else:
return vec3(self.x * value, self.y * value, self.z * value)
# Handling Division of vec3 Objects:
def __truediv__(self, value):
if isinstance(value, vec3):
return vec3(self.x / value.x, self.y / value.y, self.z / value.z)
else:
return vec3(self.x / value, self.y / value, self.z / value)
# Handling Reverse Division of vec3 Objects:
def __rtruediv__(self, value):
if isinstance(value, vec3):
return vec3(value.x / self.x, value.y / self.y, value.z / self.z)
else:
return vec3(value / self.x, value / self.y, value / self.z)
# Cross Product of Two vec3 Objects
def cross(first, second):
return vec3(
first.y * second.z - first.z * second.y,
first.z * second.x - first.x * second.z,
first.x * second.y - first.y * second.x)
# Dot Product of Two vec3 Objects
def dot(first, second):
if isinstance(first, float) and isinstance(second, float):
return first * second
elif isinstance(first, vec3) and isinstance(second, vec3):
return first.x * second.x + first.y * second.y + first.z * second.z
# Normalization of a vec3 Object
def normalize(value):
if isinstance(value, float):
if value < 0.0:
return -1.0
return 1.0
elif isinstance(value, vec3):
magnitude = sqrt(value.x ** 2 + value.y ** 2 + value.z ** 2)
return value / magnitude
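# Illustrative note (added comment): the cross product of (1, 0, 0) and (0, 1, 0)
# is (0, 0, 1), the dot product of two orthogonal unit vectors is 0.0, and
# normalize divides a vec3 by its magnitude, e.g. (3.0, 0.0, 4.0) becomes
# (0.6, 0.0, 0.8).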
|
char_embedding_len = 100
word_embedding_len = 100
char_hidden_size = 512
context_hidden_size = 1024
agg_hidden_size = 128
num_perspective = 12
class_size = 2
max_char_len = 15
max_word_len = 15
batch_size = 100
char_vocab_len = 1692
learning_rate = 0.0002
keep_prob = 0.7
epochs = 50
|
import os
import sys
from drink_partners.settings import constants
SIMPLE_SETTINGS = {
'OVERRIDE_BY_ENV': True,
'CONFIGURE_LOGGING': True
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'drink_partners.contrib.logs.filters.RequireDebugFalse'
},
'add_hostname': {
'()': 'drink_partners.contrib.logs.filters.AddHostName'
},
'ignore_if_contains': {
'()': 'drink_partners.contrib.logs.filters.IgnoreIfContains',
'substrings': ['/healthcheck/']
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(hostname)s %(name)s %(module)s:%(filename)s:%(lineno)d %(process)d %(thread)d == %(message)s' # noqa
},
'simple': {
'format': '%(hostname)s %(levelname)s %(name)s %(message)s'
},
},
'handlers': {
'stdout': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'simple',
'filters': [
'add_hostname',
'ignore_if_contains'
],
},
},
'loggers': {
'': {
'handlers': ['stdout'],
'level': 'INFO',
'propagate': True,
},
'asyncio': {
'level': 'WARNING',
'propagate': True,
},
'gunicorn.access': {
'level': 'CRITICAL',
'propagate': True,
}
}
}
# Auth applications
AUTH_APPLICATIONS = {
'dev': os.getenv('APP_TOKEN', 'test')
}
POOL_OF_RAMOS = {
'authentication': [
constants.STATIC_AUTHORIZATION_BACKEND,
],
'partners': [
constants.MONGODB_PARTNERS_BACKEND,
],
}
MOTOR_DB = os.getenv('MOTOR_DB', 'drink_partners')
MOTOR_URI = os.environ.get('MONGODB_URI', f'mongodb://127.0.0.1:27017/{MOTOR_DB}') # noqa
MOTOR_MAX_POOL_SIZE = int(os.environ.get('MONGO_MAX_POOL_SIZE', '1'))
MOTOR_KWARGS = {}
DEFAULT_AUTH_BACKEND_ID = 'static'
DEFAULT_PARTNERS_BACKEND_ID = 'mongodb'
|
# -*- coding: utf-8 -*-
"""Condition synchronization
"""
import time
import threading
import random
class Producer(threading.Thread):
def __init__(self, integers, condition):
super(Producer, self).__init__()
self.integers = integers
self.condition = condition
def run(self):
for i in range(10):
integer = random.randint(0, 256)
with self.condition:
print 'condition acquired by %s' % self.name
self.integers.append(integer)
                print '%d appended to list by %s' % (integer, self.name)
print 'condition notified by %s' % self.name
self.condition.notify()
print 'condition released by %s' % self.name
time.sleep(1)
class Consumer(threading.Thread):
def __init__(self, integers, condition):
super(Consumer, self).__init__()
self.integers = integers
self.condition = condition
def run(self):
while True:
with self.condition:
                print 'condition acquired by %s' % self.name
while True:
if self.integers:
integer = self.integers.pop()
                        print '%d popped from list by %s' % (integer, self.name)
break
print 'condition wait by %s' % self.name
self.condition.wait()
print 'condition released by %s' % self.name
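# Note (added comment): Consumer re-checks the predicate in an inner loop after
# every wait() because notify() only signals that the shared list may have changed;
# waking up does not guarantee an item is still available.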
def main():
integers = []
condition = threading.Condition()
t1 = Producer(integers, condition)
t2 = Consumer(integers, condition)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
# Convert features and labels to numpy arrays
import ast
import numpy
from lxml import etree
from sklearn.preprocessing import LabelEncoder
# Local imports
from data_tools import data_util
debug = True
''' Convert the dataframe feature column to a numpy array for processing
use_numpy: True if we should convert to a numpy array
doc_level: True if features should aggregate features at the document level (NOT IMPLEMENTED)
'''
def to_feats(df, use_numpy=True, doc_level=False, feat_names=None):
print('to_feats: use_numpy:', use_numpy, 'doc_level:', doc_level, 'feats:', feat_names)
feats = []
feat_columns = []
# Get the names of the feature columns for the df
if feat_names is None:
feat_columns.append('feats')
else: # Handle multiple feature types
for fname in feat_names:
feat_columns.append('feats_' + fname)
# Load a list of all the features
for i, row in df.iterrows():
if len(feat_columns) > 1:
mini_feat_list = []
for featname in feat_columns:
flist = row[featname]
if type(flist) is str:
flist = ast.literal_eval(flist)
if len(feat_columns) > 1:
mini_feat_list.append(flist)
else:
feats.append(flist)
#if debug:
# print('to_feats: ', row['docid'], 'feats:', flist)
if debug and i == 0:
print('feats:', flist)
#if len(flist) > 0:
# print('feats[0]:', type(flist[0]), flist[0])
if len(feat_columns) > 1:
feats.append(mini_feat_list)
if use_numpy:
return numpy.asarray(feats).astype('float')
else:
return feats
def to_labels(df, labelname, labelencoder=None, encode=True):
if debug:
print('to_labels: ', labelname, ', encode: ', str(encode))
#data = ast.literal_eval(df[labelname])
labels = []
# Extract the labels from the dataframe
for i, row in df.iterrows():
flist = row[labelname]
if debug: print('flist:', type(flist), str(flist))
#if type(flist) == str:
# flist = ast.literal_eval(flist)
if debug and i == 0:
print('labels[0]:', flist)
labels.append(flist)
# Normalize the rank values
if labelname == 'event_ranks':
enc_labels = []
for rank_list in labels:
norm_ranks = []
if rank_list is not None and len(rank_list) > 0 and not rank_list == '[]':
if type(rank_list) == str:
rank_list = ast.literal_eval(rank_list)
                min_rank = float(numpy.nanmin(numpy.array(rank_list, dtype=float), axis=None))
                # Scale min rank to 0
                if not numpy.isnan(min_rank) and min_rank > 0:
rank_list_scaled = []
for rank in rank_list:
if rank is None or rank is numpy.nan:
rank_list_scaled.append(-1)
else:
rank_list_scaled.append(rank - min_rank)
rank_list = rank_list_scaled
if encode:
                    max_rank = float(numpy.nanmax(numpy.array(rank_list, dtype=float), axis=None))
                    if numpy.isnan(max_rank) or max_rank == 0:
                        print('WARNING: max rank is 0 or NaN')
norm_ranks = rank_list # Don't normalize if they're all 0
else: # Normalize
norm_ranks = []
for rank in rank_list:
if rank is None or rank == -1:
norm_ranks.append(float(-1))
else:
norm_ranks.append(float(rank)/max_rank)
rank_list = norm_ranks
print('normalized ranks', rank_list)
enc_labels.append(numpy.asarray(rank_list))
labels = enc_labels
# Encode other labels
elif encode:
if labelencoder is None:
labelencoder = create_labelencoder(labels)
labels = encode_labels_plain(labels)
if debug: print('to_labels:', labelname, 'encode:', encode, 'labels:', len(labels))
return labels, labelencoder
def create_labelencoder(data, num=0):
global labelencoder, onehotencoder, num_labels
if debug: print("create_labelencoder: data[0]: ", str(data[0]))
if type(data[0]) is list:
data = [j for sub in data for j in sub]
labelencoder = LabelEncoder()
labelencoder.fit(data)
num_labels = len(labelencoder.classes_)
#onehotencoder = OneHotEncoder()
#onehotencoder.fit(data2)
return labelencoder
def encode_labels_plain(data, labenc=None):
if labenc is None:
labenc = labelencoder
print('encode_labels_plain:', str(data))
if type(data[0]) is list:
new_lab = []
for item in data:
new_lab.append(labenc.transform(item))
else:
new_lab = labenc.transform(data)
#print('encoded labels:', new_lab)
return new_lab
''' Encodes labels as one-hot vectors (entire dataset: 2D array)
data: a 1D array of labels
num_labels: the number of label classes
'''
def encode_labels(data, labenc=None, max_len=50, pad=True):
if labenc is None:
labenc = labelencoder
if labenc is None: # or onehotencoder == None:
print("Error: labelencoder must be trained before it can be used!")
return None
#return onehotencoder.transform(labelencoder.transform(data))
data2 = []
num_labels = len(labenc.classes_)
zero_vec = data_util.zero_vec(num_labels)
if debug: print("data: ", str(len(data)))
for item in data:
#print "item len: " + str(len(item))
new_item = []
if len(item) > 0:
item2 = labenc.transform(item)
for lab in item2:
onehot = []
for x in range(num_labels):
onehot.append(0)
onehot[lab] = 1
new_item.append(onehot)
# Pad vectors
if pad:
if len(new_item) > max_len:
new_item = new_item[0:max_len]
while len(new_item) < max_len:
new_item.append(zero_vec)
data2.append(new_item)
return data2
''' Decodes one sequence of labels
'''
def decode_labels(data, labenc=None):
#print "decode_labels"
if labenc is None:
labenc = labelencoder
data2 = []
for row in data:
#print "- row: " + str(row)
lab = numpy.argmax(numpy.asarray(row))
#print "- lab: " + str(lab)
data2.append(lab)
#print "- data2: " + str(data2)
return labenc.inverse_transform(data2)
#return labelencoder.inverse_transform(onehotencoder.reverse_transform(data))
def decode_all_labels(data, labenc=None):
decoded_labels = []
for sequence in data:
labs = decode_labels(sequence, labenc)
decoded_labels.append(labs)
return decoded_labels
''' Put 0 features corresponding to the labels if no features are required for the model
(i.e. for random or mention order)
'''
def dummy_function(df, feat_name='feats', doc_level=False):
print('dummy_function')
df[feat_name] = '0'
if not doc_level:
for i, row in df.iterrows():
fake_feats = []
if row['events'] is not None:
print('events:', row['events'], type(row['events']))
if (type(row['events']) is list and len(row['events']) == 0) or (type(row['events']) is str and row['events'] == '[]'):
print('event list is empty')
else:
if type(row['events']) is list:
event_list = row['events']
else:
event_list = etree.fromstring(str(row['events']))
if debug: print(row['docid'], 'dummy_function events:', type(event_list))#, etree.tostring(event_list))
if type(event_list) == str:
#event_list = eval(event_list)
event_list = ast.literal_eval(event_list)
if debug: print(row['docid'], 'dummy_function events len:', len(event_list))
for entry in event_list:
fake_feats.append(0)
df.at[i, feat_name] = fake_feats
print('dummy_function:', row['docid'], 'feats:', len(fake_feats))
return df
def do_nothing(df, feat_name='feats', doc_level=False):
if feat_name not in df.columns:
df[feat_name] = '0'
return df
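# Minimal usage sketch (illustrative, not part of the original module): train the
# label encoder on the full label set, then one-hot encode a shorter sequence,
# which is padded with zero vectors up to max_len.
if __name__ == '__main__':
    enc = create_labelencoder([['BEFORE', 'AFTER', 'OVERLAP']])
    onehot = encode_labels([['BEFORE', 'AFTER']], labenc=enc, max_len=4)
    print(onehot[0])  # two one-hot rows followed by two all-zero padding rows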
|
from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
import json
class MaxRequestHeaderKBTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
def config(self):
yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Module
name: ambassador
ambassador_id: {self.ambassador_id}
config:
max_request_headers_kb: 30
""")
yield self, self.format("""
---
apiVersion: ambassador/v2
kind: Mapping
name: {self.name}
hostname: "*"
prefix: /target/
service: http://{self.target.path.fqdn}
""")
def queries(self):
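        # 31 KB of headers exceeds the 30 KB limit configured above, so Envoy should
        # answer 431 (Request Header Fields Too Large); 29 KB stays under the limit.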
h1 = 'i' * (31 * 1024)
yield Query(self.url("target/"), expected=431,
headers={'big':h1})
h2 = 'i' * (29 * 1024)
yield Query(self.url("target/"), expected=200,
headers={'small':h2})
def check(self):
# We're just testing the status codes above, so nothing to check here
assert True
class MaxRequestHeaderKBMaxTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
def config(self):
yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Module
name: ambassador
ambassador_id: {self.ambassador_id}
config:
max_request_headers_kb: 96
""")
yield self, self.format("""
---
apiVersion: ambassador/v2
kind: Mapping
name: {self.name}
hostname: "*"
prefix: /target/
service: http://{self.target.path.fqdn}
""")
def queries(self):
# without the override the response headers will cause envoy to respond with a 503
h1 = 'i' * (97 * 1024)
yield Query(self.url("target/?override_extauth_header=1"), expected=431,
headers={'big':h1})
h2 = 'i' * (95 * 1024)
yield Query(self.url("target/?override_extauth_header=1"), expected=200,
headers={'small':h2})
def check(self):
# We're just testing the status codes above, so nothing to check here
assert True
|
#!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: Harsh Pandya
# import modules
import rospy
import tf
from kuka_arm.srv import *
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from mpmath import *
from sympy import *
def rotation_x(radians):
#Rotation matrix for x-axis
rotation_matrix = Matrix([
[1, 0, 0],
[0, cos(radians), -sin(radians)],
[0, sin(radians), cos(radians)],
])
return rotation_matrix
def rotation_y(radians):
# Rotation matrix for y-axis
rotation_matrix = Matrix([
[cos(radians), 0, sin(radians)],
[0, 1, 0],
[-sin(radians), 0, cos(radians)],
])
return rotation_matrix
def rotation_z(radians):
# Rotation matrix for z-axis
rotation_matrix = Matrix([
[cos(radians), -sin(radians), 0],
[sin(radians), cos(radians), 0],
[0, 0, 1],
])
return rotation_matrix
def create_rrpy_matrix(roll, pitch, yaw):
# Generation of Rrpy matrix consist of z-y-x extrinsic rotations.
# Roll, pitch and yaw values are calculated from quaternions which comes from ROS message
rot_x = rotation_x(roll)
rot_y = rotation_y(pitch)
rot_z = rotation_z(yaw)
Rrpy = rot_z * rot_y * rot_x
return Rrpy
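# Illustrative check (added comment, not part of the original node): with
# roll = pitch = yaw = 0 every individual rotation matrix reduces to eye(3), so
# create_rrpy_matrix(0, 0, 0) returns the 3x3 identity.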
def handle_calculate_IK(req):
rospy.loginfo("Received %s eef-poses from the plan" % len(req.poses))
if len(req.poses) < 1:
        print("No valid poses received")
return -1
else:
# Create symbols
q1, q2, q3, q4, q5, q6, q7 = symbols('q1:8')
# Create Modified DH parameters
d1, d2, d3, d4, d5, d6, d7 = symbols('d1:8')
a0, a1, a2, a3, a4, a5, a6 = symbols('a0:7')
alpha0, alpha1, alpha2, alpha3, alpha4, alpha5, alpha6 = symbols('alpha0:7')
#
# Define Modified DH Transformation matrix
s = {alpha0: 0, a0: 0, d1: 0.75, # row 1
alpha1: -pi / 2, a1: 0.35, d2: 0, q2: q2 - pi / 2, # row 2
alpha2: 0, a2: 1.25, d3: 0, # row 3
alpha3: -pi / 2, a3: -0.054, d4: 1.5, # row 4
alpha4: pi / 2, a4: 0, d5: 0, # row 5
alpha5: -pi / 2, a5: 0, d6: 0, # row 6
alpha6: 0, a6: 0, d7: 0.303, } # row 7
#
# Create individual transformation matrices, better to do this outside of the loop
T0_1 = Matrix([[cos(q1), -sin(q1), 0, a0],
[sin(q1) * cos(alpha0), cos(q1) * cos(alpha0), -sin(alpha0), -sin(alpha0) * d1],
[sin(q1) * sin(alpha0), cos(q1) * sin(alpha0), cos(alpha0), cos(alpha0) * d1],
[0, 0, 0, 1]])
T0_1 = T0_1.subs(s)
T1_2 = Matrix([[cos(q2), -sin(q2), 0, a1],
[sin(q2) * cos(alpha1), cos(q2) * cos(alpha1), -sin(alpha1), -sin(alpha1) * d2],
[sin(q2) * sin(alpha1), cos(q2) * sin(alpha1), cos(alpha1), cos(alpha1) * d2],
[0, 0, 0, 1]])
T1_2 = T1_2.subs(s)
T2_3 = Matrix([[cos(q3), -sin(q3), 0, a2],
[sin(q3) * cos(alpha2), cos(q3) * cos(alpha2), -sin(alpha2), -sin(alpha2) * d3],
[sin(q3) * sin(alpha2), cos(q3) * sin(alpha2), cos(alpha2), cos(alpha2) * d3],
[0, 0, 0, 1]])
T2_3 = T2_3.subs(s)
T3_4 = Matrix([[cos(q4), -sin(q4), 0, a3],
[sin(q4) * cos(alpha3), cos(q4) * cos(alpha3), -sin(alpha3), -sin(alpha3) * d4],
[sin(q4) * sin(alpha3), cos(q4) * sin(alpha3), cos(alpha3), cos(alpha3) * d4],
[0, 0, 0, 1]])
T3_4 = T3_4.subs(s)
T4_5 = Matrix([[cos(q5), -sin(q5), 0, a4],
[sin(q5) * cos(alpha4), cos(q5) * cos(alpha4), -sin(alpha4), -sin(alpha4) * d5],
[sin(q5) * sin(alpha4), cos(q5) * sin(alpha4), cos(alpha4), cos(alpha4) * d5],
[0, 0, 0, 1]])
T4_5 = T4_5.subs(s)
T5_6 = Matrix([[cos(q6), -sin(q6), 0, a5],
[sin(q6) * cos(alpha5), cos(q6) * cos(alpha5), -sin(alpha5), -sin(alpha5) * d6],
[sin(q6) * sin(alpha5), cos(q6) * sin(alpha5), cos(alpha5), cos(alpha5) * d6],
[0, 0, 0, 1]])
T5_6 = T5_6.subs(s)
T6_7 = Matrix([[cos(q7), -sin(q7), 0, a6],
[sin(q7) * cos(alpha6), cos(q7) * cos(alpha6), -sin(alpha6), -sin(alpha6) * d7],
[sin(q7) * sin(alpha6), cos(q7) * sin(alpha6), cos(alpha6), cos(alpha6) * d7],
[0, 0, 0, 1]])
T6_7 = T6_7.subs(s)
T0_7 = T0_1 * T1_2 * T2_3 * T3_4 * T4_5 * T5_6 * T6_7
# Initialize service response
joint_trajectory_list = []
for x in xrange(0, len(req.poses)):
# IK code starts here
joint_trajectory_point = JointTrajectoryPoint()
# Extract end-effector position and orientation from request
# px,py,pz = end-effector position
# roll, pitch, yaw = end-effector orientation
px = req.poses[x].position.x
py = req.poses[x].position.y
pz = req.poses[x].position.z
(roll, pitch, yaw) = tf.transformations.euler_from_quaternion(
[req.poses[x].orientation.x, req.poses[x].orientation.y,
req.poses[x].orientation.z, req.poses[x].orientation.w])
# Generate Rrpy matrix for end effector wrt base link
Rrpy = create_rrpy_matrix(roll, pitch, yaw)
# Compensate for rotation discrepancy between DH parameters and Gazebo
            # I managed to align the axes by rotating 180 degrees about the z axis and -90 degrees about the y axis
Rcorr = rotation_z(pi) * rotation_y(-pi / 2)
# Apply the correction matrix
Rrpy = Rrpy * Rcorr
# Calculating wrist center position wrt base link
#
nx = Rrpy[0, 2]
ny = Rrpy[1, 2]
nz = Rrpy[2, 2]
# d7 = 0.303 which is wc to end effector
wx = px - 0.303 * nx
wy = py - 0.303 * ny
wz = pz - 0.303 * nz
# Calculate joint angles using Geometric IK method
### Calculate theta1 - theta3 - Inverse Position Problem
# theta 1 is the easy one as it is simply tangent angle between wy and wx looking from above
theta1 = atan2(wy, wx)
            # theta2 and theta3 are trickier because they are hard to visualize. For both we need
            # trigonometric calculations on two adjacent triangles.
r = sqrt(wx ** 2 + wy ** 2) - 0.35 # radial distance from link2 to wc from above, 0.35 is a1
# Construct the triangle for cosine law. A is the angle in front of corner with angle a,
# B is the angle in front of corner with angle b. For more detail, see writeup.md
A = 1.5011 # sqrt(1.5 **2 + 0.054 **2)
B = sqrt(r ** 2 + (wz - 0.75) ** 2) #
C = 1.25 # a2 = 1.25
# calculate angle a by using cosine law
a = acos((B ** 2 + C ** 2 - A ** 2) / (2 * B * C))
# compensate 90 degree resting angle and the vertical difference between link2 and wc
theta2 = pi / 2 - a - atan2(wz - 0.75, r)
# calculate angle b by using cosine law
b = acos((A ** 2 + C ** 2 - B ** 2) / (2 * A * C))
# compensate 90 degree resting angle and the small drop difference between link3 and wc
            # 0.036 rad accounts for the small drop from joint 3 to joint 4; it is simply atan2(0.054, 1.5)
theta3 = pi / 2 - (b + 0.036)
print("Theta1, theta2 and theta3 joint angles are calculated")
### Calculate theta4 - theta6 - Inverse Orientation Problem
#Rotation matrix from base link to link 3 derived from transformation matrix
R0_3 = (T0_1 * T1_2 * T2_3)[0:3, 0:3]
# Apply the calculated theta1 - theta 3 values into rotation matrix.
R0_3 = R0_3.evalf(subs={'q1': theta1, 'q2': theta2, 'q3': theta3})
#Calculate the rotation matrix from link3 to link 6 and apply the correction matrix
            R3_6 = R0_3.T * Rrpy  # in theory the inverse and the transpose of R0_3 are equal, since a rotation matrix is orthogonal.
            # But the inverse method sometimes causes numerical problems, so the transpose is used here.
            # Each element of the R3_6 matrix consists of one or more trigonometric terms.
            # The derivation of theta4, theta5 and theta6 from these terms is explained in the writeup document.
            # Briefly, theta5 is obtained first with acos, since R3_6[1, 2] = cos(q5).
            # Theta4 and theta6 are then obtained by dividing matrix terms so the unwanted angle terms cancel out.
theta5 = acos(R3_6[1, 2])
if sin(theta5) < 0:
theta4 = atan2(-R3_6[2, 2], R3_6[0, 2])
theta6 = atan2(R3_6[1, 1], -R3_6[1, 0])
else:
theta4 = atan2(R3_6[2, 2], -R3_6[0, 2])
theta6 = atan2(-R3_6[1, 1], R3_6[1, 0])
print("Theta4, theta5 and theta6 joint angles are calculated")
print("Joint angles for eef position", str(x), "are : ", theta1, theta2, theta3, theta4, theta5, theta6)
# Populate response for the IK request
# In the next line replace theta1,theta2...,theta6 by your joint angle variables
joint_trajectory_point.positions = [theta1, theta2, theta3, theta4, theta5, theta6]
joint_trajectory_list.append(joint_trajectory_point)
rospy.loginfo("length of Joint Trajectory List: %s" % len(joint_trajectory_list))
return CalculateIKResponse(joint_trajectory_list)
def IK_server():
# initialize node and declare calculate_ik service
rospy.init_node('IK_server')
s = rospy.Service('calculate_ik', CalculateIK, handle_calculate_IK)
    print("Ready to receive an IK request")
rospy.spin()
if __name__ == "__main__":
IK_server()
|
table_data=[[]for i in range(5)]
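# Note (added comment): the comprehension above builds five independent inner
# lists; writing [[]] * 5 instead would alias a single list, so every append would
# show up in all five slots.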
print(table_data)
for i in range(4):
table_data[i].append(i)
print(table_data)
|
from django.contrib import admin
from .models import Category, Request, RequestImage, PoliceOffice
admin.site.register(PoliceOffice)
admin.site.register(Category)
admin.site.register(Request)
admin.site.register(RequestImage)
|
import pyodbc
try:
    connect = pyodbc.connect(r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\Users\ichil\Downloads\Database2.accdb')
    print("Connected to a Database")
except pyodbc.Error as error:
    print("Failed to connect to the database:", error)
|
import sys
import threading
try:
# python 2
from thread import error as ThreadError
except:
# python 3
ThreadError = RuntimeError
import greenlet
DEBUG = False
class DeadLockError(Exception):
pass
class Lock(object):
def __init__(self):
if DEBUG:
sys.stdout.write('L')
self._locked = False
def acquire(self, blocking=True):
thread = greenlet.getcurrent()
scheduler = thread.scheduler
assert thread.waiting_for is None
scheduler.switch_before_acquire_lock()
if self._locked:
if blocking:
thread.waiting_for = self
scheduler.switch_after_blocking_on_locked_lock()
return True
else:
return False
else:
self._locked = True
return True
def release(self):
thread = greenlet.getcurrent()
scheduler = thread.scheduler
assert thread.waiting_for is None
if self._locked:
threads_waiting = scheduler.get_threads_waiting_for(self)
if not threads_waiting:
self._locked = False
scheduler.switch_after_release_lock_when_no_threads_waiting()
else:
scheduler.choose_thread_to_acquire_lock(
threads_waiting
).waiting_for = None
scheduler.switch_after_release_lock_to_another_thread()
else:
raise ThreadError('release unlocked lock')
def locked(self):
return self._locked
def __enter__(self):
self.acquire()
def __exit__(self, *args, **kwargs):
self.release()
class AbstractScheduler(object):
def choose_thread(self, threads):
raise NotImplementedError
def choose_thread_to_awake(self, threads):
return self.choose_thread(threads)
def choose_thread_to_acquire_lock(self, threads):
return self.choose_thread(threads)
def switch(self):
raise NotImplementedError
def switch_on_breakpoint(self):
self.switch()
def switch_before_acquire_lock(self):
"""Context switch just before trying to acquire lock to catch bugs
when locks are acquired too late.
"""
self.switch()
def switch_after_blocking_on_locked_lock(self):
"""It is crucially important that this method calls switch() because
otherwise threads would not block on locks.
"""
self.switch()
def switch_after_release_lock(self):
"""Context switch just after releasing lock to catch bugs when locks
are released too early.
"""
self.switch()
def switch_after_release_lock_to_another_thread(self):
self.switch_after_release_lock()
def switch_after_release_lock_when_no_threads_waiting(self):
self.switch_after_release_lock()
class BaseScheduler(AbstractScheduler):
def __init__(self):
self.threads = None
def __enter__(self):
# enter and exit methods are assumed to be called from the same greenlet
thread = greenlet.getcurrent()
if hasattr(thread, 'scheduler'):
self.previous_scheduler = thread.scheduler
thread.scheduler = self
thread.waiting_for = None
self.threads = [thread]
def __exit__(self, *args, **kwargs):
thread = greenlet.getcurrent()
if hasattr(self, 'previous_scheduler'):
thread.scheduler = self.previous_scheduler
del self.previous_scheduler
else:
del thread.scheduler
self.threads = None
def create_thread(self, callable):
if DEBUG:
sys.stdout.write('T')
thread = greenlet.greenlet(callable)
thread.scheduler = self
thread.waiting_for = None
self.threads.append(thread)
return id(thread)
def get_threads_waiting_for(self, x):
self.threads = [t for t in self.threads if not t.dead]
return [t for t in self.threads if t.waiting_for is x]
def switch(self):
thread = greenlet.getcurrent()
# get threads that don't wait on any lock
threads = self.get_threads_waiting_for(None)
if not threads:
raise DeadLockError
self.choose_thread_to_awake(threads).switch()
if thread.waiting_for is not None:
self.switch()
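# Minimal sketch (an assumption, not part of the original module): the smallest
# concrete scheduler only needs to decide which runnable thread to resume next;
# switching and bookkeeping are inherited from BaseScheduler.
import random
class RandomScheduler(BaseScheduler):
    def choose_thread(self, threads):
        return random.choice(threads)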
|
#!/bin/python3
import math
import os
import random
import re
import sys
def Type_Number(n):
if n%2 != 0:
print("Weird")
if n%2 == 0:
if (n>20):
print("Not Weird")
elif (n>=6):
print("Weird")
elif (n>=2):
print("Not Weird")
if __name__ == '__main__':
N = int(input())
Type_Number(N)
|
import re
from io import BytesIO
from time import sleep
from typing import Optional, List
from telegram import TelegramError, Chat, Message
from telegram import Update, Bot
from telegram import ParseMode
from telegram.error import BadRequest
from telegram.ext import MessageHandler, Filters, CommandHandler
from telegram.ext.dispatcher import run_async
import FilterryBot.modules.sql.users_sql as sql
from FilterryBot import dispatcher, OWNER_ID, LOGGER, SUDO_USERS, SUPPORT_USERS
from telegram.utils.helpers import escape_markdown
from FilterryBot.modules.helper_funcs.filters import CustomFilters
from FilterryBot.modules.helper_funcs.chat_status import is_user_ban_protected, bot_admin
from FilterryBot.modules.translations.strings import tld
USERS_GROUP = 4
def get_user_id(username):
# ensure valid userid
if len(username) <= 5:
return None
if username.startswith('@'):
username = username[1:]
users = sql.get_userid_by_name(username)
if not users:
return None
elif len(users) == 1:
return users[0].user_id
else:
for user_obj in users:
try:
userdat = dispatcher.bot.get_chat(user_obj.user_id)
if userdat.username == username:
return userdat.id
except BadRequest as excp:
if excp.message == 'Chat not found':
pass
else:
LOGGER.exception("Error extracting user ID")
return None
@run_async
def broadcast(bot: Bot, update: Update):
to_send = update.effective_message.text.split(None, 1)
if len(to_send) >= 2:
chats = sql.get_all_chats() or []
failed = 0
for chat in chats:
try:
bot.sendMessage(int(chat.chat_id), to_send[1])
sleep(0.1)
except TelegramError:
failed += 1
LOGGER.warning("Couldn't send broadcast to %s, group name %s", str(chat.chat_id), str(chat.chat_name))
update.effective_message.reply_text("Broadcast complete. {} groups failed to receive the message, probably "
"due to being kicked.".format(failed))
@run_async
def log_user(bot: Bot, update: Update):
chat = update.effective_chat # type: Optional[Chat]
msg = update.effective_message # type: Optional[Message]
sql.update_user(msg.from_user.id,
msg.from_user.username,
chat.id,
chat.title)
if msg.reply_to_message:
sql.update_user(msg.reply_to_message.from_user.id,
msg.reply_to_message.from_user.username,
chat.id,
chat.title)
if msg.forward_from:
sql.update_user(msg.forward_from.id,
msg.forward_from.username)
@run_async
def chats(bot: Bot, update: Update):
all_chats = sql.get_all_chats() or []
chatfile = 'List of chats.\n0. Chat name | Chat ID | Members count | Invitelink\n'
P = 1
for chat in all_chats:
try:
curr_chat = bot.getChat(chat.chat_id)
bot_member = curr_chat.get_member(bot.id)
            chat_members = curr_chat.get_members_count()
if bot_member.can_invite_users:
invitelink = bot.exportChatInviteLink(chat.chat_id)
else:
invitelink = "0"
chatfile += "{}. {} | {} | {} | {}\n".format(P, chat.chat_name, chat.chat_id, chat_members, invitelink)
P = P + 1
except:
pass
with BytesIO(str.encode(chatfile)) as output:
output.name = "chatlist.txt"
update.effective_message.reply_document(document=output, filename="chatlist.txt",
caption="Here is the list of chats in my database.")
@run_async
def banall(bot: Bot, update: Update, args: List[int]):
if args:
chat_id = str(args[0])
all_mems = sql.get_chat_members(chat_id)
else:
chat_id = str(update.effective_chat.id)
all_mems = sql.get_chat_members(chat_id)
for mems in all_mems:
try:
bot.kick_chat_member(chat_id, mems.user)
update.effective_message.reply_text("Tried banning " + str(mems.user))
sleep(0.1)
except BadRequest as excp:
update.effective_message.reply_text(excp.message + " " + str(mems.user))
continue
@run_async
def snipe(bot: Bot, update: Update, args: List[str]):
    try:
        chat_id = str(args[0])
        del args[0]
    except IndexError:
        update.effective_message.reply_text("Please give me a chat to echo to!")
        return
to_send = " ".join(args)
if len(to_send) >= 2:
try:
bot.sendMessage(int(chat_id), str(to_send))
except TelegramError:
LOGGER.warning("Couldn't send to group %s", str(chat_id))
update.effective_message.reply_text("Couldn't send the message. Perhaps I'm not part of that group?")
@run_async
@bot_admin
def getlink(bot: Bot, update: Update, args: List[int]):
message = update.effective_message
    if args:
        pattern = re.compile(r'-\d+')
    else:
        message.reply_text("You don't seem to be referring to any chats.")
        return
links = "Invite link(s):\n"
for chat_id in pattern.findall(message.text):
try:
chat = bot.getChat(chat_id)
bot_member = chat.get_member(bot.id)
if bot_member.can_invite_users:
invitelink = bot.exportChatInviteLink(chat_id)
links += str(chat_id) + ":\n" + invitelink + "\n"
else:
links += str(chat_id) + ":\nI don't have access to the invite link." + "\n"
except BadRequest as excp:
links += str(chat_id) + ":\n" + excp.message + "\n"
except TelegramError as excp:
links += str(chat_id) + ":\n" + excp.message + "\n"
message.reply_text(links)
@bot_admin
def leavechat(bot: Bot, update: Update, args: List[int]):
    if args:
        chat_id = int(args[0])
    else:
        update.effective_message.reply_text("You do not seem to be referring to a chat!")
        return
try:
chat = bot.getChat(chat_id)
titlechat = bot.get_chat(chat_id).title
bot.sendMessage(chat_id, "`I'll Go Away!`")
bot.leaveChat(chat_id)
update.effective_message.reply_text("I'll left group {}".format(titlechat))
except BadRequest as excp:
if excp.message == "Chat not found":
update.effective_message.reply_text("It looks like I've been kicked out of the group :p")
else:
return
@run_async
def slist(bot: Bot, update: Update):
message = update.effective_message
text1 = "My sudo users are:"
text2 = "My support users are:"
for user_id in SUDO_USERS:
try:
user = bot.get_chat(user_id)
name = "[{}](tg://user?id={})".format(user.first_name + (user.last_name or ""), user.id)
if user.username:
name = escape_markdown("@" + user.username)
text1 += "\n - `{}`".format(name)
except BadRequest as excp:
if excp.message == 'Chat not found':
text1 += "\n - ({}) - not found".format(user_id)
for user_id in SUPPORT_USERS:
try:
user = bot.get_chat(user_id)
name = "[{}](tg://user?id={})".format(user.first_name + (user.last_name or ""), user.id)
if user.username:
name = escape_markdown("@" + user.username)
text2 += "\n - `{}`".format(name)
except BadRequest as excp:
if excp.message == 'Chat not found':
text2 += "\n - ({}) - not found".format(user_id)
message.reply_text(text1 + "\n" + text2 + "\n", parse_mode=ParseMode.MARKDOWN)
#message.reply_text(text2 + "\n", parse_mode=ParseMode.MARKDOWN)
def __user_info__(user_id, chat_id):
if user_id == dispatcher.bot.id:
return tld(chat_id, "I've seen them in... Wow. Are they stalking me? They're in all the same places I am... oh. It's me.")
num_chats = sql.get_user_num_chats(user_id)
return tld(chat_id, "I've seen them in <code>{}</code> chats in total.").format(num_chats)
def __stats__():
return "{} users, across {} chats".format(sql.num_users(), sql.num_chats())
def __gdpr__(user_id):
sql.del_user(user_id)
def __migrate__(old_chat_id, new_chat_id):
sql.migrate_chat(old_chat_id, new_chat_id)
__help__ = "" # no help string
__mod_name__ = "Users"
BROADCAST_HANDLER = CommandHandler("broadcasts", broadcast, filters=Filters.user(OWNER_ID))
USER_HANDLER = MessageHandler(Filters.all & Filters.group, log_user)
CHATLIST_HANDLER = CommandHandler("chatlist", chats, filters=Filters.user(OWNER_ID))
SNIPE_HANDLER = CommandHandler("snipe", snipe, pass_args=True, filters=Filters.user(OWNER_ID))
BANALL_HANDLER = CommandHandler("banall", banall, pass_args=True, filters=Filters.user(OWNER_ID))
GETLINK_HANDLER = CommandHandler("getlink", getlink, pass_args=True, filters=Filters.user(OWNER_ID))
LEAVECHAT_HANDLER = CommandHandler("leavechat", leavechat, pass_args=True, filters=Filters.user(OWNER_ID))
SLIST_HANDLER = CommandHandler("slist", slist,
filters=CustomFilters.sudo_filter | CustomFilters.support_filter)
dispatcher.add_handler(SNIPE_HANDLER)
dispatcher.add_handler(BANALL_HANDLER)
dispatcher.add_handler(GETLINK_HANDLER)
dispatcher.add_handler(LEAVECHAT_HANDLER)
dispatcher.add_handler(SLIST_HANDLER)
dispatcher.add_handler(USER_HANDLER, USERS_GROUP)
dispatcher.add_handler(BROADCAST_HANDLER)
dispatcher.add_handler(CHATLIST_HANDLER)
|
import random
import string
import ipaddress
import struct
from typing import Dict, Optional
import asyncio
from aiohttp import ClientSession
from urllib.parse import urlencode
from pprint import pformat
from bcoding import bdecode
from torrent import Torrent
import util
PEER_ID = 'SISTER-' + ''.join(
random.choice(string.ascii_lowercase + string.digits)
for i in range(13)
)
MAX_RETRY = 2
class TrackerManager():
def __init__(self, torrent: Torrent):
self.torrent = torrent
self.trackers_url = torrent.getAnnounceList()
self.trackers = [Tracker(url[0], self.torrent.getInfoHash(), self.torrent.getSize()) for url in self.trackers_url]
self.trackers_tasks = [tracker.getPeers() for tracker in self.trackers]
self.tracker_responses = []
async def requestPeers(self):
self.tracker_responses = []
for res in asyncio.as_completed(self.trackers_tasks):
tracker_resp = await res
if tracker_resp:
self.tracker_responses.append(tracker_resp)
print(f'TRACKER: tracker_response now: {self.tracker_responses}')
def getPeersOnly(self):
peers = []
for response in self.tracker_responses:
for peer in response['peers']:
peer_item = {'ip': peer['ip'], 'port': peer['port']}
if peer_item not in peers:
peers.append(peer_item)
return peers
def getPeers(self):
peers = []
for response in self.tracker_responses:
for peer in response['peers']:
if not peer.get('peer id'):
peer['peer id'] = '???'
peer_item = {'peer id': peer['peer id'], 'ip': peer['ip'], 'port': peer['port']}
res_idx, res = util.searchDictIdx(peers, 'ip', peer_item['ip'])
if res_idx == -1:
peers.append(peer_item)
elif peer['peer id'] != '???' and res['peer id'] == '???':
peers.pop(res_idx)
peers.append(peer_item)
return peers
async def sendCompleted(self):
for tracker in self.trackers:
tracker.event = 'completed'
self.tracker_responses = []
tracker_tasks = [tracker.getPeers() for tracker in self.trackers]
for res in asyncio.as_completed(tracker_tasks):
tracker_resp = await res
if tracker_resp:
self.tracker_responses.append(tracker_resp)
# print(f'TRACKER: tracker_response now: {self.tracker_responses}')
else:
            print('TRACKER_MAN: sendCompleted done!')
class Tracker():
def __init__(self, tracker_url: str, info_hash, size: int):
self.url = tracker_url
if 'announce' not in self.url:
if self.url[-1] == '/':
self.url += 'announce'
else:
self.url += '/announce'
self.info_hash = info_hash
self.size = size
self.tries = 0
self.compact = 0
self.event = 'started'
async def getPeers(self) -> Dict:
tracker_response = await self.requestPeers()
# print(f'---TRACKER RESPONSE--- {self.url}:{self.info_hash}:{self.size}')
# print(tracker_response)
if tracker_response:
if isinstance(tracker_response['peers'], bytes):
tracker_response = self.unpackPeers(tracker_response)
self.event = ''
return tracker_response
async def requestPeers(self) -> Optional[Dict]:
async with ClientSession() as session:
try:
response = await session.get(self.url + '?' + self.getTrackerParams())
response_data = await response.read()
peers = bdecode(response_data)
return peers
except (TypeError, ValueError):
# print(f'TRACKER: cannot decode response from {self.url}')
# print(f'TRACKER: response: {response_data}')
self.tries += 1
if self.tries >= MAX_RETRY:
# print(f'TRACKER: cannot connect to tracker {self.url}!')
return
else:
# print(f'TRACKER: reconnecting... from tracker {self.url} using compact mode')
self.compact = 1
await asyncio.sleep(2)
                    return await self.requestPeers()
except Exception as e:
# print(e)
# print(type(e))
self.tries += 1
if self.tries == MAX_RETRY:
# print(f'TRACKER: cannot connect to tracker {self.url}!')
return
else:
# print(f'TRACKER: reconnecting... from tracker {self.url} using compact mode')
self.compact = 1
await asyncio.sleep(2)
                    return await self.requestPeers()
def getTrackerParams(self) -> str:
msg = {
'info_hash': self.info_hash,
'peer_id': PEER_ID,
'port': 52786,
'uploaded': 0,
'downloaded': 0,
'left': self.size,
'compact': self.compact,
'event': self.event
}
return urlencode(msg)
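    # With compact=1 the tracker returns 'peers' as a raw bytes blob in which each
    # peer occupies 6 bytes: a 4-byte IPv4 address followed by a 2-byte big-endian
    # port. unpackPeers below expands that blob into a list of peer dicts.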
def unpackPeers(self, raw_response: Dict) -> Dict:
new_response = raw_response
peers = raw_response['peers']
temp = [peers[i:i+6] for i in range(0, len(peers), 6)]
new_peers = [{'ip': str(ipaddress.IPv4Address(peer[:4])), 'port': struct.unpack('>H', peer[4:])[0]} for peer in temp]
new_response['peers'] = new_peers
return new_response
async def main(torrent):
trackman = TrackerManager(torrent)
dump(trackman)
await trackman.requestPeers()
print('TRACKER: TASKS COMPLETED!')
print('TRACKER: peers')
print(pformat(trackman.getPeers()))
print('TRACKER: peers compact')
print(pformat(trackman.getPeersOnly()))
if __name__ == "__main__":
from dumper import dump
import messages
import piece
torrent = Torrent('sintel.torrent')
loop = asyncio.get_event_loop()
print(torrent.getAnnounceList())
loop.run_until_complete(main(torrent))
loop.close()
|
import torch
import numpy as np
import cv2
from dataset import FashionMNIST
from linear import Conceptor
from semantic import Semantic_Memory
from nearest import Nearest_Neighbor
from perspective import *
class Static_Hierarchy_Classifier:
def __init__(self, device, num_classes):
print("init")
self.device = device
self.num_classes = num_classes
# width, height
self.sample_size = (9, 9)
self.perspective_dim = (1, 19, 19)
self.perspective_count = self.perspective_dim[0] * self.perspective_dim[1] * self.perspective_dim[2]
self.part = {}
self.models = {}
self.view_param = {}
self.part[0] = get_perspective_kernels([[0, 0, 1], [0, 0, 1], [0, 0, 1]], scale=1)
self.view_param[0] = get_perspective_kernels([[0, 0, self.perspective_dim[0]], [-0.2, 0.2, self.perspective_dim[1]], [0.2, 0.2, self.perspective_dim[2]]], scale=1)
self.part[1] = get_perspective_kernels([[0, 0, 1], [-0.5, 0.5, 2], [-0.5, 0.5, 2]], scale=2)
self.view_param[1] = get_perspective_kernels([[0, 0, self.perspective_dim[0]], [-0.1, 0.1, self.perspective_dim[1]], [0.1, 0.1, self.perspective_dim[2]]], scale=1)
self.part[2] = get_perspective_kernels([[0, 0, 1], [-0.2, 0.2, 3], [-0.2, 0.2, 3]], scale=2)
self.view_param[2] = get_perspective_kernels([[0, 0, self.perspective_dim[0]], [-0.05, 0.05, self.perspective_dim[1]], [0.05, 0.05, self.perspective_dim[2]]], scale=1)
self.semantic = Nearest_Neighbor(device)
self.running_base_position = 0
def to_tensor(self, input, dtype=torch.float32):
return torch.tensor(input, dtype=dtype, device=self.device)
def get_min_index(self, score):
self.mid = self.perspective_count // 2
score[:, self.mid] -= 1e-6
index = torch.argmin(score, dim=1, keepdim=True)
return index
def layer(self, input, layer, id="", base_perspective=None, and_learn=False):
batches = input.shape[0]
bases = []
orders = []
if base_perspective is not None:
part_perspective = np.reshape(rebase(self.part[layer], base_perspective), [-1, 2, 3]) # (batch*num part, 2, 3)
else:
part_perspective = np.tile(self.part[layer], (input.shape[0], 1, 1))
count_parts = self.part[layer].shape[0]
count_views = self.view_param[layer].shape[0]
part_perspective = rebase(self.view_param[layer], part_perspective) # (batch*num part, num perspective, 2, 3)
perspectives = sample(input, self.to_tensor(np.reshape(part_perspective, [batches, -1, 2, 3])), size=self.sample_size) # (batch, num part * num perspective, ...)
perspectives = torch.reshape(perspectives, [batches, count_parts, count_views, -1])
for i in range(count_parts):
_flat = torch.reshape(perspectives[:, i, ...], [batches * count_views, -1])
_id = id + str(i)
if _id not in self.models:
self.models[_id] = Conceptor(self.device, max_bases=10)
if self.models[_id].get_count() == 0:
_projected = _flat
else:
_projected = self.models[_id].project(_flat)
scores = torch.mean(torch.reshape((_flat - _projected)**2, [batches, count_views, -1]), dim=2)
min_index = self.get_min_index(scores)
min_indices = torch.unsqueeze(min_index, 2).repeat(1, 1, perspectives.shape[3])
min_perspective = torch.gather(perspectives[:, i, ...], 1, min_indices)[:, 0, ...]
if and_learn:
self.running_base_position += self.models[_id].learn(min_perspective, 1, start_base_order=self.running_base_position, expand_threshold=1e-3)
hidden = (self.models[_id]) << min_perspective
bases.append(hidden)
orders.extend(self.models[_id].get_orders())
if layer < len(self.part) - 1:
min_view_param = self.view_param[layer][np.squeeze(min_index.cpu().numpy()), ...]
if len(min_view_param.shape) < 3:
min_view_param = np.expand_dims(min_view_param, 0)
_b, _o = self.layer(input, layer + 1, _id, min_view_param, and_learn)
bases += _b
orders += _o
return bases, orders
def reorder_bases(self, bases, orders):
_b = torch.cat(bases, dim=1)
return _b[:, orders]
def classify(self, input, output=None):
and_learn = output is not None
mark = self.running_base_position
bases, orders = self.layer(input, 0, "", None, and_learn=and_learn)
logits = self.reorder_bases(bases, orders)
if mark == 0:
            prediction = torch.randint(self.num_classes, (input.shape[0], ), dtype=torch.int64, device=self.device)
else:
prediction = self.semantic << logits[:, 0:mark]
if and_learn:
self.semantic.learn(logits, output, num_classes=self.num_classes)
return prediction
if __name__ == "__main__":
print("main")
device = torch.device("cuda:0")
batch_size = 1
dataset = FashionMNIST(device, batch_size=batch_size, max_per_class=60, seed=10, group_size=2)
classifier = Static_Hierarchy_Classifier(device, 10)
percent_correct = 0.0
for i, (data, label) in enumerate(dataset):
print("data: ", i)
input = data.to(device)
output = label.to(device)
# online test
current_bits = 0
prediction = classifier.classify(input, output=output).cpu()
count_correct = np.sum(prediction.numpy() == label.numpy())
percent_correct = 0.99 * percent_correct + 0.01 * count_correct * 100 / batch_size
print("Truth: ", dataset.readout(label))
print("Guess: ", dataset.readout(prediction))
print("Percent correct: ", percent_correct)
img = np.reshape(data.numpy(), [-1, data.shape[2]])
cv2.imshow("sample", img)
cv2.waitKey(10)
count = 0
for i, (data, label) in enumerate(dataset):
input = data.to(device)
output = label.to(device)
# test
prediction = classifier.classify(input).cpu()
count = count + np.sum(prediction.numpy() == label.numpy())
print("Percent correct: ", count * 100 / (len(dataset) * batch_size))
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Huawei.MA5300.get_version
# sergey.sadovnikov@gmail.com
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
"""
"""
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
import re
class Script(BaseScript):
name = "Huawei.MA5300.get_version"
cache = True
interface = IGetVersion
rx_platform = re.compile(r"SmartAX (?P<platform>\S+) \S+")
rx_ver = re.compile(r"Version (?P<version>\S+)")
def execute(self):
v = self.cli("show version", cached=True)
match = self.re_search(self.rx_ver, v)
version = match.group("version")
match = self.re_search(self.rx_platform, v)
platform = match.group("platform")
r = {"vendor": "Huawei", "platform": platform, "version": version}
return r
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Frame(Component):
"""A Frame component.
Frame is a wrapper for the <frame> HTML5 element.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/frame
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- accessKey (string; optional):
Keyboard shortcut to activate or add focus to the element.
- aria-* (string; optional):
A wildcard aria attribute.
- className (string; optional):
Often used with CSS to style elements with common properties.
- contentEditable (string; optional):
Indicates whether the element's content is editable.
- contextMenu (string; optional):
Defines the ID of a <menu> element which will serve as the
element's context menu.
- data-* (string; optional):
A wildcard data attribute.
- dir (string; optional):
Defines the text direction. Allowed values are ltr (Left-To-Right)
or rtl (Right-To-Left).
- draggable (string; optional):
Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional):
Prevents rendering of given element, while keeping child elements,
e.g. script elements, active.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- lang (string; optional):
Defines the language used in the element.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- n_clicks (number; default 0):
An integer that represents the number of times that this element
has been clicked on.
- n_clicks_timestamp (number; default -1):
An integer that represents the time (in ms since 1970) at which
n_clicks changed. This can be used to tell which button was
changed most recently.
- role (string; optional):
The ARIA role attribute.
- spellCheck (string; optional):
Indicates whether spell checking is allowed for the element.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- tabIndex (string; optional):
Overrides the browser's default tab order and follows the one
specified instead.
- title (string; optional):
Text to be displayed in a tooltip when hovering over the element."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'accessKey', 'aria-*', 'className', 'contentEditable', 'contextMenu', 'data-*', 'dir', 'draggable', 'hidden', 'key', 'lang', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'role', 'spellCheck', 'style', 'tabIndex', 'title']
self._type = 'Frame'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_properties = ['children', 'id', 'accessKey', 'aria-*', 'className', 'contentEditable', 'contextMenu', 'data-*', 'dir', 'draggable', 'hidden', 'key', 'lang', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'role', 'spellCheck', 'style', 'tabIndex', 'title']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Frame, self).__init__(children=children, **args)
|
from . import *
class AWS_KinesisAnalytics_ApplicationOutput_KinesisFirehoseOutput(CloudFormationProperty):
def write(self, w):
with w.block("kinesis_firehose_output"):
self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_CSVMappingParameters(CloudFormationProperty):
def write(self, w):
with w.block("csv_mapping_parameters"):
self.property(w, "RecordRowDelimiter", "record_row_delimiter", StringValueConverter())
self.property(w, "RecordColumnDelimiter", "record_column_delimiter", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_CSVMappingParameters(CloudFormationProperty):
def write(self, w):
with w.block("csv_mapping_parameters"):
self.property(w, "RecordRowDelimiter", "record_row_delimiter", StringValueConverter())
self.property(w, "RecordColumnDelimiter", "record_column_delimiter", StringValueConverter())
class AWS_KinesisAnalytics_Application_JSONMappingParameters(CloudFormationProperty):
def write(self, w):
with w.block("json_mapping_parameters"):
self.property(w, "RecordRowPath", "record_row_path", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationOutput_DestinationSchema(CloudFormationProperty):
def write(self, w):
with w.block("destination_schema"):
self.property(w, "RecordFormatType", "record_format_type", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_S3ReferenceDataSource(CloudFormationProperty):
def write(self, w):
with w.block("s3_reference_data_source"):
self.property(w, "BucketARN", "bucket_arn", StringValueConverter())
self.property(w, "FileKey", "file_key", StringValueConverter())
self.property(w, "ReferenceRoleARN", "reference_role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_MappingParameters(CloudFormationProperty):
def write(self, w):
with w.block("mapping_parameters"):
self.block(w, "JSONMappingParameters", AWS_KinesisAnalytics_Application_JSONMappingParameters)
self.block(w, "CSVMappingParameters", AWS_KinesisAnalytics_Application_CSVMappingParameters)
class AWS_KinesisAnalytics_ApplicationOutput_KinesisStreamsOutput(CloudFormationProperty):
def write(self, w):
with w.block("kinesis_streams_output"):
self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_KinesisStreamsInput(CloudFormationProperty):
def write(self, w):
with w.block("kinesis_streams_input"):
self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_JSONMappingParameters(CloudFormationProperty):
def write(self, w):
with w.block("json_mapping_parameters"):
self.property(w, "RecordRowPath", "record_row_path", StringValueConverter())
class AWS_KinesisAnalytics_Application_RecordColumn(CloudFormationProperty):
def write(self, w):
with w.block("record_column"):
self.property(w, "Mapping", "mapping", StringValueConverter())
self.property(w, "SqlType", "sql_type", StringValueConverter())
self.property(w, "Name", "name", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordColumn(CloudFormationProperty):
def write(self, w):
with w.block("record_column"):
self.property(w, "Mapping", "mapping", StringValueConverter())
self.property(w, "SqlType", "sql_type", StringValueConverter())
self.property(w, "Name", "name", StringValueConverter())
class AWS_KinesisAnalytics_Application_RecordFormat(CloudFormationProperty):
def write(self, w):
with w.block("record_format"):
self.block(w, "MappingParameters", AWS_KinesisAnalytics_Application_MappingParameters)
self.property(w, "RecordFormatType", "record_format_type", StringValueConverter())
class AWS_KinesisAnalytics_Application_KinesisFirehoseInput(CloudFormationProperty):
def write(self, w):
with w.block("kinesis_firehose_input"):
self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_Application_InputParallelism(CloudFormationProperty):
def write(self, w):
with w.block("input_parallelism"):
self.property(w, "Count", "count", BasicValueConverter())
class AWS_KinesisAnalytics_Application_InputLambdaProcessor(CloudFormationProperty):
def write(self, w):
with w.block("input_lambda_processor"):
self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationOutput_LambdaOutput(CloudFormationProperty):
def write(self, w):
with w.block("lambda_output"):
self.property(w, "ResourceARN", "resource_arn", StringValueConverter())
self.property(w, "RoleARN", "role_arn", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationOutput_Output(CloudFormationProperty):
def write(self, w):
with w.block("output"):
self.block(w, "DestinationSchema", AWS_KinesisAnalytics_ApplicationOutput_DestinationSchema)
self.block(w, "LambdaOutput", AWS_KinesisAnalytics_ApplicationOutput_LambdaOutput)
self.block(w, "KinesisFirehoseOutput", AWS_KinesisAnalytics_ApplicationOutput_KinesisFirehoseOutput)
self.block(w, "KinesisStreamsOutput", AWS_KinesisAnalytics_ApplicationOutput_KinesisStreamsOutput)
self.property(w, "Name", "name", StringValueConverter())
class AWS_KinesisAnalytics_Application_InputSchema(CloudFormationProperty):
def write(self, w):
with w.block("input_schema"):
self.property(w, "RecordEncoding", "record_encoding", StringValueConverter())
self.repeated_block(w, "RecordColumns", AWS_KinesisAnalytics_Application_RecordColumn)
self.block(w, "RecordFormat", AWS_KinesisAnalytics_Application_RecordFormat)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_MappingParameters(CloudFormationProperty):
def write(self, w):
with w.block("mapping_parameters"):
self.block(w, "JSONMappingParameters", AWS_KinesisAnalytics_ApplicationReferenceDataSource_JSONMappingParameters)
self.block(w, "CSVMappingParameters", AWS_KinesisAnalytics_ApplicationReferenceDataSource_CSVMappingParameters)
class AWS_KinesisAnalytics_Application_InputProcessingConfiguration(CloudFormationProperty):
def write(self, w):
with w.block("input_processing_configuration"):
self.block(w, "InputLambdaProcessor", AWS_KinesisAnalytics_Application_InputLambdaProcessor)
class AWS_KinesisAnalytics_ApplicationOutput(CloudFormationResource):
cfn_type = "AWS::KinesisAnalytics::ApplicationOutput"
tf_type = "aws_kinesis_analytics_application_output" # TODO: Most likely not working
ref = "arn"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "ApplicationName", "application_name", StringValueConverter())
self.block(w, "Output", AWS_KinesisAnalytics_ApplicationOutput_Output)
class AWS_KinesisAnalytics_Application_Input(CloudFormationProperty):
def write(self, w):
with w.block("input"):
self.property(w, "NamePrefix", "name_prefix", StringValueConverter())
self.block(w, "InputSchema", AWS_KinesisAnalytics_Application_InputSchema)
self.block(w, "KinesisStreamsInput", AWS_KinesisAnalytics_Application_KinesisStreamsInput)
self.block(w, "KinesisFirehoseInput", AWS_KinesisAnalytics_Application_KinesisFirehoseInput)
self.block(w, "InputProcessingConfiguration", AWS_KinesisAnalytics_Application_InputProcessingConfiguration)
self.block(w, "InputParallelism", AWS_KinesisAnalytics_Application_InputParallelism)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordFormat(CloudFormationProperty):
def write(self, w):
with w.block("record_format"):
self.block(w, "MappingParameters", AWS_KinesisAnalytics_ApplicationReferenceDataSource_MappingParameters)
self.property(w, "RecordFormatType", "record_format_type", StringValueConverter())
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceSchema(CloudFormationProperty):
def write(self, w):
with w.block("reference_schema"):
self.property(w, "RecordEncoding", "record_encoding", StringValueConverter())
self.repeated_block(w, "RecordColumns", AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordColumn)
self.block(w, "RecordFormat", AWS_KinesisAnalytics_ApplicationReferenceDataSource_RecordFormat)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceDataSource(CloudFormationProperty):
def write(self, w):
with w.block("reference_data_source"):
self.block(w, "ReferenceSchema", AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceSchema)
self.property(w, "TableName", "table_name", StringValueConverter())
self.block(w, "S3ReferenceDataSource", AWS_KinesisAnalytics_ApplicationReferenceDataSource_S3ReferenceDataSource)
class AWS_KinesisAnalytics_ApplicationReferenceDataSource(CloudFormationResource):
cfn_type = "AWS::KinesisAnalytics::ApplicationReferenceDataSource"
tf_type = "aws_kinesis_analytics_application_reference_data_source" # TODO: Most likely not working
ref = "arn"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "ApplicationName", "application_name", StringValueConverter())
self.block(w, "ReferenceDataSource", AWS_KinesisAnalytics_ApplicationReferenceDataSource_ReferenceDataSource)
class AWS_KinesisAnalytics_Application(CloudFormationResource):
cfn_type = "AWS::KinesisAnalytics::Application"
tf_type = "aws_kinesis_analytics_application"
ref = "id"
attrs = {} # Additional TF attributes: arn, create_timestamp, last_update_timestamp, status, version
def write(self, w):
with self.resource_block(w):
self.property(w, "ApplicationName", "name", StringValueConverter())
self.repeated_block(w, "Inputs", AWS_KinesisAnalytics_Application_Input)
self.property(w, "ApplicationDescription", "description", StringValueConverter())
self.property(w, "ApplicationCode", "code", StringValueConverter())
|
expected_output = {
'vpn_id': {
1000: {
'encap': 'MPLS',
'esi': '0001.00ff.0102.0000.0011',
'eth_tag': 0,
'label': 100001,
'mp_info': 'Remote all-active, ECMP Disable',
'mp_resolved': True,
'pathlists': {
'ead_es': {
'nexthop': {
'172.16.2.89': {
'label': 0,
},
},
},
'ead_evi': {
'nexthop': {
'172.16.2.89': {
'label': 100001,
},
},
},
'mac': {
'nexthop': {
'172.16.2.89': {
'label': 100001,
},
},
},
'summary': {
'nexthop': {
'172.16.2.89': {
'df_role': '(P)',
'label': 100001,
'value': '0xffffffff',
},
},
},
},
'vpn_id': 1000,
},
},
}
|
'''
Created on 31 Oct 2012
@author: kreczko
'''
from __future__ import division
import unittest
from random import random
import numpy as np
from rootpy.plotting import Hist2D
# under test
from tools.Calculation import calculate_purities
from tools.Calculation import calculate_stabilities
from tools.Calculation import decombine_result
class Test( unittest.TestCase ):
def setUp( self ):
# we only test symmetric bins for now
self.n_bins_x = 6
self.n_bins_y = 6
# only entries in diagonals, p = 1, s = 1 for all bins
self.best_case = Hist2D( self.n_bins_x, -3, 3, self.n_bins_y, 0, 6 )
for i in range( 1, self.n_bins_x + 1 ):
self.best_case.SetBinContent( i, i, random() * 1000 )
        # random ellipse
self.random_elipse = Hist2D( self.n_bins_x, -3, 3, self.n_bins_y, 0, 6 )
self.random_elipse.fill_array(
np.random.multivariate_normal(
mean = ( 0, 3 ),
cov = [[1., 1.12], [1.12, 2.25]],
size = ( 1000 )
)
)
# this creates
# [4, 0, 0, 0, 0, 1],
# [0, 0, 0, 0, 1, 0],
# [0, 0, 0, 1, 0, 0],
# [0, 0, 1, 0, 0, 0],
# [0, 1, 0, 0, 0, 0],
# [1, 0, 0, 0, 0, 3],
# this should result in a purity and stability value of 1 for all bins
# except the first and last. The first bin should have p = 1/5 and
# s = 1/4 and the last bin should have p = 1/4 and s = 1/5
self.pre_calculated = Hist2D( self.n_bins_x, -3, 3, self.n_bins_y, 0, 6 )
for i in range( 1, self.n_bins_x + 1 ):
self.pre_calculated.SetBinContent( i, i, 1 )
self.pre_calculated.SetBinContent( 1, self.n_bins_y, 4 )
self.pre_calculated.SetBinContent( self.n_bins_x, 1, 3 )
def tearDown( self ):
pass
def test_best_case_purity( self ):
purities = calculate_purities( self.best_case )
self.assertEqual( len( purities ), self.n_bins_x, 'Invalid number of purity terms' )
for p in purities:
self.assertEqual( p, 1 )
def test_best_case_stability( self ):
stabilities = calculate_stabilities( self.best_case )
self.assertEqual( len( stabilities ), self.n_bins_x, 'Invalid number of stability terms' )
for s in stabilities:
self.assertEqual( s, 1 )
def test_random_elipse_purity( self ):
purities = calculate_purities( self.random_elipse )
self.assertEqual( len( purities ), self.n_bins_x, 'Invalid number of purity terms' )
# purities should always be above 0 and below ~0.5
for p in purities:
self.assertGreater( p, 0 )
self.assertLess( p, 0.5 )
def test_random_elipse_stability( self ):
stabilities = calculate_stabilities( self.random_elipse )
self.assertEqual( len( stabilities ), self.n_bins_x, 'Invalid number of stability terms' )
# stabilities should always be above 0 and below ~0.6
for s in stabilities:
self.assertGreater( s, 0 )
self.assertLess( s, 0.6 )
def test_pre_calculated_purity( self ):
purities = calculate_purities( self.pre_calculated )
self.assertEqual( len( purities ), self.n_bins_x, 'Invalid number of purity terms' )
for p in purities[1:-1]:
self.assertEqual( p, 1 )
self.assertEqual( purities[0], 0.2 )
self.assertEqual( purities[-1], 0.25 )
def test_pre_calculated_stability( self ):
stabilities = calculate_stabilities( self.pre_calculated )
self.assertEqual( len( stabilities ), self.n_bins_x, 'Invalid number of stability terms' )
for s in stabilities[1:-1]:
self.assertEqual( s, 1 )
self.assertEqual( stabilities[0], 0.25 )
self.assertEqual( stabilities[-1], 0.2 )
def test_decombine_result_default(self):
N_signal = 100
N_background = 20
N_total = N_signal + N_background
ratio_signal_bkg = N_signal/N_background
N_total_prime = N_total * 2
N_signal_prime, N_background_prime = decombine_result((N_total_prime, 0), ratio_signal_bkg)
self.assertEqual(N_signal_prime[0], N_signal * 2)
self.assertEqual(N_background_prime[0], N_background * 2)
def test_decombine_result_background_free(self):
N_signal = 100
N_background = 0
N_total = N_signal
ratio_signal_bkg = 0
N_total_prime = N_total * 2
N_signal_prime, N_background_prime = decombine_result((N_total_prime, 0), ratio_signal_bkg)
self.assertEqual(N_signal_prime[0], N_signal * 2)
self.assertEqual(N_background_prime[0], N_background * 2)
def test_decombine_result_multiple_backgrounds(self):
N_signal = 100
N_background_1 = 20
N_background_2 = 40
N_total = N_signal + N_background_1 + N_background_2
# ratio of bkg_1 to other samples
ratio_signal_bkg_1 = (N_signal + N_background_2)/N_background_1
# ratio of bkg_2 to signal
ratio_signal_bkg_2 = N_signal/N_background_2
N_total_prime = N_total * 2
N_signal_plus_bkg_2_prime, N_background_1_prime = decombine_result((N_total_prime, 0), ratio_signal_bkg_1)
N_signal_prime, N_background_2_prime = decombine_result(N_signal_plus_bkg_2_prime, ratio_signal_bkg_2)
self.assertEqual(N_signal_prime[0], N_signal * 2)
self.assertEqual(N_background_1_prime[0], N_background_1 * 2)
self.assertEqual(N_background_2_prime[0], N_background_2 * 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testTemplates']
unittest.main()
|
import numpy as np
from torch import Tensor
from train_utils import training_step, model_validation
def test_train_step(module_dict):
model = module_dict["model"]
optimizer = module_dict["optimizer"]
criterion = module_dict["criterion"]
train_dataloader = module_dict["train_dataloader"]
for batch in train_dataloader:
loss = training_step(model, optimizer, criterion, batch)
assert isinstance(loss, Tensor)
def test_val_step(module_dict):
model = module_dict["model"]
criterion = module_dict["criterion"]
val_dataloader = module_dict["val_dataloader"]
metrics = model_validation(model, criterion, val_dataloader)
assert isinstance(metrics["val_loss"], float)
assert isinstance(metrics["precision"], float)
|
from formula3 import formula
from const import *
from math import *
v_circ = formula("circular orbit velocity",
v='({u}/{r})**0.5',
u='{r}*{v}**2',
r='{u}/{v}**2')
v_elip = formula("elipse orbit velocity",
v='(2*{u}/{r}-{u}/{a})**0.5',
u='{r}*{v}**2',
r='{u}/{v}**2')
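# Tsiolkovsky rocket equation: delta-v from effective exhaust velocity (g*isp) and the mass ratio mi/mf.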
delta_v = formula("",
dv='g*isp*log(mi/mf)')
f_grav= formula("gravitational force",
fg="{u}*{m}/{r}**2")
f_c = formula("centrifugal force",
fc="{v}**2/{r}")
e_pot = formula("",
                ep="-{u}*{m}/{r}")
e_kin = formula("",
                ek="{v}**2*{m}/2")
t = formula("orbital period",
t="2*pi*({a}**3/{u})**0.5")
ecc = formula('eccentricity',
              e='({ra}-{rp})/({ra}+{rp})')
n = formula('mean motion',
n="({u}/{a}**3)**0.5")
|
from functools import singledispatch
from . import numpy
@singledispatch
def RmsProp(machine, learning_rate=0.001, beta=0.9, epscut=1.0e-7):
r"""RMSProp optimizer.
RMSProp is a well-known update algorithm proposed by Geoff Hinton
    in his `Neural Networks course notes
<http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.
It corrects the problem with AdaGrad by using an exponentially weighted
moving average over past squared gradients instead of a cumulative sum.
    After initializing the vector :math:`\mathbf{s}` to zero, :math:`s_k` and
    the parameters :math:`p_k` are updated as
.. math:: s^\prime_k = \beta s_k + (1-\beta) G_k(\mathbf{p})^2 \\
p^\prime_k = p_k - \frac{\eta}{\sqrt{s_k}+\epsilon} G_k(\mathbf{p})
Constructs a new ``RmsProp`` optimizer.
Args:
learning_rate: The learning rate :math:`\eta`
beta: Exponential decay rate.
epscut: Small cutoff value.
Examples:
RmsProp optimizer.
>>> from netket.optimizer import RmsProp
>>> op = RmsProp(learning_rate=0.02)
"""
return numpy.RmsProp(learning_rate, beta, epscut)
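# Illustrative sketch only, not part of netket: a plain-NumPy version of the
# update rule documented above. `params`, `grad` and `s` are placeholder names
# for a flat parameter vector, its gradient and the running squared-gradient average.
def _rmsprop_step_sketch(params, grad, s, learning_rate=0.001, beta=0.9, epscut=1.0e-7):
    """One RMSProp step: s <- beta*s + (1-beta)*g**2, p <- p - eta*g/(sqrt(s)+eps)."""
    import numpy as np  # local import keeps the sketch self-contained
    s = beta * s + (1.0 - beta) * grad ** 2
    params = params - learning_rate * grad / (np.sqrt(s) + epscut)
    return params, s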
|
"""
Native Japanese pronunciations for characters based on Kunyomi
pronunciations from Wiktionary. These include guesses on application
of rendaku.
"""
__author__ = """
rws@uiuc.edu (Richard Sproat)
"""
KUNYOMI_ = {}
RENDAKU_ = {}
def RendakuWorldBet(worldbet):
"""If the romaji is marked with '*' the form may undergo rendaku
"""
worldbet = worldbet.split()
if worldbet[0] == 'h': return 'b ' + ' '.join(worldbet[1:])
if worldbet[0] == 't': return 'd ' + ' '.join(worldbet[1:])
if worldbet[0] == 'k': return 'g ' + ' '.join(worldbet[1:])
if worldbet[0] == 's': return 'z ' + ' '.join(worldbet[1:])
if worldbet[0] == 'ts': return 'z ' + ' '.join(worldbet[1:])
if worldbet[0] == 'S': return 'j ' + ' '.join(worldbet[1:])
return ' '.join(worldbet)
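# Expected kunyomi table format (inferred from LoadKunyomiWbTable below): one
# tab-separated entry per line, where column 0 is the kanji, column 2 is the
# romaji (a leading '*' marks forms that may undergo rendaku) and column 3 is
# the WorldBet pronunciation.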
def LoadKunyomiWbTable(table):
if KUNYOMI_: return ## already loaded
p = open(table)
lines = p.readlines()
p.close()
for line in lines:
line = line.strip().split('\t')
romaji = line[2].strip()
pron = line[3].strip()
KUNYOMI_[line[0]] = pron
if romaji[0] == '*': RENDAKU_[line[0]] = True
def KanjiToWorldBet(string):
output = []
some_success = False
internal = False
for c in unicode(string, 'utf8'):
c = c.encode('utf-8')
try:
pron = KUNYOMI_[c]
if internal and c in RENDAKU_:
pron = RendakuWorldBet(pron)
output.append(pron)
some_success = True
except KeyError:
output.append(c)
internal = True
return ' '.join(output), some_success
|
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import scipy.io
from scipy.stats import norm as scipy_norm
import seaborn as sns
from utils.misc import mkdir
from utils.plot import plot_prediction_bayes, plot_MC
from utils.lhs import lhs
from args import args, device
plt.switch_backend('agg')
assert args.post, 'Add --post flag in command line for post-proc tasks, e.g. UQ.'
run_dir = args.run_dir
ntrain = args.ntrain
plot_fn=args.plot_fn
epochs = args.epochs
class UQ(object):
    r"""Class for uncertainty quantification tasks, including:
- prediction at one input realization
- uncertainty propagation
- distribution estimate at certain location
- reliability diagram (assess uncertainty quality)
Args:
bayes_nn (bayes_nn.BayesNN): Pre-trained Bayesian NN
mc_loader (utils.data.DataLoader): Dataloader for Monte Carlo data
"""
def __init__(self, bayes_nn, mc_loader):
self.bnn = bayes_nn
self.mc_loader = mc_loader
def plot_prediction_at_x(self, n_pred):
r"""Plot `n_pred` predictions for randomly selected input from MC dataset.
- target
- predictive mean
- error of the above two
- two standard deviation of predictive output distribution
Args:
n_pred: number of candidate predictions
"""
print('Plotting predictions at x from MC dataset......................')
np.random.seed(1)
idx = np.random.permutation(len(self.mc_loader.dataset))[:n_pred]
for i in idx:
print('input index: {}'.format(i))
input, target = self.mc_loader.dataset[i]
pred_mean, pred_var = self.bnn.predict(input.unsqueeze(0).to(device))
save_dir = run_dir + '/predict_at_x'
mkdir(save_dir)
plot_prediction_bayes(save_dir, target, pred_mean.squeeze(0),
pred_var.squeeze(0), epochs, i, plot_fn=plot_fn)
def propagate_uncertainty(self):
print("Propagate Uncertainty using pre-trained surrogate .............")
# compute MC sample mean and variance in mini-batch
sample_mean_x = torch.zeros_like(self.mc_loader.dataset[0][0])
sample_var_x = torch.zeros_like(sample_mean_x)
sample_mean_y = torch.zeros_like(self.mc_loader.dataset[0][1])
sample_var_y = torch.zeros_like(sample_mean_y)
for _, (x_test_mc, y_test_mc) in enumerate(self.mc_loader):
x_test_mc, y_test_mc = x_test_mc, y_test_mc
sample_mean_x += x_test_mc.mean(0)
sample_mean_y += y_test_mc.mean(0)
sample_mean_x /= len(self.mc_loader)
sample_mean_y /= len(self.mc_loader)
for _, (x_test_mc, y_test_mc) in enumerate(self.mc_loader):
x_test_mc, y_test_mc = x_test_mc, y_test_mc
sample_var_x += ((x_test_mc - sample_mean_x) ** 2).mean(0)
sample_var_y += ((y_test_mc - sample_mean_y) ** 2).mean(0)
sample_var_x /= len(self.mc_loader)
sample_var_y /= len(self.mc_loader)
# plot input MC
stats_x = torch.stack((sample_mean_x, sample_var_x)).cpu().numpy()
fig, _ = plt.subplots(1, 2)
for i, ax in enumerate(fig.axes):
# ax.set_title(titles[i])
ax.set_aspect('equal')
ax.set_axis_off()
# im = ax.imshow(stats_x[i].squeeze(0),
# interpolation='bilinear', cmap=self.args.cmap)
im = ax.contourf(stats_x[i].squeeze(0), 50, cmap='jet')
for c in im.collections:
c.set_edgecolor("face")
c.set_linewidth(0.000000000001)
cbar = plt.colorbar(im, ax=ax, fraction=0.046, pad=0.04,
format=ticker.ScalarFormatter(useMathText=True))
cbar.formatter.set_powerlimits((0, 0))
cbar.ax.yaxis.set_offset_position('left')
cbar.update_ticks()
plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
out_stats_dir = run_dir + '/out_stats'
mkdir(out_stats_dir)
        plt.savefig(out_stats_dir + '/input_MC.pdf', dpi=300, bbox_inches='tight')
plt.close(fig)
print("Done plotting input MC, num of training: {}".format(ntrain))
# MC surrogate predictions
y_pred_EE, y_pred_VE, y_pred_EV, y_pred_VV = self.bnn.propagate(self.mc_loader)
print('Done MC predictions')
# plot the 4 output stats
# plot the predictive mean
plot_MC(out_stats_dir, sample_mean_y, y_pred_EE, y_pred_VE, True, ntrain)
# plot the predictive var
plot_MC(out_stats_dir, sample_var_y, y_pred_EV, y_pred_VV, False, ntrain)
# save for MATLAB plotting
scipy.io.savemat(out_stats_dir + '/out_stats.mat',
{'sample_mean': sample_mean_y.cpu().numpy(),
'sample_var': sample_var_y.cpu().numpy(),
'y_pred_EE': y_pred_EE.cpu().numpy(),
'y_pred_VE': y_pred_VE.cpu().numpy(),
'y_pred_EV': y_pred_EV.cpu().numpy(),
'y_pred_VV': y_pred_VV.cpu().numpy()})
print('saved output stats to .mat file')
def plot_dist(self, num_loc):
"""Plot distribution estimate in `num_loc` locations in the domain,
which are chosen by Latin Hypercube Sampling.
Args:
num_loc (int): number of locations where distribution is estimated
"""
print('Plotting distribution estimate.................................')
assert num_loc > 0, 'num_loc must be greater than zero'
locations = lhs(2, num_loc, criterion='c')
print('Locations selected by LHS: \n{}'.format(locations))
# location (ndarray): [0, 1] x [0, 1]: N x 2
idx = (locations * 65).astype(int)
print('Propagating...')
pred, target = [], []
for _, (x_mc, t_mc) in enumerate(self.mc_loader):
x_mc = x_mc.to(device)
# S x B x C x H x W
y_mc = self.bnn.forward(x_mc)
# S x B x C x n_points
pred.append(y_mc[:, :, :, idx[:, 0], idx[:, 1]])
# B x C x n_points
target.append(t_mc[:, :, idx[:, 0], idx[:, 1]])
# S x M x C x n_points --> M x C x n_points
pred = torch.cat(pred, dim=1).mean(0).cpu().numpy()
print('pred size: {}'.format(pred.shape))
# M x C x n_points
target = torch.cat(target, dim=0).cpu().numpy()
print('target shape: {}'.format(target.shape))
dist_dir = run_dir + '/dist_estimate'
mkdir(dist_dir)
for loc in range(locations.shape[0]):
print(loc)
fig, _ = plt.subplots(1, 3, figsize=(12, 4))
for c, ax in enumerate(fig.axes):
sns.kdeplot(target[:, c, loc], color='b', ls='--', label='Monte Carlo', ax=ax)
sns.kdeplot(pred[:, c, loc], color='r', label='surrogate', ax=ax)
ax.legend()
plt.savefig(dist_dir + '/loc_({}, {}).pdf'
.format(locations[loc][0], locations[loc][1]), dpi=300)
plt.close(fig)
def plot_reliability_diagram(self):
print("Plotting reliability diagram..................................")
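        # Calibration check: for each nominal probability p, count how often the
        # MC targets fall inside the central p-interval of the predictive Gaussian
        # N(pred_mean, pred_var); a well-calibrated surrogate gives frequency ~ p,
        # i.e. points close to the ideal diagonal plotted below.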
# percentage: p
# p_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95]
p_list = np.linspace(0.01, 0.99, 10)
freq = []
for p in p_list:
count = 0
numels = 0
for batch_idx, (input, target) in enumerate(self.mc_loader):
# only evaluate 2000 of the MC data to save time
if batch_idx > 4:
continue
pred_mean, pred_var = self.bnn.predict(input.to(device))
interval = scipy_norm.interval(p, loc=pred_mean.cpu().numpy(),
scale=pred_var.sqrt().cpu().numpy())
count += ((target.numpy() >= interval[0])
& (target.numpy() <= interval[1])).sum()
numels += target.numel()
print('p: {}, {} / {} = {}'.format(p, count, numels,
np.true_divide(count, numels)))
freq.append(np.true_divide(count, numels))
reliability_dir = run_dir + '/uncertainty_quality'
mkdir(reliability_dir)
plt.figure()
plt.plot(p_list, freq, 'r', label='Bayesian surrogate')
plt.xlabel('Probability')
plt.ylabel('Frequency')
x = np.linspace(0, 1, 100)
plt.plot(x, x, 'k--', label='ideal')
plt.legend(loc='upper left')
plt.savefig(reliability_dir + "/reliability_diagram.pdf", dpi=300)
reliability = np.zeros((p_list.shape[0], 2))
reliability[:, 0] = p_list
reliability[:, 1] = np.array(freq)
np.savetxt(reliability_dir + "/reliability_diagram.txt", reliability)
plt.close()
|
import asyncio
import logging
import sys
from lib.interface import OrderBook
ftx = __import__('2_ftx_ans')
# Create a class to represent a candlestick (or bar) with open, high, low and close variables
class Bar:
# create an empty bar
def __init__(self):
self.open = None
self.high = None
self.low = None
self.close = None
self.reset()
# copy the values of the provided bar
def copy(self, _bar):
self.open = _bar.open
self.high = _bar.high
self.low = _bar.low
self.close = _bar.close
# reset the values
def reset(self):
self.open = None
self.high = 0
self.low = 10000000
self.close = None
# A global variable to capture candlestick info in the current period that is still developing
liveBar = Bar()
# A global variable to capture last candlestick
bar = Bar()
# This callback captures the open, high, low and close of a bar
def ftx_depth_callback(contract_name: str, book: OrderBook):
mid = 0.5 * (book.bids[0].price + book.asks[0].price)
if not liveBar.open:
liveBar.open = mid
if mid > liveBar.high:
liveBar.high = mid
if mid < liveBar.low:
liveBar.low = mid
liveBar.close = mid
# This method is triggered periodically to conclude a bar
async def capture_bar(period):
await asyncio.sleep(period)
bar.copy(liveBar)
liveBar.reset()
print('O: ' + str(bar.open) + ' H: ' + str(bar.high) + ' L: ' + str(bar.low) + ' C: ' + str(bar.close))
if __name__ == '__main__':
# logging stuff
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
handler.setFormatter(logFormatter)
root.addHandler(handler)
logging.info("main program starts")
# start FTX connection and register callbacks.
contracts = {'BTC-PERP'}
ftx = ftx.FtxManager(symbol=contracts)
ftx.register_depth_callback(ftx_depth_callback)
ftx.connect()
# loop to capture candlestick periodically
while True:
asyncio.run(capture_bar(60))
|
__test__ = False
if __name__ == '__main__':
import eventlet
import eventlet.tpool
import gc
import pprint
class RequiredException(Exception):
pass
class A(object):
def ok(self):
return 'ok'
def err(self):
raise RequiredException
a = A()
# case 1 no exception
assert eventlet.tpool.Proxy(a).ok() == 'ok'
    # yield to tpool_trampoline(), otherwise e.send(rv) still holds a reference
eventlet.sleep(0.1)
gc.collect()
refs = gc.get_referrers(a)
assert len(refs) == 1, 'tpool.Proxy-ied object leaked: {}'.format(pprint.pformat(refs))
# case 2 with exception
def test_exception():
try:
eventlet.tpool.Proxy(a).err()
assert False, 'expected exception'
except RequiredException:
pass
test_exception()
    # yield to tpool_trampoline(), otherwise e.send(rv) still holds a reference
eventlet.sleep(0.1)
gc.collect()
refs = gc.get_referrers(a)
assert len(refs) == 1, 'tpool.Proxy-ied object leaked: {}'.format(pprint.pformat(refs))
print('pass')
|
from distutils.core import setup
import py2exe
setup(console=['debugFinderJs.py'])
|
from enum import Enum
from entity import Entity
from core.database import FromDB, DBData
from core.constants import AfflictType
from typing import Optional
class AffGroup(Enum):
DOT = 1
CC = 2
class Affliction(FromDB, table="AbnormalStatusType"):
def __init__(self, aff_type: AfflictType, entity: Entity) -> None:
super().__init__(aff_type.value)
self.entity = entity
self.resist = 0
self.gain = self._data["_ResistGain"]
self.group = AffGroup(self._data["_Group"])
@staticmethod
def _get_name(data: Optional[DBData]):
if not data:
return None
return data.get("_AbnormalName")
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re,traceback,urllib,urlparse,json
from resources.lib.modules import cfscrape
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import jsunpack
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['pubfilmonline.net']
self.base_link = 'http://pubfilmonline.net/'
self.post_link = '/wp-admin/admin-ajax.php'
self.search_link = '/?s=%s'
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(title),year)
r = self.scraper.get(url).content
if '<h2>ERROR <span>404</span></h2>' in r:
url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(title))
r = self.scraper.get(url).content
if '<h2>ERROR <span>404</span></h2>' in r: return
return url
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
if 'tvshowtitle' in data:
url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
year = re.findall('(\d{4})', data['premiered'])[0]
r = self.scraper.get(url).content
y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
y = re.findall('(\d{4})', y)[0]
if not y == year: raise Exception()
else:
r = self.scraper.get(url).content
result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)
for i in result:
url = i[0].replace('\/', '/')
sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
return sources
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return sources
def resolve(self, url):
if 'google' in url:
return directstream.googlepass(url)
else:
return url
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import mock
import os
import shutil
import tempfile
from future.tests.base import unittest
from mobly import utils
from mobly import runtime_test_info
from mobly.controllers import android_device
from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib.services import logcat
from tests.lib import mock_android_device
# The expected result of the cat adb operation.
MOCK_ADB_LOGCAT_CAT_RESULT = [
'02-29 14:02:21.456 4454 Something\n',
'02-29 14:02:21.789 4454 Something again\n'
]
# A mocked piece of adb logcat output.
MOCK_ADB_LOGCAT = (u'02-29 14:02:19.123 4454 Nothing\n'
u'%s'
u'02-29 14:02:22.123 4454 Something again and again\n'
) % u''.join(MOCK_ADB_LOGCAT_CAT_RESULT)
# The expected result of the cat adb operation.
MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT = [
'02-29 14:02:21.456 4454 Something \u901a\n',
'02-29 14:02:21.789 4454 Something again\n'
]
# A mocked piece of adb logcat output.
MOCK_ADB_UNICODE_LOGCAT = (
u'02-29 14:02:19.123 4454 Nothing\n'
u'%s'
u'02-29 14:02:22.123 4454 Something again and again\n'
) % u''.join(MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT)
# Mock start and end time of the adb cat.
MOCK_ADB_LOGCAT_BEGIN_TIME = '02-29 14:02:20.123'
MOCK_ADB_LOGCAT_END_TIME = '02-29 14:02:22.000'
# Mock AdbError for missing logpersist scripts
MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR = adb.AdbError(
'logpersist.stop --clear', b'',
'/system/bin/sh: logpersist.stop: not found', 0)
MOCK_LOGPERSIST_START_MISSING_ADB_ERROR = adb.AdbError(
'logpersist.start --clear', b'',
b'/system/bin/sh: logpersist.stop: not found', 0)
class LogcatTest(unittest.TestCase):
"""Tests for Logcat service and its integration with AndroidDevice."""
def setUp(self):
# Set log_path to logging since mobly logger setup is not called.
if not hasattr(logging, 'log_path'):
setattr(logging, 'log_path', '/tmp/logs')
# Creates a temp dir to be used by tests in this test class.
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
"""Removes the temp dir.
"""
shutil.rmtree(self.tmp_dir)
def AssertFileContains(self, content, file_path):
with open(file_path, 'r') as f:
output = f.read()
self.assertIn(content, output)
def AssertFileDoesNotContain(self, content, file_path):
with open(file_path, 'r') as f:
output = f.read()
self.assertNotIn(content, output)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_start_and_stop(self, stop_proc_mock, start_proc_mock,
create_dir_mock, FastbootProxy, MockAdbProxy):
"""Verifies the steps of collecting adb logcat on an AndroidDevice
object, including various function calls and the expected behaviors of
the calls.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
# Verify start did the correct operations.
self.assertTrue(logcat_service._adb_logcat_process)
expected_log_path = os.path.join(logging.log_path,
'AndroidDevice%s' % ad.serial,
'adblog,fakemodel,%s.txt' % ad.serial)
create_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
adb_cmd = '"adb" -s %s logcat -v threadtime >> %s'
start_proc_mock.assert_called_with(
adb_cmd % (ad.serial, '"%s"' % expected_log_path), shell=True)
self.assertEqual(logcat_service.adb_logcat_file_path,
expected_log_path)
expected_msg = (
'Logcat thread is already running, cannot start another'
' one.')
# Expect error if start is called back to back.
with self.assertRaisesRegex(logcat.Error, expected_msg):
logcat_service.start()
# Verify stop did the correct operations.
logcat_service.stop()
stop_proc_mock.assert_called_with('process')
self.assertIsNone(logcat_service._adb_logcat_process)
self.assertEqual(logcat_service.adb_logcat_file_path,
expected_log_path)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_update_config(self, stop_proc_mock, start_proc_mock,
create_dir_mock, FastbootProxy, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
logcat_service.stop()
new_log_params = '-a -b -c'
new_file_path = 'some/path/log.txt'
new_config = logcat.Config(logcat_params=new_log_params,
output_file_path=new_file_path)
logcat_service.update_config(new_config)
logcat_service.start()
self.assertTrue(logcat_service._adb_logcat_process)
create_dir_mock.assert_has_calls([mock.call('some/path')])
expected_adb_cmd = ('"adb" -s 1 logcat -v threadtime -a -b -c >> '
'"some/path/log.txt"')
start_proc_mock.assert_called_with(expected_adb_cmd, shell=True)
self.assertEqual(logcat_service.adb_logcat_file_path,
'some/path/log.txt')
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_update_config_while_running(self, stop_proc_mock, start_proc_mock,
create_dir_mock, FastbootProxy,
MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
new_config = logcat.Config(logcat_params='-blah',
output_file_path='some/path/file.txt')
with self.assertRaisesRegex(
logcat.Error,
'Logcat thread is already running, cannot start another one'):
logcat_service.update_config(new_config)
self.assertTrue(logcat_service.is_alive)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch(
'mobly.controllers.android_device_lib.services.logcat.Logcat.clear_adb_log',
return_value=mock_android_device.MockAdbProxy('1'))
def test_pause_and_resume(self, clear_adb_mock, stop_proc_mock,
start_proc_mock, create_dir_mock, FastbootProxy,
MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad, logcat.Config(clear_log=True))
logcat_service.start()
clear_adb_mock.assert_called_once_with()
self.assertTrue(logcat_service.is_alive)
logcat_service.pause()
self.assertFalse(logcat_service.is_alive)
stop_proc_mock.assert_called_with('process')
self.assertIsNone(logcat_service._adb_logcat_process)
clear_adb_mock.reset_mock()
logcat_service.resume()
self.assertTrue(logcat_service.is_alive)
clear_adb_mock.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch(
'mobly.controllers.android_device_lib.services.logcat.Logcat.clear_adb_log',
return_value=mock_android_device.MockAdbProxy('1'))
def test_logcat_service_create_excerpt(self, clear_adb_mock,
stop_proc_mock, start_proc_mock,
FastbootProxy, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
FILE_CONTENT = 'Some log.\n'
with open(logcat_service.adb_logcat_file_path, 'w') as f:
f.write(FILE_CONTENT)
test_output_dir = os.path.join(self.tmp_dir, 'test_foo')
mock_record = mock.MagicMock()
mock_record.begin_time = 123
test_run_info = runtime_test_info.RuntimeTestInfo(
'test_foo', test_output_dir, mock_record)
logcat_service.create_per_test_excerpt(test_run_info)
expected_path1 = os.path.join(test_output_dir, 'test_foo-123',
'adblog,fakemodel,1.txt')
self.assertTrue(os.path.exists(expected_path1))
self.AssertFileContains(FILE_CONTENT, expected_path1)
self.assertFalse(os.path.exists(logcat_service.adb_logcat_file_path))
# Generate some new logs and do another excerpt.
FILE_CONTENT = 'Some more logs!!!\n'
with open(logcat_service.adb_logcat_file_path, 'w') as f:
f.write(FILE_CONTENT)
test_output_dir = os.path.join(self.tmp_dir, 'test_bar')
mock_record = mock.MagicMock()
mock_record.begin_time = 456
test_run_info = runtime_test_info.RuntimeTestInfo(
'test_bar', test_output_dir, mock_record)
logcat_service.create_per_test_excerpt(test_run_info)
expected_path2 = os.path.join(test_output_dir, 'test_bar-456',
'adblog,fakemodel,1.txt')
self.assertTrue(os.path.exists(expected_path2))
self.AssertFileContains(FILE_CONTENT, expected_path2)
self.AssertFileDoesNotContain(FILE_CONTENT, expected_path1)
self.assertFalse(os.path.exists(logcat_service.adb_logcat_file_path))
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_take_logcat_with_extra_params(self, stop_proc_mock,
start_proc_mock, create_dir_mock,
FastbootProxy, MockAdbProxy):
"""Verifies the steps of collecting adb logcat on an AndroidDevice
object, including various function calls and the expected behaviors of
the calls.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
configs = logcat.Config()
configs.logcat_params = '-b radio'
logcat_service = logcat.Logcat(ad, configs)
logcat_service.start()
# Verify start did the correct operations.
self.assertTrue(logcat_service._adb_logcat_process)
expected_log_path = os.path.join(logging.log_path,
'AndroidDevice%s' % ad.serial,
'adblog,fakemodel,%s.txt' % ad.serial)
create_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
adb_cmd = '"adb" -s %s logcat -v threadtime -b radio >> %s'
start_proc_mock.assert_called_with(
adb_cmd % (ad.serial, '"%s"' % expected_log_path), shell=True)
self.assertEqual(logcat_service.adb_logcat_file_path,
expected_log_path)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test_instantiation(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
mock_serial = 1
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
self.assertIsNone(logcat_service._adb_logcat_process)
self.assertIsNone(logcat_service.adb_logcat_file_path)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch('mobly.logger.get_log_line_timestamp',
return_value=MOCK_ADB_LOGCAT_END_TIME)
def test_cat_adb_log(self, mock_timestamp_getter, stop_proc_mock,
start_proc_mock, FastbootProxy, MockAdbProxy):
"""Verifies that AndroidDevice.cat_adb_log loads the correct adb log
file, locates the correct adb log lines within the given time range,
and writes the lines to the correct output file.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
# Direct the log path of the ad to a temp dir to avoid racing.
logcat_service._ad._log_path = self.tmp_dir
# Expect error if attempted to cat adb log before starting adb logcat.
expected_msg = ('.* Attempting to cat adb log when none'
' has been collected.')
with self.assertRaisesRegex(logcat.Error, expected_msg):
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
logcat_service.start()
utils.create_dir(ad.log_path)
mock_adb_log_path = os.path.join(
ad.log_path, 'adblog,%s,%s.txt' % (ad.model, ad.serial))
with io.open(mock_adb_log_path, 'w', encoding='utf-8') as f:
f.write(MOCK_ADB_LOGCAT)
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
cat_file_path = os.path.join(
ad.log_path, 'AdbLogExcerpts',
('some_test,02-29 14-02-20.123,%s,%s.txt') % (ad.model, ad.serial))
with io.open(cat_file_path, 'r', encoding='utf-8') as f:
actual_cat = f.read()
self.assertEqual(actual_cat, ''.join(MOCK_ADB_LOGCAT_CAT_RESULT))
# Stops adb logcat.
logcat_service.stop()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch('mobly.logger.get_log_line_timestamp',
return_value=MOCK_ADB_LOGCAT_END_TIME)
def test_cat_adb_log_with_unicode(self, mock_timestamp_getter,
stop_proc_mock, start_proc_mock,
FastbootProxy, MockAdbProxy):
"""Verifies that AndroidDevice.cat_adb_log loads the correct adb log
file, locates the correct adb log lines within the given time range,
and writes the lines to the correct output file.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
# Direct the log path of the ad to a temp dir to avoid racing.
logcat_service._ad._log_path = self.tmp_dir
# Expect error if attempted to cat adb log before starting adb logcat.
expected_msg = ('.* Attempting to cat adb log when none'
' has been collected.')
with self.assertRaisesRegex(logcat.Error, expected_msg):
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
logcat_service.start()
utils.create_dir(ad.log_path)
mock_adb_log_path = os.path.join(
ad.log_path, 'adblog,%s,%s.txt' % (ad.model, ad.serial))
with io.open(mock_adb_log_path, 'w', encoding='utf-8') as f:
f.write(MOCK_ADB_UNICODE_LOGCAT)
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
cat_file_path = os.path.join(
ad.log_path, 'AdbLogExcerpts',
('some_test,02-29 14-02-20.123,%s,%s.txt') % (ad.model, ad.serial))
with io.open(cat_file_path, 'r', encoding='utf-8') as f:
actual_cat = f.read()
self.assertEqual(actual_cat,
''.join(MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT))
# Stops adb logcat.
logcat_service.stop()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_logpersist(self, MockFastboot,
MockAdbProxy):
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': True,
'logpersist.stop': True,
}[command]
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_has_calls([
mock.call('logpersist.stop --clear'),
mock.call('logpersist.start'),
])
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_user_build_device(self, MockFastboot,
MockAdbProxy):
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'user',
'ro.debuggable': '0',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': True,
'logpersist.stop': True,
}[command]
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_missing_all_logpersist(
self, MockFastboot, MockAdbProxy):
def adb_shell_helper(command):
if command == 'logpersist.start':
raise MOCK_LOGPERSIST_START_MISSING_ADB_ERROR
elif command == 'logpersist.stop --clear':
raise MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR
else:
return b''
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': False,
'logpersist.stop': False,
}[command]
mock_adb_proxy.shell.side_effect = adb_shell_helper
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_missing_logpersist_stop(
self, MockFastboot, MockAdbProxy):
def adb_shell_helper(command):
if command == 'logpersist.stop --clear':
raise MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR
else:
return b''
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': True,
'logpersist.stop': False,
}[command]
mock_adb_proxy.shell.side_effect = adb_shell_helper
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_has_calls([
mock.call('logpersist.stop --clear'),
])
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_missing_logpersist_start(
self, MockFastboot, MockAdbProxy):
def adb_shell_helper(command):
if command == 'logpersist.start':
raise MOCK_LOGPERSIST_START_MISSING_ADB_ERROR
else:
return b''
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': False,
'logpersist.stop': True,
}[command]
mock_adb_proxy.shell.side_effect = adb_shell_helper
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy')
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test_clear_adb_log(self, MockFastboot, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.adb.logcat = mock.MagicMock()
ad.adb.logcat.side_effect = adb.AdbError(
cmd='cmd',
stdout=b'',
stderr=b'failed to clear "main" log',
ret_code=1)
logcat_service = logcat.Logcat(ad)
logcat_service.clear_adb_log()
if __name__ == '__main__':
unittest.main()
|
"""
Module that implements pure-python equivalents of the functions in the
_speedups extension module.
"""
from numpy import clip, invert, isnan, isinf, array, transpose, zeros, \
compress, where, take, float32, ones_like
import numpy as np
import operator
def array_combine(a, b, op=operator.and_, func=lambda x: x):
""" Returns op(func(a), func(b)) if a and b are both not None;
if one is None, then returns func() on the non-None array;
if both are None, then returns None.
"""
if a is not None and b is not None:
return op(func(a), func(b))
elif a is not None:
return func(a)
elif b is not None:
return func(b)
else:
return None
def scatterplot_gather_points(index, index_low, index_high,
value, value_low, value_high,
index_mask=None, index_sel=None, index_sel_mask=None,
value_mask=None, value_sel=None, value_sel_mask=None):
"""
Takes index and value arrays, masks, and optional selection arrays,
and returns the list of points and corresponding selection mask for
those points.
Parameters
----------
index : float array (1D)
Array of indexes of the points
index_low : float or None
The minimum acceptable value in the index array
index_high : float or None
The maximum acceptable value in the index array
value : float array (1D)
Array of values of the points
value_low : float or None
The minimum acceptable value in the value array
value_high : float or None
The maximum acceptable value in the value array
Optional Parameters
-------------------
index_mask : bool or int array (1D)
Mask array for the indexes
index_sel : sequence of ints
A list/tuple/array of indices of selected positions in the index array
    index_sel_mask : array of ints or bools
        A mask array with True values indicating which points are selected
value_mask : bool or int array (1D)
Mask array for the values
value_sel : sequence of ints
A list/tuple/array of indices of selected positions in the value array
    value_sel_mask : array of ints or bools
        A mask array with True values indicating which points are selected
Returns
-------
points : float array (Nx2)
The points that match all the masking criteria
sel_mask : bool array (1D)
Mask indicating which indices in **points** are selected
"""
index_range_mask = (index_low < index) & (index < index_high)
value_range_mask = (value_low < value) & (value < value_high)
nan_mask = array_combine(index_mask, value_mask,
func = lambda x: invert(isnan(x)) & x)
if nan_mask is not None:
point_mask = nan_mask & index_range_mask & value_range_mask
else:
point_mask = index_range_mask & value_range_mask
points = transpose(array((index, value)))
# Handle the selection mask
selection_mask = array_combine(index_sel_mask, value_sel_mask)
if index_sel is None and value_sel is None:
pass
else:
if index_sel is not None and value_sel is not None:
mask2 = zeros(len(index), int)
mask2[index_sel] = 1
mask2[value_sel] &= 1
elif index_sel is not None:
mask2 = zeros(len(index), int)
mask2[index_sel] = 1
elif value_sel is not None:
mask2 = zeros(len(index), int)
mask2[value_sel] = 1
if selection_mask is None:
selection_mask = mask2
else:
selection_mask &= mask2
points = compress(point_mask, points, axis=0)
if selection_mask is not None:
selections = compress(point_mask, selection_mask)
else:
selections = None
return points, selections
def apply_selection_fade(mapped_image, mask, fade_alpha, fade_background):
'''Apply a selection fade to a colormapped image.
Parameters
----------
mapped_image : ndarray of uint8, shape (N,M,4)
The digitized rgba values
    mask : ndarray of bool, shape (N,M)
        The per-pixel mask; True marks pixels that keep their colors
fade_alpha : float
The alpha value for the fade
fade_background : rgb888 tuple
The fade background
'''
    imask = invert(mask)
    if fade_alpha == 0:
        mapped_image[imask,0:3] = fade_background
    else:
        ialpha = (1.0 - fade_alpha)
        background = tuple(ialpha * x for x in fade_background)
        # Blend in float32; in-place float math on the uint8 selection would not
        # cast cleanly, and the final assignment truncates back to uint8.
        image_region = mapped_image[imask,0:3].astype(float32)
        image_region *= fade_alpha
        image_region += background
        mapped_image[imask,0:3] = image_region
def map_colors(data_array, steps, low, high, red_lut, green_lut, blue_lut,
alpha_lut):
'''Map colors from color lookup tables to a data array.
This is used in ColorMapper.map_screen
Parameters
----------
data_array : ndarray
The data array
steps: int
The number of steps in the color map (depth)
low : float
The low end of the data range
high : float
The high end of the data range
red_lut : ndarray of float32
The red channel lookup table
green_lut : ndarray of float32
The green channel lookup table
blue_lut : ndarray of float32
The blue channel lookup table
alpha_lut : ndarray of float32
The alpha channel lookup table
Returns
-------
rgba: ndarray of float32
The rgba values of data_array according to the lookup tables. The shape
of this array is equal to data_array.shape + (4,).
'''
range_diff = high - low
if range_diff == 0.0 or isinf(range_diff):
# Handle null range, or infinite range (which can happen during
# initialization before range is connected to a data source).
norm_data = 0.5*ones_like(data_array)
else:
norm_data = clip((data_array - low) / range_diff, 0.0, 1.0)
nanmask = isnan(norm_data)
norm_data = where(nanmask, 0, (norm_data * (steps-1)).astype(int))
rgba = zeros(norm_data.shape+(4,), float32)
rgba[...,0] = where(nanmask, 0, take(red_lut, norm_data))
rgba[...,1] = where(nanmask, 0, take(green_lut, norm_data))
rgba[...,2] = where(nanmask, 0, take(blue_lut, norm_data))
rgba[...,3] = where(nanmask, 0, take(alpha_lut, norm_data))
return rgba
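if __name__ == '__main__':
    # Illustrative exercise of the fallbacks above (added as a sketch; the data
    # values are arbitrary and not part of the original module).
    idx = np.array([0.0, 1.0, 2.0, 3.0, 9.0])
    val = np.array([10.0, 11.0, 12.0, 13.0, 14.0])
    pts, sel = scatterplot_gather_points(idx, -1.0, 5.0, val, 9.0, 15.0,
                                         index_sel=[1, 2])
    print('points in range:\n', pts)   # the last point (index 9.0) is out of range
    print('selection flags:', sel)     # 1 marks points picked by index_sel
    img = np.zeros((2, 2, 4), np.uint8)
    keep = np.array([[True, False], [False, True]])
    apply_selection_fade(img, keep, 0.0, (128, 128, 128))
    print('faded image:\n', img)       # unselected pixels get the background color
    lut = np.linspace(0.0, 1.0, 4).astype(float32)
    rgba = map_colors(np.array([0.0, 0.5, 1.0]), 4, 0.0, 1.0, lut, lut, lut, lut)
    print('mapped rgba:\n', rgba)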
|
def roll_dX(X):
op = {}
for i in range(X):
op[i+1] = 1.0/X
return op
def app(dyct,key,val):
if key in dyct:
dyct[key]+=val
else:
dyct[key]=val
return dyct
def p_add(a,b):
op = {}
for akey in a:
for bkey in b:
op = app(op, akey+bkey, a[akey]*b[bkey])
return op
def p_subtract(a,b):
op = {}
for akey in a:
for bkey in b:
op = app(op, akey-bkey, a[akey]*b[bkey])
return op
def p_multiply(a,b):
op = {}
for akey in a:
for bkey in b:
op = app(op, akey*bkey, a[akey]*b[bkey])
return op
def p_divide(a,b):
op = {}
for akey in a:
for bkey in b:
op = app(op, akey/bkey, a[akey]*b[bkey])
return op
def p_min(a,b):
op = {}
for akey in a:
for bkey in b:
op = app(op, min(akey,bkey), a[akey]*b[bkey])
return op
def amt_greater(a,b): #what is the probability that a>b?
greater = 0
for akey in a:
for bkey in b:
if akey>bkey:
greater += a[akey]*b[bkey]
return greater
def p_minusdivide(a,b): #special thing for Flame
op={}
for akey in a:
for bkey in b:
op = app(op, akey-akey/bkey, a[akey]*b[bkey])
return op
def p_minusdivide_II(a,b): #special thing for Ocean
op={}
for akey in a:
for bkey in b:
op = app(op, akey - akey*bkey/100, a[akey]*b[bkey])
return op
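#worked example added for clarity (not part of the original calculation):
#p_add convolves two distributions, e.g. 2d2 -> {2: 0.25, 3: 0.5, 4: 0.25},
#and amt_greater gives one-sided win chances such as P(d6 > d4)
print("2d2:", p_add(roll_dX(2), roll_dX(2)))
print("P(d6 > d4):", amt_greater(roll_dX(6), roll_dX(4)))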
Solar = p_subtract(p_add({45:1.0}, roll_dX(2)), roll_dX(2))
Lunar = {16:1.0}
World = p_subtract(p_add({77:1.0}, roll_dX(4)), roll_dX(4))
split = p_add({15:1.0},roll_dX(80))
print(split)
Earth = p_divide(p_multiply(World, split), {100:1.0})
Ocean = p_minusdivide_II(World, split)
print(World)
print(Earth)
print(Ocean)
Breeze = p_subtract(p_add({13:1.0}, roll_dX(4)), roll_dX(4))
Flame = {30:1.0} #day 374
Flame_addon = p_min(roll_dX(20), roll_dX(20))
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 375
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 376
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 377
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 378
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 379
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 380
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 381
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 382
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon) #day 383
FlameAndAsh = p_add(Flame, Flame_addon)
Ash = p_divide(Flame, {4:1.0})
Flame = p_add(p_minusdivide(Flame, {4:1.0}),Flame_addon)
for thing in Flame:
print(thing)
for thing in Flame:
print(Flame[thing])
for thing in Ash:
print(thing)
for thing in Ash:
print(Ash[thing])
Void = p_subtract(p_add({24:1.0}, roll_dX(8)), roll_dX(8))
Doom = p_add({27:0.99, 99:0.01}, roll_dX(6))
Spite = {0:1.0}
manas = {"Solar":Solar, "Lunar":Lunar, "Ocean":Ocean, "Breeze":Breeze, "Flame":Flame, "Ash":Ash, "Earth":Earth, "Void":Void, "Doom":Doom, "Spite":Spite}
badmanas = ["Void", "Doom", "Spite"]
outcomes={}
for mana1 in manas:
outcomes[mana1]={}
for mana2 in manas:
outcomes[mana1][mana2] = {"Demon":0, "Failure":0, "Success":0}
if (mana1=="Flame" and mana2=="Ash") or (mana1=="Ash" and mana2=="Flame"):
outcomes[mana1][mana2]["Success"] = amt_greater(FlameAndAsh,{69:1.0})
outcomes[mana1][mana2]["Failure"] = 1.0 - outcomes[mana1][mana2]["Success"]
elif (mana1=="Earth" and mana2=="Ocean") or (mana1=="Ocean" and mana2=="Earth"):
outcomes[mana1][mana2]["Success"] = amt_greater(World,{69:1.0})
outcomes[mana1][mana2]["Failure"] = 1.0 - outcomes[mana1][mana2]["Success"]
elif mana1!=mana2:
for key1 in manas[mana1]:
for key2 in manas[mana2]:
if (((mana1 in badmanas) and (key1>key2)) or ((mana2 in badmanas) and (key1<key2))):
outcomes[mana1][mana2]["Demon"] += manas[mana1][key1]*manas[mana2][key2]
else:
if (key1+key2)>69: #nice
outcomes[mana1][mana2]["Success"] += manas[mana1][key1]*manas[mana2][key2]
else:
outcomes[mana1][mana2]["Failure"] += manas[mana1][key1]*manas[mana2][key2]
print(outcomes)
prettyOutcomes={}
for mana1 in manas:
for mana2 in manas:
if mana1!=mana2:
prettyOutcomes[mana1+" "+mana2] = {}
prettyOutcomes[mana1+" "+mana2]["Demon"] = str(round(outcomes[mana1][mana2]["Demon"]*100,2))+"%"
prettyOutcomes[mana1+" "+mana2]["Failure"] = str(round(outcomes[mana1][mana2]["Failure"]*100,2))+"%"
prettyOutcomes[mana1+" "+mana2]["Success"] = str(round(outcomes[mana1][mana2]["Success"]*100,2))+"%"
print(mana1,mana2,prettyOutcomes[mana1+" "+mana2])
print(prettyOutcomes)
|
#!/usr/bin/env python3
import numpy as np
from dataclasses import dataclass
from typing import Tuple
@dataclass
class PIDSettings:
""" PID Controller Settings. """
kp: float
ki: float
kd: float
max_i: float # windup
max_u: float # max effort
cutoff_freq: float # used for derivative filter coef
def _filter_coefs(c: float):
""" Premultiplied butterworth filter coefs """
s = (1 / (1 + c * c + 1.414 * c))
w = [1.0, 2.0, 1.0, -(c * c - 1.414 * c + 1), -(-2 * c * c + 2)]
return np.multiply(w, s)
class PID(object):
"""
Simple PID class.
Supported features:
* Max Windup clamping
* Smooth derivative (2nd order butterworth)
* Vectorized operation - (input doesn't need to be scalars)
Reference:
https://bitbucket.org/AndyZe/pid/src/master/
"""
def __init__(self, settings: PIDSettings):
self.settings = settings
# ~raw state variables...
self.error_ = None
self.error_i_ = None
self.error_d_ = None
# filtered (smooth) state.
self.f_error_ = None
self.f_error_d_ = None
@property
def kp(self):
return self.settings.kp
@property
def ki(self):
return self.settings.ki
@property
def kd(self):
return self.settings.kd
def set_gains(self, kp: float, ki: float, kd: float):
self.settings.kp = kp
self.settings.ki = ki
self.settings.kd = kd
def set_max_i(self, max_i: float):
self.settings.max_i = max_i
def reset(self, soft=True):
if soft:
if self.error_ is not None:
self.error_.fill(0.0)
self.error_i_.fill(0.0)
self.error_d_.fill(0.0)
self.f_error_.fill(0.0)
self.f_error_d_.fill(0.0)
else:
self.error_ = None
self.error_i_ = None
self.error_d_ = None
self.f_error_ = None
self.f_error_d_ = None
def _allocate(self, shape: Tuple[int, ...]):
# NOTE(ycho): `3` here is just the buffer length for
# maintaining a smooth derivative.
self.error_ = np.zeros((3,) + shape, np.float32) # 3xN
self.error_i_ = np.zeros(shape, np.float32) # N
self.error_d_ = np.zeros_like(self.error_) # 3xN
# Filtered ...
self.f_error_ = np.zeros_like(self.error_) # 3xN
self.f_error_d_ = np.zeros_like(self.error_d_) # 3xN
def __call__(self, err: float, dt: float):
# If this is the first invocation since reset,
# Configure the controller buffers.
if self.error_ is None:
self._allocate(np.shape(err))
# Set the current error.
self.error_ = np.roll(self.error_, -1, axis=0)
self.error_[-1] = err
# Apply numerical integration and clip the results.
self.error_i_ += self.error_[-1] * dt
self.error_i_ = np.clip(
self.error_i_, -self.settings.max_i, self.settings.max_i,
out=self.error_i_)
# Apply (smooth) numerical differentiation.
t = np.tan((self.settings.cutoff_freq * 2 * np.pi) * 0.5 * dt)
# FIXME(ycho): Remove hardcoded epsilon (0.01),
# Or reparametrize filter coefficients to be numerically stable at or
# near 0 (if applicable).
if np.abs(t) <= 0.01:
t = 0.01 * np.sign(t)
c = 1.0 / t
k = _filter_coefs(c)
self.f_error_ = np.roll(self.f_error_, -1, axis=0)
self.f_error_[-1] = k.dot(np.r_[self.error_, self.f_error_[:2]])
self.error_d_ = np.roll(self.error_d_, -1, axis=0)
self.error_d_[-1] = (1.0 / dt) * (self.error_[2] - self.error_[1])
self.f_error_d_ = np.roll(self.f_error_d_, -1, axis=0)
self.f_error_d_[-1] = k.dot(np.r_[self.error_d_,
self.f_error_d_[:2]])
# Collect contributions.
u_p = self.kp * self.error_[-1]
u_i = self.ki * self.error_i_
u_d = self.kd * self.f_error_d_[-1]
u = np.zeros_like(u_p)
if self.kp > 0:
u += u_p
if self.ki > 0:
u += u_i
if np.abs(self.kd) > 0:
u += u_d
# Clip output, and return.
u = np.clip(u, -self.settings.max_u, self.settings.max_u, out=u)
return u
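if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; the gains, timestep and the toy
    # "plant" below are arbitrary and not tuned for anything real).
    settings = PIDSettings(kp=1.0, ki=0.1, kd=0.05, max_i=1.0, max_u=2.0,
                           cutoff_freq=10.0)
    pid = PID(settings)
    target, state, dt = 1.0, 0.0, 0.01
    for _ in range(5):
        u = float(pid(target - state, dt))
        state += u * dt  # integrate the control effort into a trivial plant
        print(f'u={u:+.4f} state={state:.4f}')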
|
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView, RedirectView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import redirect_to_login
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
from django.utils import timezone
from .models import Draft, Tag, Comment, Activity
from .forms import DraftForm
from drafthub.utils import PageContext
Blog = get_user_model()
class QueryFromBlog:
def get_queryset(self):
return Draft.objects.filter(
blog__username=self.kwargs['blog'])
class AccessRequired:
def _user_has_access(self, request):
if request.user.is_authenticated:
self.object = self.get_object()
return request.user == self.object.blog
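        # An unauthenticated user gets a (truthy) login redirect back from this
        # helper, so dispatch() falls through to LoginRequiredMixin, which then
        # performs the actual redirect to the login page.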
return redirect_to_login(request.get_full_path())
def dispatch(self, request, *args, **kwargs):
if not self._user_has_access(request):
return redirect(self.object)
return super().dispatch(request, *args, **kwargs)
class DraftCreateView(LoginRequiredMixin, CreateView):
form_class = DraftForm
template_name = 'draft/new.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
page_meta = PageContext(self.request)
page_meta.title = 'publishing a new article'
context.update(page_meta.context)
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
form.instance.blog = self.request.user
form.instance.slug = self._get_slug(form.instance)
form.instance.did = self._get_did(form.instance)
form.save()
self._set_tags(form)
return super().form_valid(form)
def _get_slug(self, instance, unique_len=6):
from django.utils.text import slugify
from .utils import generate_random_string
max_length = Draft._meta.get_field('slug').max_length
author = instance.blog.username
non_unique_slug = slugify(instance.title)
non_unique_slug = non_unique_slug[: max_length - unique_len - 1]
if non_unique_slug.endswith('-'):
non_unique_slug = non_unique_slug[:-1]
slug = non_unique_slug
while Draft.objects.filter(did=instance.get_did(author,slug)).exists():
unique = generate_random_string()
slug = non_unique_slug + '-' + unique
return slug
def _set_tags(self, form):
tags = form.cleaned_data['tags']
draft = form.instance
if tags:
for tag_name in tags:
tag, created = Tag.objects.get_or_create(name=tag_name)
draft.tags.add(tag)
def _get_did(self, instance):
blog = instance.blog
slug = instance.slug
return instance.get_did(blog, slug)
class DraftDetailView(QueryFromBlog, DetailView):
model = Draft
template_name = 'draft/draft.html'
context_object_name = 'draft'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
draft = self.get_object()
page_meta = PageContext(self.request)
page_meta.author = draft.blog.username
page_meta.title = draft.title
page_meta.description = draft.description
page_meta.keywords = ', '.join([tag.name for tag in draft.tags.all()])
page_meta.image = draft.image
context.update(page_meta.context)
return context
def get_object(self):
obj = super().get_object()
referer = self.request.META.get('HTTP_REFERER') or ''
        if self.request.user not in obj.views:
            if obj.get_absolute_url() not in referer:
obj.hits += 1
obj.save(update_fields=['hits'])
if self.request.user.is_authenticated:
activity, created = Activity.objects.get_or_create(
blog=self.request.user,
draft=obj
)
if not created:
activity.save(update_fields=['viewed'])
if self.request.user.social_auth.exists():
obj.updated = self._get_updated(obj)
if obj.updated:
obj.save(update_fields=['updated'])
return obj
def _get_updated(self, obj):
import requests
import json
from django.utils.dateparse import parse_datetime
from .utils import get_data_from_url
user = self.request.user
social_user = self.request.user.social_auth.get()
extra_data = social_user.extra_data
token = extra_data['access_token']
data = get_data_from_url(obj.github_url)
endpoint = 'https://api.github.com/graphql'
query = f"""query {{
viewer {{
login
}}
rateLimit {{
limit
cost
remaining
}}
repository(owner: "{data['login']}", name: "{data['repo']}"){{
object(expression: "{data['branch']}"){{
... on Commit {{
history(path: "{data['name']}", first:1){{
edges {{
node {{
message
oid
author {{
date
user {{
name
url
login
isViewer
}}
}}
}}
}}
}}
}}
}}
}}
}}"""
headers = {'Authorization': f'bearer {token}'}
GraphiQL_connect = requests.post(
endpoint,
json={'query': query},
headers=headers
)
api_data = json.loads(GraphiQL_connect.text)
last_commit = api_data['data']['repository']['object']['history']
last_commit = last_commit['edges'][0]['node']['author']['date']
last_commit = parse_datetime(last_commit)
tzinfo = last_commit.tzinfo
last_commit = last_commit.replace(tzinfo=tzinfo).astimezone(tz=None)
if last_commit > obj.created:
return last_commit
return None
class DraftUpdateView(QueryFromBlog, AccessRequired, LoginRequiredMixin,
UpdateView):
form_class = DraftForm
template_name = 'draft/form.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_type': 'draft_edit',
})
return context
def form_valid(self, form):
form.instance.tags.clear()
self._set_tags(form)
return super().form_valid(form)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get_initial(self):
initial = super().get_initial()
tags = self.object.tags.all().values_list('name', flat=True)
initial.update({
'tags': ', '.join(tags),
})
return initial
def _set_tags(self, form):
tags = form.cleaned_data['tags']
draft = form.instance
if tags:
for tag_name in tags:
tag, created = Tag.objects.get_or_create(name=tag_name)
draft.tags.add(tag)
class DraftDeleteView(QueryFromBlog, AccessRequired, LoginRequiredMixin,
DeleteView):
model = Draft
template_name = 'draft/form.html'
context_object_name = 'draft'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_type': 'draft_delete',
})
return context
def get_success_url(self):
args = (self.kwargs['blog'],)
return reverse_lazy('blog', args=args)
class CommentCreateView(LoginRequiredMixin, CreateView):
model = Comment
fields = ['content']
template_name = 'draft/form.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_type': 'comment_create',
'comment_draft': get_object_or_404(
Draft,
slug=self.kwargs['slug'],
blog__username=self.kwargs['blog']
)
})
return context
def get_initial(self):
initial = super().get_initial()
if 'quote' in self.request.GET.keys():
quote_comment_pk = self.request.GET['quote']
comment = Comment.objects.filter(pk=quote_comment_pk)
if not comment.exists():
return initial
comment = comment.get()
quoted_content = '\n> '.join(comment.content.splitlines())
initial['content'] = \
'> **[{} said:](#{})** \n> {}\n\n'.format(
comment.blog.username,
comment.pk,
quoted_content,
)
return initial
def form_valid(self, form):
form.instance.blog = self.request.user
form.instance.draft = get_object_or_404(
Draft,
slug=self.kwargs['slug'],
blog__username=self.kwargs['blog']
)
form.save()
return super().form_valid(form)
class CommentUpdateView(AccessRequired, LoginRequiredMixin, UpdateView):
model = Comment
template_name = 'draft/form.html'
fields = ['content']
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_type': 'comment_edit',
'comment_draft':get_object_or_404(
Draft,
slug=self.kwargs['slug'],
blog__username=self.kwargs['blog']
)
})
return context
def form_valid(self, form):
form.instance.updated = timezone.now()
form.save()
return super().form_valid(form)
class CommentDeleteView(AccessRequired, LoginRequiredMixin, DeleteView):
model = Comment
template_name = 'draft/form.html'
context_object_name = 'comment'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'form_type': 'comment_delete',
'comment_draft':get_object_or_404(
Draft,
slug=self.kwargs['slug'],
blog__username=self.kwargs['blog']
)
})
return context
def get_success_url(self):
kwargs = {
'blog': self.kwargs['blog'],
'slug': self.kwargs['slug'],
}
return reverse_lazy('draft', kwargs=kwargs)+"#third-content"
class LikeRedirectView(LoginRequiredMixin, RedirectView):
def get_redirect_url(self, *args, **kwargs):
slug = self.kwargs.get('slug')
blog = self.kwargs.get('blog')
obj = get_object_or_404(Draft, slug=slug, blog__username=blog)
activity = Activity.objects.filter(blog=self.request.user, draft=obj)
if activity.exists():
activity = activity.get()
if activity.liked:
activity.liked = None
else:
activity.liked = timezone.now()
activity.save(update_fields=['liked'])
else:
activity = Activity(blog=self.request.user, draft=obj)
activity.liked = timezone.now()
activity.save()
return obj.get_absolute_url()
class FavoriteRedirectView(LoginRequiredMixin, RedirectView):
def get_redirect_url(self, *args, **kwargs):
slug = self.kwargs.get('slug')
blog = self.kwargs.get('blog')
obj = get_object_or_404(Draft, slug=slug, blog__username=blog)
activity = Activity.objects.filter(blog=self.request.user, draft=obj)
if activity.exists():
activity = activity.get()
if activity.favorited:
activity.favorited = None
else:
activity.favorited = timezone.now()
activity.save(update_fields=['favorited'])
else:
activity = Activity(blog=self.request.user, draft=obj)
activity.favorited = timezone.now()
activity.save()
return obj.get_absolute_url()
class TagListView(ListView):
model = Draft
template_name = 'draft/tag.html'
context_object_name = 'tag_drafts'
paginate_by = 20
def get_queryset(self):
self.tag = get_object_or_404(Tag, name=self.kwargs['tag'])
return self.model.objects.filter(tags=self.tag)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({
'tag': self.tag,
})
return context
class TopicsListView(ListView):
model = Draft
template_name = 'draft/topics.html'
context_object_name = 'topics_drafts'
paginate_by = 5
def get_queryset(self):
sort = self.request.GET.get('s')
drafts = Draft.objects.all()
if sort == 'latest':
self.heading = 'latest'
self.description = 'The latest published articles'
return drafts.order_by('-created')
elif sort == 'updated':
self.heading = 'updated'
self.description = 'The latest updated articles'
return drafts.filter(updated__isnull=False).order_by('-updated')
self.heading = 'popular'
self.description = 'The most popular articles today'
return drafts
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
n_tags = 7
tags = Tag.objects.all()
popular_tags_by_name = [tag.name for tag in tags[:n_tags]]
page_meta = PageContext(self.request)
page_meta.keywords = ', '.join(popular_tags_by_name)
context.update({
'tags_popular': tags[:n_tags],
'heading': self.heading,
'description': self.description,
**page_meta.context,
})
return context
# API
from django.http import JsonResponse
def tag_list_api(request):
tag_list = [tag.name for tag in Tag.objects.all()]
return JsonResponse({'tags': tag_list})
def render_markdown_api(request):
from .utils import markdown
    url = request.GET.get('url')
    try:
        return JsonResponse({'markdown': markdown(url)})
    except Exception:
        # Rendering failed; fall back to an empty or generic response.
        if url == '':
            return JsonResponse({'markdown': ''})
        return JsonResponse({'markdown': 'No data could be retrieved.'})
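# Example response shapes (illustrative; the URL paths depend on the project's urls.py):
#   tag_list_api        -> {"tags": ["python", "django", ...]}
#   render_markdown_api -> {"markdown": "<rendered markdown>"} or
#                          {"markdown": "No data could be retrieved."}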
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.modules.multihead_attention import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def test_append_prev_key_padding_mask(self):
bsz = 1
src_len = 4
cases = [
# no padding mask
(None, None, None),
# current padding mask only
(
torch.tensor([[1]]).bool(),
None,
torch.tensor([[0, 0, 0, 1]]).bool(),
),
# previous padding mask only
(
None,
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 0]]).bool(),
),
# both padding masks
(
torch.tensor([[1]]).bool(),
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 1]]).bool(),
),
]
for c in cases:
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
c[0],
c[1],
batch_size=bsz,
src_len=src_len,
static_kv=False,
)
if key_padding_mask is not None:
self.assertTrue(
torch.all(torch.eq(key_padding_mask, c[2])),
f"Unexpected resultant key padding mask: {key_padding_mask}"
f" given current: {c[0]} and previous: {c[1]}",
)
self.assertEqual(key_padding_mask.size(0), bsz)
self.assertEqual(key_padding_mask.size(1), src_len)
else:
self.assertIsNone(c[2])
if __name__ == "__main__":
unittest.main()
|
from datetime import date
from datetime import datetime as dt
from functools import reduce
import unittest
from context import utils
from context import data_driven_design as ddd
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
#from .context import data_driven_design as ddd
class TestDataDrivenDesign(unittest.TestCase):
def test_getRandomAnnuals(self):
def tallyReduceFn(tally, year):
tally[year] = 1 if year not in tally else tally[year] + 1
return tally
k = 3
bultenler = ddd.getRandomAnnuals(k)
cur_year = date.today().year
self.assertEqual(len(bultenler), k * (cur_year - 2010 + 1))
years = list(map(lambda bulten: bulten.getYear(), bultenler))
yearTally = reduce(tallyReduceFn, years, dict())
expectedYears = [i//k for i in range(2010*k, k*(cur_year + 1))]
expectedTally = reduce(tallyReduceFn, expectedYears, dict())
self.assertEqual(yearTally,expectedTally)
#def test_inspectPageLayout(self):
# ddd.inspectPageLayout(20)
def test_getIhaleList(self):
dates = ["01.04.2021"]
bultenler = ddd.getBultensByDates(dates)
iknIndex={
'2021/166678':{
'ikn':'2021/166678',
'idare adi':'Gaziantep Su ve Kanalizasyon İdaresi (GASKİ) Genel Müdürlüğü',
'idare adresi':'İncilipınar Mah. Kıbrıs Caddesi Eruslu İş Merkezi 27000 \nŞehitkamil/Gaziantep',
'idare telefon ve faks numarasi':'3422111300 - 3422318833',
'kaynak':'https://ekap.kik.gov.tr/EKAP/',
'mal adi':'Motorin Alım İşi',
'mal niteligi':'Genel Müdürlüğümüz Bünyesindeki Araç, İş Makinesi, Dizel \nJeneratör ve Diğer Dizel Ekipmanlarda Kullanılmak Üzere \n2.500.000 Litre Motorin Alınacaktır. \nAyrıntılı bilgiye EKAP’ta yer alan ihale dokümanı içinde bulunan \nidari şartnameden ulaşılabilir.',
'mal teslim yeri':'GASKİ Genel Müdürlüğü/25 Aralık İşletme Tesisleri',
'mal teslim tarihi':'İdarenin ihtiyacı doğrultusunda peyder pey sözleşme süresi \niçerisinde malın tamamı teslim edilecektir.',
'ise baslama tarihi':'Sözleşmenin imzalanmasına müteakip 10 gün içinde işe \nbaşlanacaktır.',
'son teklif tarih ve saati':'04.05.2021 - 14:00',
'komisyon toplanti yeri':'GASKİ Genel Müdürlüğü/Destek Hizmetleri Daire Başkanlığı/'
}, '2021/172451':{
'ikn':'2021/172451',
'idare adresi':'Kayaönü Mah. 42035 Nolu Cad. No:40 27060 Şehitkamil/Gaziantep',
'idare telefon ve faks numarasi':'3422209614 - 3422209622',
'idare e-posta adresi':'gaziantep.ihale@saglik.gov.tr',
'kaynak':'https://ekap.kik.gov.tr/EKAP/',
'mal niteligi':'240.000 Litre Akaryakıt (Motorin) Alımı. Toplam 240.000 Litre \nYakıt Talebinin; 220.000 Litresi El-Bab Hastanesi ve Bağlı Sağlık \nMerkezleri İhtiyacı İçin, 20.000 Litresi Cerablus Hastanesi ve Bağlı \nSağlık Merkezleri İhtiyaçları İçindir. \nAyrıntılı bilgiye EKAP’ta yer alan ihale dokümanı içinde bulunan \nidari şartnameden ulaşılabilir.',
'mal teslim yeri':'Müdürlüğümüz, Suriye Görev Gücü Başkanlığına bağlı El-Bab \nHastanesi ve bağlı sağlık merkezleri ile Cerablus Hastanesi ve \nbağlı sağlık merkezlerine ait, Jeneratörler ve Araçlara ait yakıt \nihtiyaçlarını, ilgili İdareler sözleşme süresince yükleniciden istediği \nşekilde ve oranda peyder pey olarak talep edecek ve yüklenici \nidarenin istediği şekilde teslim edilecektir.',
'mal teslim tarihi':'Müdürlüğümüz, Suriye Görev Gücü Başkanlığına bağlı El-Bab \nHastanesi ve bağlı Sağlık Merkezleri ile Cerablus Hastanesi ve bağlı \nSağlık Merkezinin Jeneratörler ve Hizmet Araçlarına ait peyder pey \nolarak talep edeceği yakıt ihtiyaçlarını, Hastanelerde jeneratörler \n7/24 saat aktif olarak çalışacağından dolayı, jeneratörler için talep \nedilecek yakıt ihtiyaçlarını yükleniciye talebin bildirmesine \nmüteakip, acil durumlarda aynı gün, acil olmayan durumlarda ise \nen geç beş (5) iş günü içerisinde ilgili hastanenin tanklarına idarenin \nistediği şekilde boşaltacak ve sorunsuz bir şekilde teslim edecektir. \nAyrıca, yüklenici sözleşme süresince idarelerin hizmet araçlarına \notomatik taşıt tanıma sistemini ücretsiz olarak takacak olup, \nAraçların yakıt ihtiyacını 7 (yedi) gün 24 saat nispetinde idarenin \nistediği şekilde yakıt verecektir.',
'ihale yeri':'Gaziantep İl Sağlık Müdürlüğü A-Blok 1.Kat İhale Salonu \n(Kayaönü Mah. 42035 Nolu Cad. No:40 Şehitkâmil/Gaziantep) - \n(İpek Yolu Üzeri Safir Otel Bitişiği)',
'son teklif tarih ve saati':'29.04.2021 - 10:00'
}
}
for i in bultenler:
for ihale in i.getIhaleList():
self.assertEqual(ihale, iknIndex[ihale['ikn']])
class TestPdfParser(unittest.TestCase):
def test_getPage(self):
dates = ["31.01.2017"]
bultenler = ddd.getBultensByDates(dates)
for i in bultenler:
expectedPageId = 5
page = i.getPage(expectedPageId)
# pageid is one-indexed
self.assertEqual(page.pageid - 1, expectedPageId)
for i in bultenler:
expectedPageId = 2
page = i.getPage(expectedPageId)
self.assertEqual(page.pageid - 1, expectedPageId)
for i in bultenler:
expectedPageId = 20
page = i.getPage(expectedPageId)
self.assertEqual(page.pageid - 1, expectedPageId)
for i in bultenler:
expectedPageId = 10
page = i.getPage(expectedPageId)
self.assertEqual(page.pageid - 1, expectedPageId)
def test_textSearcher(self):
# 10 dates
dates = ["31.01.2017","30.06.2016","29.03.2013"]
#dates = ["31.01.2017","30.06.2016","29.03.2013","28.11.2014","26.06.2013","24.04.2015","01.07.2019","01.04.2021","04.11.2020", "05.07.2018"]
expectedTally = {"31.01.2017": 36, "30.06.2016": 23, "29.03.2013": 39, "28.11.2014": 10, "26.06.2013": 10, "24.04.2015": 21, "01.07.2019": 14,"01.04.2021":36,"04.11.2020":16, "05.07.2018":36}
bultenler = ddd.getBultensByDates(dates)
# no cursor
textSearchers = map(lambda bulten: (dt.strftime(bulten.getDate(), "%d.%m.%Y"), bulten.textSearcher('temizlik')), bultenler)
actualTally = {}
for dateStr, searcher in textSearchers:
for find in searcher:
foundTxt = utils.asciify(find.get_text().lower())
count = foundTxt.count('temizlik')
log.debug(f'There are {count} counts of the text search query within this component')
actualTally[dateStr] = count if dateStr not in actualTally else actualTally[dateStr] + count
self.assertEqual(expectedTally[dateStr], actualTally[dateStr])
|
import unittest
from iobeam.resources import device
_PROJECT_ID = 1
_DEVICE_ID = "py_test_id"
_DEVICE_NAME = "py_test_device"
class TestDevice(unittest.TestCase):
def test_validConstructor(self):
d = device.Device(_PROJECT_ID, _DEVICE_ID, deviceName=_DEVICE_NAME)
self.assertEqual(_PROJECT_ID, d.projectId)
self.assertEqual(_DEVICE_ID, d.deviceId)
self.assertEqual(_DEVICE_NAME, d.deviceName)
def test_validConstructorNoName(self):
d = device.Device(_PROJECT_ID, _DEVICE_ID)
self.assertEqual(_PROJECT_ID, d.projectId)
self.assertEqual(_DEVICE_ID, d.deviceId)
self.assertTrue(d.deviceName is None)
def test_invalidProjectId(self):
# None is not a valid project id
badId = None
try:
d = device.Device(badId, _DEVICE_ID)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("projectId must be an int", str(e))
# No non-int project ids
badId = "50"
try:
d = device.Device(badId, _DEVICE_ID)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("projectId must be an int", str(e))
# No negative project ids
badId = -1
try:
d = device.Device(badId, _DEVICE_ID)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("projectId must be greater than 0", str(e))
# 0 is not a valid project id
badId = 0
try:
d = device.Device(badId, _DEVICE_ID)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("projectId must be greater than 0", str(e))
def test_invalidDeviceId(self):
# None is not a valid device id
badId = None
try:
d = device.Device(_PROJECT_ID, badId)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("deviceId must be a string", str(e))
# Must be a str
badId = 1
try:
d = device.Device(_PROJECT_ID, badId)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("deviceId must be a string", str(e))
# Cannot be 0-length
badId = ""
try:
d = device.Device(_PROJECT_ID, badId)
self.assertTrue(False)
except ValueError as e:
self.assertEqual("deviceId must be more than 0 characters", str(e))
|
import argparse
import torch.nn.functional as F
from .. import load_graph_data
from ..train import train_and_eval
from ..train import register_general_args
from .gat import GAT
def gat_model_fn(args, data):
heads = ([args.n_heads] * args.n_layers) + [args.n_out_heads]
return GAT(data.graph,
args.n_hidden_layers,
data.n_feats,
args.n_hidden_units,
data.n_classes,
heads,
F.elu,
args.in_drop,
args.attn_drop,
args.negative_slope,
args.residual)
def register_gat_args(parser):
parser.add_argument("--n-hidden-units", type=int, default=16,
help="number of hidden gcn units")
parser.add_argument("--n-hidden-layers", type=int, default=1,
help="number of hidden gat layers")
parser.add_argument("--n-heads", type=int, default=8,
help="number of hidden attention heads")
parser.add_argument("--n-out-heads", type=int, default=1,
help="number of output attention heads")
parser.add_argument("--in-drop", type=float, default=.6,
help="input feature dropout")
parser.add_argument("--attn-drop", type=float, default=.6,
help="attention dropout")
parser.add_argument("--residual", action="store_true", default=False,
help="use residual connection")
parser.add_argument('--negative-slope', type=float, default=0.2,
help="the negative slope of leaky relu")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GAT')
register_general_args(parser)
register_gat_args(parser)
args = parser.parse_args()
print('Parsed args:', args)
train_and_eval(gat_model_fn, load_graph_data.load(args), args)
|
import os
import json
import dateutil.parser
from urllib.parse import urlparse
from datetime import datetime
from celery import shared_task
from django.conf import settings
from django.utils.timezone import utc
@shared_task
def load_data_to_db(parent_result, session_id):
from ..models import Site, Page, Article, ArticleSnapshot, Session
session = Session.objects.get(id=session_id)
session.load_started = datetime.utcnow().replace(tzinfo=utc)
session.save()
site = None
feed_file = os.path.join(settings.DATA_DIR, f"{session_id}.json")
with open(feed_file) as file:
for line in file:
row = json.loads(line)
# convert some data to pythonic
timestamp = dateutil.parser.parse(row['timestamp'])
url = urlparse(row['url'])
try:
publish_date = dateutil.parser.parse(row['publish_date'])
except Exception:
publish_date = None
            # site: only (re)fetch when unset or the host changed
            if not site or site.url != url.netloc:
site, created = Site.objects.get_or_create(url=url.netloc)
# page
try:
page = Page.objects.get(site=site, url=url.path)
if page.last_visit < timestamp:
page.last_visit = timestamp
page.save()
except Page.DoesNotExist:
page = Page(site=site, url=url.path, last_visit=timestamp)
page.save()
# article
try:
article = Article.objects.get(site=site, idx=row['idx'])
# TODO: update the article
if article.last_updated < timestamp:
article.last_updated = timestamp
article.save()
except Article.DoesNotExist:
article = Article( site = site,
idx = row['idx'],
last_updated = timestamp,
title = row['title'],
body = row['body'],
publish_date = publish_date)
article.save()
# article snapshot
snapshot = ArticleSnapshot( session = session,
page = page,
article = article,
timestamp = timestamp,
title = row['title'],
body = row['body'],
publish_date = publish_date)
try:
snapshot.save()
except Exception:
# TODO: is bad, ignore now
pass
session.load_finished = datetime.utcnow().replace(tzinfo=utc)
session.save()
|
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from valid_short import _schema
class TestValidShort(unittest.TestCase):
def test_valid_short(self):
r = _schema.parse_file('src/fixed_struct.bin')
|
import re
from avmess.controllers.login import LoginHandler
from avmess.controllers.register import RegisterHandler
from avmess.controllers.messages import MessageHandler
from avmess.controllers.connect import ConnectionHandler
from avmess.controllers.rooms import RoomHandler
class Router(object):
urlpatterns = [
(r'^/login/?$', LoginHandler),
(r'^/register/?$', RegisterHandler),
(r'^/messages/?$', MessageHandler),
(r'^/messages(/?(?P<id>[0-9]*))/?$', MessageHandler),
(r'^/connect/?$', ConnectionHandler),
(r'^/rooms/?$', RoomHandler),
(r'^/rooms(/?(?P<id>[0-9]*))/?$', RoomHandler),
]
@classmethod
def get_handler(cls, url):
for url_regex, handler in cls.urlpatterns:
match = re.match(url_regex, url)
if match:
return handler, match.groupdict()
return None, None
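if __name__ == '__main__':
    # Quick illustration of the routing table (assumes the handler imports above resolve).
    print(Router.get_handler('/messages/42'))   # -> (MessageHandler, {'id': '42'})
    print(Router.get_handler('/nowhere'))       # -> (None, None)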
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-09 13:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=200)),
('ammount', models.DecimalField(decimal_places=2, default=0.0, max_digits=15)),
],
),
migrations.CreateModel(
name='Tax',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name_plural': 'Taxes',
},
),
migrations.CreateModel(
name='TaxDefinition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('rate', models.DecimalField(decimal_places=2, default=10.0, max_digits=15)),
],
),
migrations.CreateModel(
name='Expense',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='accounting.Record')),
],
bases=('accounting.record',),
),
migrations.CreateModel(
name='Income',
fields=[
('record_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='accounting.Record')),
],
bases=('accounting.record',),
),
migrations.AddField(
model_name='tax',
name='record',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounting.Record'),
),
]
|
#!/usr/bin/env python3
import OpenGL.GL as GL
import numpy as np
import assimpcy
from core.shader import Shader
from core.mesh import Mesh
VERTEX_SHADER_NAME = 'model/model.vert'
FRAGMENT_SHADER_NAME = 'model/model.frag'
class Model(Mesh):
def __init__(self, lights_manager, attributes, index=None,
k_a=(0, 0, 0), k_d=(1, 0, 0), k_s=(1, 1, 1), s=16.0):
self.lights_manager = lights_manager
shader = Shader(VERTEX_SHADER_NAME, FRAGMENT_SHADER_NAME)
self.lights_manager.add_shader(shader)
super().__init__(shader, attributes, index)
self.k_a = k_a
self.k_d = k_d
self.k_s = k_s
self.s = s
self.add_locations('normal_matrix', 'k_a', 'k_d', 'k_s', 's', 'w_camera_position')
position = attributes[0]
self.bounds = (np.min(position, axis=0), np.max(position, axis=0))
def draw(self, projection, view, model, normal_matrix, camera):
GL.glUseProgram(self.shader.glid)
self.lights_manager.set_uniforms(self.shader, 0)
GL.glUniformMatrix4fv(self.locations['normal_matrix'], 1, True, normal_matrix)
GL.glUniform3fv(self.locations['k_a'], 1, self.k_a)
GL.glUniform3fv(self.locations['k_d'], 1, self.k_d)
GL.glUniform3fv(self.locations['k_s'], 1, self.k_s)
GL.glUniform1f(self.locations['s'], self.s)
GL.glUniform3fv(self.locations['w_camera_position'], 1, camera.position)
super().draw(projection, view, model, normal_matrix, camera)
def load_model(file, lights_manager, isSRGB=True):
""" load resources from file using assimp, return list of Model """
toLinearRGB = lambda color: np.power(color, 2.2) if isSRGB else color
try:
pp = assimpcy.aiPostProcessSteps
flags = pp.aiProcess_Triangulate | pp.aiProcess_GenSmoothNormals
scene = assimpcy.aiImportFile(file, flags)
except assimpcy.all.AssimpError as exception:
print('Error: loading \'{}\': {}'.format(file, exception.args[0].decode()))
return []
# prepare model nodes
models = []
for mesh in scene.mMeshes:
mat = scene.mMaterials[mesh.mMaterialIndex].properties
model = Model(lights_manager, [mesh.mVertices, mesh.mNormals], mesh.mFaces,
k_a=toLinearRGB(mat.get('COLOR_AMBIENT', (0, 0, 0))),
k_d=toLinearRGB(mat.get('COLOR_DIFFUSE', (1, 0, 0))),
k_s=toLinearRGB(mat.get('COLOR_SPECULAR', (0.5, 0.5, 0.5))),
s=mat.get('SHININESS', 16.))
models.append(model)
return models
|
import time
from unittest import TestCase
import numpy as np
from algorithms.agent import AgentRandom
from algorithms.tests.test_wrappers import TEST_ENV_NAME
from algorithms.topological_maps.topological_map import hash_observation
from utils.envs.doom.doom_utils import doom_env_by_name, make_doom_env
from utils.utils import log
class TestLandmarkEncoder(TestCase):
@staticmethod
def make_env():
return make_doom_env(doom_env_by_name(TEST_ENV_NAME))
def test_hashing(self):
env = self.make_env()
env.reset()
agent = AgentRandom(self.make_env, {})
trajectory = []
n_obs = 200
for i in range(n_obs):
obs, _, _, _ = env.step(agent.best_action())
trajectory.append(obs)
start_hashing = time.time()
hashes = []
for obs in trajectory:
obs_hash = hash_observation(obs)
hashes.append(obs_hash)
log.debug('Took %.3f seconds to hash %d observations', time.time() - start_hashing, n_obs)
self.assertEqual(len(trajectory), len(hashes))
for i in range(n_obs):
for j in range(n_obs):
if np.array_equal(trajectory[i], trajectory[j]):
self.assertEqual(hashes[i], hashes[j])
else:
self.assertNotEqual(hashes[i], hashes[j])
|
"""Manage and handle price data fron the main awattprice package."""
import asyncio
from decimal import Decimal
from typing import Optional
import awattprice
from awattprice.defaults import Region
from box import Box
from liteconfig import Config
from loguru import logger
class DetailedPriceData:
"""Store extra information in addition to the region price data to describe it in more detail."""
data: Box
lowest_price: Box
def __init__(self, data: Box):
"""Initialize a detailed price data container."""
self.data = data
lowest_price = min(data.prices, key=lambda price_point: price_point.marketprice.value)
self.lowest_price = lowest_price
    def get_prices_below_value(self, below_value: Decimal, taxed: bool) -> list[Box]:
        """Get price points whose price is at or below the given value.
        :param taxed: If set, prices are taxed before being compared to the given value. The given value
            itself is never taxed.
        """
below_value_prices = []
for price_point in self.data.prices:
marketprice = price_point.marketprice.ct_kwh(taxed=taxed, round_=True)
if marketprice <= below_value:
below_value_prices.append(price_point)
return below_value_prices
async def collect_regions_data(config: Config, regions: list[Region]) -> Box:
"""Get the current prices for multiple regions."""
prices_tasks = [awattprice.prices.get_current_prices(region, config) for region in regions]
regions_prices = await asyncio.gather(*prices_tasks)
regions_prices = dict(zip(regions, regions_prices))
valid_regions_prices = {}
for region, prices in regions_prices.items():
if prices is None:
logger.warning(f"No current price data for region {region.name}. Skipping it.")
continue
valid_regions_prices[region] = prices
detailed_regions_prices = {
region: DetailedPriceData(data=prices) for region, prices in valid_regions_prices.items()
}
return detailed_regions_prices
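# Hedged usage sketch (added for illustration; not part of awattprice). Real price
# points come from awattprice.prices.get_current_prices via collect_regions_data
# above. _StubMarketprice is a stand-in that only mimics the two members
# DetailedPriceData touches (.value and .ct_kwh()), so the selection logic can be
# demonstrated without any network access.
if __name__ == '__main__':
    class _StubMarketprice:
        def __init__(self, value):
            self.value = value
        def ct_kwh(self, taxed, round_):
            # Stand-in: return the raw value as Decimal, ignoring tax and rounding.
            return Decimal(self.value)
    stub_data = Box({"prices": [{"marketprice": _StubMarketprice(3)}, {"marketprice": _StubMarketprice(7)}]})
    detailed = DetailedPriceData(data=stub_data)
    print(detailed.lowest_price.marketprice.value)  # -> 3
    print(len(detailed.get_prices_below_value(Decimal("5"), taxed=False)))  # -> 1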
|
import os, time
def cmd_say(msg):
    # Speak the message aloud via the macOS `say` command.
    os.system("echo '{}' | say".format(msg))
def count_down_by_seconds(num):
    while num > 0:
        print("{} seconds remaining".format(num))
        num -= 1
        time.sleep(1)
    print('Recording finished')
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
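# Minimal demo (added for illustration; not part of the original script).
# cmd_say assumes the macOS `say` binary, so only the portable countdown is
# exercised here.
if __name__ == '__main__':
    count_down_by_seconds(3)  # prints one line per second, then "Recording finished"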
|
import requests
from flask import jsonify, send_from_directory, Response, request
from flask_yoloapi import endpoint, parameter
import settings
from funding.bin.utils import get_ip
from funding.bin.qr import QrCodeGenerator
from funding.factory import app, db_session
from funding.orm.orm import Proposal, User
@app.route('/api/1/proposals')
@endpoint.api(
parameter('status', type=int, location='args', default=1),
parameter('cat', type=str, location='args'),
parameter('limit', type=int, location='args', default=20),
parameter('offset', type=int, location='args', default=0)
)
def api_proposals_get(status, cat, limit, offset):
try:
proposals = Proposal.find_by_args(status=status, cat=cat, limit=limit, offset=offset)
except Exception as ex:
print(ex)
return 'error', 500
return [p.json for p in proposals]
@app.route('/api/1/convert/wow-usd')
@endpoint.api(
parameter('amount', type=int, location='args', required=True)
)
def api_coin_usd(amount):
from funding.bin.utils import Summary, coin_to_usd
prices = Summary.fetch_prices()
return jsonify(usd=coin_to_usd(amt=amount, btc_per_coin=prices['coin-btc'], usd_per_btc=prices['btc-usd']))
@app.route('/api/1/qr')
@endpoint.api(
parameter('address', type=str, location='args', required=True)
)
def api_qr_generate(address):
"""
Generate a QR image. Subject to IP throttling.
:param address: valid receiving address
:return:
"""
from funding.factory import cache
qr = QrCodeGenerator()
if not qr.exists(address):
# create a new QR code
ip = get_ip()
cache_key = 'qr_ip_%s' % ip
hit = cache.get(cache_key)
if hit and ip not in ['127.0.0.1', 'localhost']:
return Response('Wait a bit before generating a new QR', 403)
throttling_seconds = 3
cache.set(cache_key, {'wow': 'kek'}, throttling_seconds)
created = qr.create(address)
if not created:
raise Exception('Could not create QR code')
return send_from_directory('static/qr', '%s.png' % address)
@app.route('/api/1/wowlight')
@endpoint.api(
parameter('version', type=str, location='args', required=True)
)
def api_wowlight_version_check(version):
"""
Checks incoming wowlight wallet version, returns False when the version is
too old and needs to be upgraded (due to hard-forks)
:param version:
:return: bool
"""
versions = {
'0.1.0': False,
'0.1.1': False,
'0.1.2': True
}
if version not in versions:
return False
return versions[version]
@app.route('/api/1/wow/supply')
@endpoint.api()
def api_wow_supply():
from funding.factory import cache
cache_key = 'wow_supply'
hit = cache.get(cache_key)
if hit:
return float(hit.get('data', -1))
try:
resp = requests.get('http://explorer.wowne.ro/api/emission', headers={'User-Agent': 'WFS'})
resp.raise_for_status()
blob = resp.json()
assert 'data' in blob
assert 'coinbase' in blob['data']
    except (requests.RequestException, ValueError, AssertionError):
        return 'error fetching circulating supply', 500
supply = blob['data'].get('coinbase') / 100000000000
cache.set(cache_key, {'data': supply}, 120)
return supply
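# Hedged client-side sketch (added for illustration; not part of this module).
# It shows how a consumer might call the supply endpoint defined above. The
# host and port are assumptions for a local development server, and the exact
# response envelope depends on how flask_yoloapi serializes return values.
if __name__ == '__main__':
    resp = requests.get('http://localhost:5000/api/1/wow/supply')
    print(resp.status_code, resp.text)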
|
# -*- coding: utf-8 -*-
from bpy.types import Panel
from mmd_tools import register_wrap
from mmd_tools.core.camera import MMDCamera
@register_wrap
class MMDCameraPanel(Panel):
bl_idname = 'OBJECT_PT_mmd_tools_camera'
bl_label = 'MMD Camera Tools'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'data'
@classmethod
def poll(cls, context):
obj = context.active_object
return obj and (obj.type == 'CAMERA' or MMDCamera.isMMDCamera(obj))
def draw(self, context):
obj = context.active_object
layout = self.layout
if MMDCamera.isMMDCamera(obj):
mmd_cam = MMDCamera(obj)
empty = mmd_cam.object()
camera = mmd_cam.camera()
row = layout.row()
c = row.column()
c.prop(empty, 'location')
c.prop(camera, 'location', index=1, text='Distance')
c = row.column()
c.prop(empty, 'rotation_euler')
layout.prop(empty.mmd_camera, 'angle')
layout.prop(empty.mmd_camera, 'is_perspective')
else:
layout.operator('mmd_tools.convert_to_mmd_camera', text='Convert')
|
#Write import statements for classes invoice and invoice_item
from src.assignments.assignment9.invoice import Invoice
from src.assignments.assignment9.invoice_item import InvoiceItem
'''
LOOK AT THE TEST CASES FOR HINTS
Create an invoice object
In the loop:
Create a new InvoiceItem
Create a user controlled loop to continue until y is not typed, in loop...
Prompt user for description, quantity, and cost.
Add values to the InvoiceItem.
Add the InvoiceItem to the invoice object.
Once user types a letter other than y display the Invoice to screen
'''
invoice = Invoice('ABC Company', '03282018')
loop = 'y'
while loop == 'y':
description = input('enter description')
quantity = int(input('enter quantity'))
cost = float(input('enter cost'))
invoice_item = InvoiceItem(description, quantity, cost)
invoice.add_invoice_item(invoice_item)
loop = input('enter y to cont.')
if loop != 'y':
invoice.print_invoice()
|
# -*- coding: utf-8 -*-
"""
slicr.utils
~~~~~~~~~~~
Utility functions and helpers.
:copyright: © 2018
"""
from collections import namedtuple
def convert_args(args_dict):
"""Convert dictionary to named tuple enabling class like attribute access.
:param args_dict: Dictionary of arguments to convert.
:type args_dict: dict
:return: Named tuple of arguments.
:rtype: collections.namedtuple
"""
return namedtuple(
typename='arguments',
field_names=args_dict.keys()
)(**args_dict)
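# Minimal usage sketch (added for illustration; not part of the original
# module): a plain dict becomes a named tuple with attribute-style access.
if __name__ == '__main__':
    args = convert_args({'host': 'localhost', 'port': 5432, 'debug': True})
    print(args.host, args.port, args.debug)  # -> localhost 5432 True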
|