text stringlengths 8 6.05M |
|---|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
from TestUtils import TestUtilsMixin
log = logging.getLogger('test.auto')
class BigRootTablet(TestUtilsMixin, unittest.TestCase):
    """ACCUMULO-542: a large root tablet will fail to load if it doesn't fit
    in the tserver scan buffers."""

    order = 80
    settings = TestUtilsMixin.settings.copy()
    settings['table.scan.max.memory'] = '1024'
    settings['tserver.compaction.major.delay'] = '60m'

    def setUp(self):
        TestUtilsMixin.setUp(self)

    def tearDown(self):
        TestUtilsMixin.tearDown(self)

    def runTest(self):
        # Split the metadata table, then repeatedly create tables and flush
        # the metadata table so the root tablet grows before the restart.
        commands = ['table !METADATA', 'addsplits 0 1 2 3 4 5 6 7 8 9 a']
        for table_id in range(10):
            commands.append('createtable %s' % table_id)
            commands.append('flush -t !METADATA')
        self.shell(self.masterHost(), '\n'.join(commands) + '\n')
        self.stop_accumulo()
        self.start_accumulo()
def suite():
    """Build the suite holding the big-root-tablet regression test."""
    tests = unittest.TestSuite()
    tests.addTest(BigRootTablet())
    return tests
|
def sort_by_height(a):
    """Sort the people (non -1 entries) in non-descending order while every
    tree (-1 entry) stays at its original index."""
    people = sorted(x for x in a if x != -1)
    next_person = iter(people)
    # Rebuild the row: trees stay put, people are consumed in sorted order.
    return [x if x == -1 else next(next_person) for x in a]
'''
Task
Some people are standing in a row in a park. There are trees between
them which cannot be moved.
Your task is to rearrange the people by their heights in a
non-descending order without moving the trees.
Example
For a = [-1, 150, 190, 170, -1, -1, 160, 180], the output should be
[-1, 150, 160, 170, -1, -1, 180, 190].
Input/Output
[input] integer array a
If a[i] = -1, then the ith position is occupied by a tree.
Otherwise a[i] is the height of a person standing in the ith position.
Constraints:
5 ≤ a.length ≤ 30,
-1 ≤ a[i] ≤ 200.
[output] an integer array
Sorted array a with all the trees untouched.
'''
|
def group_letter_check(S):
    """Return 1 if S is a "group word" (every letter appears in exactly one
    contiguous run), else 0.  An empty string counts as a group word."""
    seen = set()
    prev = None
    for ch in S:
        # A letter reappearing non-consecutively breaks the group property.
        if ch != prev and ch in seen:
            return 0
        seen.add(ch)
        prev = ch
    return 1
# Read N words from stdin and count how many of them are "group words"
# (every letter occurring in a single contiguous run).
N = int(input())  # number of words to check
ans = 0  # running count of group words
for i in range(N):
    l = input()
    ans += group_letter_check(l)  # contributes 1 for a group word, else 0
print(ans)
# Done |
class solution:
    def isSubsequence(self, s, t):
        """Return True if *s* is a subsequence of *t*, i.e. all characters of
        *s* appear in *t* in the same relative order.

        The original method had no body (a syntax error); this implements the
        standard single-pass check.

        :param s: candidate subsequence
        :param t: string to scan
        :return: bool
        """
        it = iter(t)
        # `ch in it` advances the iterator, so order is enforced implicitly.
        return all(ch in it for ch in s)
|
# 今年一定要美好!!!
# 1. 暴力枚举 + set去重
class Solution:
    def longestNiceSubstring(self, s: str) -> str:
        """Return the longest substring in which every letter present occurs
        in both lower and upper case (brute force over all substrings; the
        earliest longest one wins)."""

        def is_nice(sub: str) -> bool:
            # Nice iff case-folding exactly halves the distinct-char count:
            # each letter contributes both of its cases.
            return len(set(sub)) == 2 * len(set(sub.lower()))

        best_start, best_end, best_len = 0, 0, -1
        length = len(s)
        for left in range(length):
            for right in range(left + 1, length):
                if is_nice(s[left:right + 1]) and right + 1 - left > best_len:
                    best_start, best_end = left, right + 1
                    best_len = right + 1 - left
        return s[best_start:best_end]
# 前缀和 依然要枚举,维护子串每个字母大小写是否出现,因此只用二进制即可
# 分别用两个int的低26位表示
# 贴个叶总的java题解
# class Solution {
# public String longestNiceSubstring(String s) {
# int n = s.length();
# int idx = -1, len = 0;
# for (int i = 0; i < n; i++) {
# int a = 0, b = 0;
# for (int j = i; j < n; j++) {
# char c = s.charAt(j);
# if (c >= 'a' && c <= 'z') a |= (1 << (c - 'a'));
# else b |= (1 << (c - 'A'));
# if (a == b && j - i + 1 > len) {
# idx = i; len = j - i + 1;
# }
# }
# }
# return idx == -1 ? "" : s.substring(idx, idx + len);
# }
# }
|
import functools
import inspect
from ..session import api_session, AsyncSession
__all__ = (
'APIFunctionMeta',
'BaseFunction',
'api_function',
)
def _wrap_method(cls, orig_name, meth):
    """Wrap *meth* so that, at call time, it resolves the active API session
    and either hands back the coroutine untouched (async session) or runs it
    to completion on the session's worker thread (sync session)."""

    @functools.wraps(meth)
    def _method(*args, **kwargs):
        # Resolve through the class at call time so the function stays
        # correctly bound to the class/instance.
        original = getattr(cls, orig_name)
        coro = original(*args, **kwargs)
        session = api_session.get()
        if session is None:
            raise RuntimeError(
                "API functions must be called "
                "inside the context of a valid API session",
            )
        if isinstance(session, AsyncSession):
            # Async callers await the coroutine themselves.
            return coro
        # Sync session: drive the coroutine (or async generator) on the
        # session's dedicated worker thread.
        if inspect.isasyncgen(coro):
            return session.worker_thread.execute_generator(coro)
        return session.worker_thread.execute(coro)

    return _method
def api_function(meth):
    """
    Mark the wrapped method as the API function method.
    """
    meth._backend_api = True
    return meth
class APIFunctionMeta(type):
    """
    Converts all methods marked with :func:`api_function` into
    session-aware methods that are either plain Python functions
    or coroutines.
    """
    _async = True

    def __init__(cls, name, bases, attrs, **kwargs):
        super().__init__(name, bases, attrs)
        for attr_name, attr_value in attrs.items():
            # Only rewrite methods explicitly marked by @api_function.
            if not hasattr(attr_value, '_backend_api'):
                continue
            # Stash the original under a private alias, then replace the
            # public name with the session-aware wrapper.
            orig_name = '_orig_' + attr_name
            setattr(cls, orig_name, attr_value)
            setattr(cls, attr_name, _wrap_method(cls, orig_name, attr_value))
class BaseFunction(metaclass=APIFunctionMeta):
    """Base class for API function namespaces; the metaclass rewrites every
    method marked with :func:`api_function` into a session-aware wrapper."""
    pass
|
import logging, os, gensim
from collections import defaultdict
import re
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
directory = './dataset/train'
class MySentences(object):
    """Iterable corpus that streams whitespace-tokenized sentences, one per
    line, from every file under the module-level ``directory``."""

    def __iter__(self):
        for filename in os.listdir(directory):
            # Use a context manager so each corpus file is closed promptly;
            # the original left every file handle open.
            with open(directory + '/' + filename) as data:
                for line in data:
                    yield line.split()
# Train a Word2Vec model on the streamed corpus and persist it to disk for
# later similarity queries (see the commented-out REPL below).
sentences = MySentences()
model = gensim.models.Word2Vec(sentences)
model.save('./wordSimilarityModel')
#model = gensim.models.Word2Vec.load('./wordSimilarityModel')
#print model
#while 1:
# print """
# 1. for word similarity
# 2. for relationships
# 3. exit
# """
# n = raw_input()
# n = n.strip()
# if n == '3':
# break
# s = raw_input()
# s = s.strip()
# s = s.split(' ')
# if n == '1':
# print model.similarity(s[0], s[1])
# if n == '2':
# print model.most_similar(positive=[s[2], s[1]], negative=[s[0]])
|
from string import ascii_uppercase
# Map 'A'..'Z' to 1..26.  (The original used xrange and a bare reduce, both
# Python-2-only names that raise NameError on Python 3.)
AZ = dict(zip(ascii_uppercase, range(1, 27)))


def product_mod_47(seq):
    """Return the product of the letter values of *seq*, modulo 47.

    :param seq: iterable of uppercase letters 'A'..'Z'
    :raises KeyError: if *seq* contains a character outside 'A'..'Z'
    """
    prod = 1
    for letter in seq:
        # Reduce mod 47 at every step to keep the intermediate product small.
        prod = prod * AZ[letter] % 47
    return prod


def ride(group, comet):
    """Return 'GO' when the group's and comet's letter products agree mod 47,
    otherwise 'STAY'."""
    return 'GO' if product_mod_47(group) == product_mod_47(comet) else 'STAY'
|
'''Objects/Classes Experiment'''
# Developed by Smit Rao
class Locker(object):
    """A locker that tracks whether it is open or closed via ``status``."""

    def __init__(self, status='Closed'):
        # Lockers start closed unless told otherwise.
        self.status = status

    def open(self):
        """Mark the locker as open."""
        self.status = 'Open'

    def close(self):
        """Mark the locker as closed."""
        self.status = 'Closed'
def makebold(fn):
    """Decorator: wrap the result of *fn* in HTML bold tags."""
    def bold_wrapper():
        return "<b>" + fn() + "</b>"
    return bold_wrapper
def makeitalic(fn):
    """Decorator: wrap the result of *fn* in HTML italic tags."""
    def italic_wrapper():
        return "<i>" + fn() + "</i>"
    return italic_wrapper
# Decorators apply bottom-up: makeitalic wraps first, then makebold,
# so hello() returns "<b><i>hello habr</i></b>".
@makebold
@makeitalic
def hello():
    return "hello habr"
|
__author__ = 'iceke'
import urllib2
from stage import Stage
from bs4 import BeautifulSoup
from spark_data import SparkData
from worm import Worm
from util import Util
def main():
    """Scrape a Spark stage page and print the total GC time summed over all
    task rows of the second table (Python 2 script)."""
    stage_url = 'http://192.168.226.211:8012/stages/'
    # Fetch the task table for stage id 1, attempt 0.
    gc_html = Worm.get_html(stage_url+'stage/?id='+str(1)+'&attempt=0', True)
    gc_soup = BeautifulSoup(gc_html, 'html.parser')
    tables = gc_soup.find_all('table', 'table table-bordered table-striped table-condensed sortable')
    # tables[1] is presumably the per-task table -- TODO confirm against the page layout.
    trs = tables[1].find_all('tr')
    gc_total = 0.0
    for i in range(0, len(trs)):
        tds = trs[i].find_all('td')
        # Column index 8 holds the GC time; a blank cell means no GC recorded.
        gc_str = tds[8].string.strip()
        if gc_str != '':
            gc_total += Util.format_second(gc_str)
    print gc_total


if __name__ == '__main__':
    main()
|
from battle.battleeffect.BattleEffect import BattleEffect
class RunAction(BattleEffect):
    """Battle effect representing an escape attempt (currently always fails)."""

    def __init__(self, source_fighter):
        # Fighter attempting to flee; this action carries no effect type.
        self.source_fighter = source_fighter
        self.effect_type = None

    def get_battle_text(self):
        fighter_name = self.source_fighter.name
        return fighter_name + " tried to run... but couldn't!!"
        # TODO: Implement...
from itertools import islice
def solution1(input):
    """Sum all metadata entries of a tree serialized as a flat int stream.

    Each node is encoded as: child count, metadata count, the children
    (recursively), then that many metadata entries (AoC 2018 day 8 layout).

    :param input: iterator of ints
    :return: sum of every metadata entry; 0 for an exhausted stream
    """
    ret = 0
    # BUGFIX: next() without a default raises StopIteration on an empty
    # stream, so the original None-checks were dead code; supply None.
    cc = next(input, None)
    mc = next(input, None)
    if cc is None or mc is None:
        return ret
    # Recurse into each child, then consume this node's metadata entries.
    ret += sum(solution1(input) for _ in range(cc))
    ret += sum(islice(input, mc))
    return ret
def parse_input1(input):
    """Lazily turn a whitespace-separated string into an iterator of ints."""
    return (int(token) for token in input.split())
if __name__ == '__main__':
    # Read the puzzle input from disk and print the metadata sum.
    with open('input.txt') as fh:
        print(solution1(parse_input1(fh.read().strip())))
import sys
sys.path.insert(0, 'tools/publications_generax/utils_plots')
import plot_rrf
import plot_scaling
import plot_boxplots
import plot_runtimes
import plot_ll
if (__name__ == "__main__"):
    # Only the simulated-metrics plot is currently enabled; the other plot
    # entry points are kept commented out for quick re-activation.
    plot_rrf.plot_simulated_metrics()
    #plot_rrf.plot_simulated_metrics_ils()
    #plot_scaling.plot_scaling()
    #plot_boxplots.plot_model_boxplots()
    #plot_runtimes.plot_runtimes()
    #plot_ll.plot_ll()
|
"""
Kept for Django purposes
""" |
from matplotlib import pyplot as plt
import tensorflow as tf
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
from tensorflow.keras import activations
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras import losses
import random
import numpy as np
import os
# --- Hyper-parameters and experiment switches -------------------------------
latent_dim = 64       # size of the autoencoder's latent code
noise_sigma = 0.35    # stddev of the Gaussian noise added for denoising training
train_AE = True       # True: train the autoencoder; False: load saved weights
sml_train_size = 50   # number of labelled samples for the small classifier
BUFFER_SIZE = 60000   # shuffle buffer (full MNIST training set)
BATCH_SIZE = 64
NOISE_DIM = 64        # dimensionality of the GAN's input noise
NUM_DIGITS = 10       # number of MNIST classes
# load train and test images, and pad & reshape them to (-1,32,32,1)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)).astype('float32') / 255.0
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)).astype('float32') / 255.0
# Pad 28x28 images to 32x32 so the strided conv layers divide evenly.
x_train = np.pad(x_train, ((0, 0), (2, 2), (2, 2), (0, 0)))
x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)))
print(x_train.shape)
print(x_test.shape)
# BUGFIX: a stray debugging exit() here aborted the script after the shape
# prints, leaving the entire pipeline below unreachable; it has been removed.
# One-hot encode the labels for categorical cross-entropy.
y_train = keras.utils.to_categorical(y_train, num_classes=10, dtype='float32')
y_test = keras.utils.to_categorical(y_test, num_classes=10, dtype='float32')
# ---- Denoising autoencoder -------------------------------------------------
# Encoder: 32x32x1 image -> four strided convs -> 2x2x96 -> dense latent code.
encoder = Sequential()
encoder.add(layers.Conv2D(16, (4, 4), strides=(2, 2), activation='relu', padding='same', input_shape=(32, 32, 1)))
encoder.add(layers.Conv2D(32, (3, 3), strides=(2, 2), activation='relu', padding='same'))
encoder.add(layers.Conv2D(64, (3, 3), strides=(2, 2), activation='relu', padding='same'))
encoder.add(layers.Conv2D(96, (3, 3), strides=(2, 2), activation='relu', padding='same'))
encoder.add(layers.Reshape((2 * 2 * 96,)))
encoder.add(layers.Dense(latent_dim))
# at this point the representation is a latent_dim (= 64) dimensional vector
# Decoder mirrors the encoder: latent -> 2x2x96 -> transposed convs -> 32x32x1.
decoder = Sequential()
decoder.add(layers.Dense(2 * 2 * 96, activation='relu', input_shape=(latent_dim,)))
decoder.add(layers.Reshape((2, 2, 96)))
decoder.add(layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), activation='relu', padding='same'))
decoder.add(layers.Conv2DTranspose(32, (3, 3), strides=(2, 2), activation='relu', padding='same'))
decoder.add(layers.Conv2DTranspose(16, (4, 4), strides=(2, 2), activation='relu', padding='same'))
decoder.add(layers.Conv2DTranspose(1, (4, 4), strides=(2, 2), activation='sigmoid', padding='same'))
# End-to-end autoencoder; sigmoid output pairs with binary cross-entropy.
autoencoder = keras.Model(encoder.inputs, decoder(encoder.outputs))
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Weights-only checkpointing for the autoencoder.
checkpoint_path = "model_save/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                 save_weights_only=True)
if train_AE:
    # NOTE(review): checkpoint_dir and cp_callback are recreated here,
    # duplicating the two statements just above -- harmless but redundant.
    checkpoint_dir = os.path.dirname(checkpoint_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                                     save_weights_only=True)
    # Denoising training: Gaussian-corrupted inputs, clean targets.
    autoencoder.fit(x_train + noise_sigma * np.random.randn(*x_train.shape), x_train,
                    epochs=1,
                    batch_size=128,
                    shuffle=True,
                    validation_data=(x_test, x_test),
                    callbacks=[cp_callback])
else:
    # Reuse previously trained weights instead of retraining.
    autoencoder.load_weights(checkpoint_path)
# NOTE(review): this first predict result is immediately overwritten below;
# only the encoder->decoder path two lines down is actually displayed.
decoded_imgs = autoencoder.predict(x_test)
latent_codes = encoder.predict(x_test)
decoded_imgs = decoder.predict(latent_codes)
# Show n originals (top row) above their reconstructions (bottom row).
n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n + 1):
    # Display original
    ax = plt.subplot(2, n, i)
    plt.imshow(x_test[i].reshape(32, 32))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Display reconstruction
    ax = plt.subplot(2, n, i + n)
    plt.imshow(decoded_imgs[i].reshape(32, 32))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# Classifier Network: small MLP trained on the latent codes of a tiny
# labelled subset (semi-supervised evaluation of the AE features).
classifier = Sequential()
classifier.add(layers.Dense(32, activation='relu', input_shape=(latent_dim,)))
classifier.add(layers.Dense(10, activation='softmax'))
train_codes = encoder.predict(x_train[:sml_train_size])
test_codes = encoder.predict(x_test)
classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# NOTE(review): cp_callback writes to the same checkpoint path used for the
# autoencoder above, so this fit overwrites the AE checkpoint -- confirm
# this is intended.
classifier.fit(train_codes, y_train[:sml_train_size],
               epochs=2,
               batch_size=16,
               shuffle=True,
               validation_data=(test_codes, y_test), callbacks=[cp_callback])
# Baseline: clone the architectures (fresh weights) and train encoder +
# classifier end-to-end on the same small labelled subset for comparison.
full_cls_enc = keras.models.clone_model(encoder)
full_cls_cls = keras.models.clone_model(classifier)
full_cls = keras.Model(full_cls_enc.inputs, full_cls_cls(full_cls_enc.outputs))
full_cls.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
full_cls.fit(x_train[:sml_train_size], y_train[:sml_train_size],
             epochs=1,
             batch_size=16,
             shuffle=True,
             validation_data=(x_test, y_test))
# defining the means in which we'll evaluate the model
criterion = keras.losses.BinaryCrossentropy(from_logits=True)  # GAN loss; discriminators output raw logits
accuracy_calc = tf.keras.metrics.BinaryAccuracy()
optimizer = keras.optimizers.Adam()
def create_batches(x, y):
    """
    Return a tf.data.Dataset of shuffled (x, y) batches of BATCH_SIZE.

    :param x: input samples (first axis is the sample axis)
    :param y: matching labels
    """
    return tf.data.Dataset.from_tensor_slices((x, y)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
def gen():
    """
    Returns a generator built from an MLP architecture network
    (noise vector in, latent-space vector out).
    """
    return Sequential([
        layers.Dense(NOISE_DIM, activation='relu', input_shape=(NOISE_DIM,)),
        layers.BatchNormalization(),
        layers.Dense(256, activation='relu'),
        layers.BatchNormalization(),
        layers.Dense(100, activation='relu'),
        layers.BatchNormalization(),
        layers.Dense(latent_dim),
    ])
def conditional_gen():
    """
    Returns a generator built from an MLP architecture network for the
    conditional case (noise concatenated with a one-hot digit vector).
    """
    return Sequential([
        layers.Dense(NOISE_DIM + NUM_DIGITS, activation='relu', input_shape=(NOISE_DIM + NUM_DIGITS,)),
        layers.BatchNormalization(),
        layers.Dense(256, activation='relu'),
        layers.BatchNormalization(),
        layers.Dense(100, activation='relu'),
        layers.BatchNormalization(),
        layers.Dense(latent_dim),
    ])
def disc():
    """
    Returns a discriminator built from an MLP architecture network.
    The single output is a raw logit (losses use from_logits=True).
    """
    return Sequential([
        layers.Dense(128, activation='relu', input_shape=(latent_dim,)),
        layers.Dense(1),
    ])
def conditional_disc():
    """
    Returns a discriminator built from an MLP architecture network for the
    conditional case (latent vector concatenated with a one-hot digit vector).
    The single output is a raw logit (losses use from_logits=True).
    """
    return Sequential([
        layers.Dense(128, activation='relu', input_shape=(latent_dim + NUM_DIGITS,)),
        layers.Dense(1),
    ])
def disc_loss(true_images, false_images):
    """
    Binary cross-entropy for the discriminator: it should score real images
    as 1 and generated (fake) images as 0.
    """
    real_term = criterion(tf.ones_like(true_images), true_images)
    fake_term = criterion(tf.zeros_like(false_images), false_images)
    return real_term + fake_term
def gen_loss(false_images):
    """
    Binary cross-entropy for the generator: its fakes should be scored 1,
    i.e. fool the discriminator.
    """
    return criterion(tf.ones_like(false_images), false_images)
def train(training_data):
    """
    Train the generator and discriminator following the standard GAN
    procedure, operating in the autoencoder's latent space. Returns the
    trained generator.

    :param training_data: tf.data.Dataset yielding (image batch, label batch)
    """
    generator = gen()
    generator_optimizer = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.4)
    generator_loss = tf.keras.metrics.BinaryCrossentropy(from_logits=True)
    generator_acc = tf.keras.metrics.BinaryAccuracy()
    discriminator = disc()
    discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.4)
    discriminator_loss_true_img = tf.keras.metrics.BinaryCrossentropy(from_logits=True)
    discriminator_loss_false_img = tf.keras.metrics.BinaryCrossentropy(from_logits=True)
    discriminator_acc = tf.keras.metrics.BinaryAccuracy()
    for epoch in range(1):
        for batch, labels in training_data:
            noise_matrix = tf.random.normal([BATCH_SIZE, NOISE_DIM])
            # Real samples are the encoder's latent codes of the batch.
            latent_encoder_vec = encoder.predict(batch)
            with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
                generated_images_vec = generator(noise_matrix, training=True)
                real_output = discriminator(latent_encoder_vec, training=True)
                fake_output = discriminator(generated_images_vec, training=True)
                loss_gen = gen_loss(fake_output)
                loss_disc = disc_loss(real_output, fake_output)
                # calculating loss (tracking metrics only; gradients come
                # from loss_gen / loss_disc above):
                generator_loss.update_state(tf.ones_like(fake_output), tf.sigmoid(fake_output))
                discriminator_loss_false_img.update_state(tf.zeros_like(fake_output), tf.sigmoid(fake_output))
                discriminator_loss_true_img.update_state(tf.ones_like(real_output), tf.sigmoid(real_output))
                # calculating accuracy:
                true_labels = tf.concat([tf.ones_like(real_output), tf.zeros_like(fake_output)], 0)
                discriminator_labels = tf.concat([real_output, fake_output], 0)
                discriminator_acc.update_state(true_labels, tf.sigmoid(discriminator_labels))
                generator_acc.update_state(tf.ones_like(fake_output), tf.sigmoid(fake_output))
            gradients_of_generator = gen_tape.gradient(loss_gen, generator.trainable_variables)
            gradients_of_discriminator = disc_tape.gradient(loss_disc, discriminator.trainable_variables)
            generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
            discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
        # plot the last batch of generated samples using the decoder:
        fake_images = decoder.predict(generated_images_vec)
        num = 10
        plt.figure(figsize=(20, 4))
        for i in range(1, num + 1):
            # BUGFIX: the subplot grid previously used the unrelated
            # module-level `n` (which only happened to equal 10); use the
            # local `num` so this function is self-contained.
            ax = plt.subplot(2, num, i)
            plt.imshow(fake_images[i].reshape(32, 32))
            plt.gray()
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
        plt.show()
        print("discriminator accuracy: ", discriminator_acc.result())
        print("generator accuracy: ", generator_acc.result())
        print("discriminator loss on fake images: ", discriminator_loss_false_img.result())
        print("discriminator loss on real images: ", discriminator_loss_true_img.result())
        print("generator loss: ", generator_loss.result())
        generator_loss.reset_states()
        generator_acc.reset_states()
        discriminator_loss_false_img.reset_states()
        discriminator_loss_true_img.reset_states()
        discriminator_acc.reset_states()
    return generator
def interpolating(l1, l2, title):
    """
    Given two latent vectors and a title, compute convex linear combinations
    between them and plot 10 decoded images along the connecting line, under
    the given title.

    :param l1: latent vector at interpolation weight 0
    :param l2: latent vector at interpolation weight 1
    :param title: figure title
    """
    # 11 evenly spaced interpolation weights in [0, 1].
    # NOTE(review): only the first 10 decoded images are displayed below, so
    # the endpoint at weight 1.0 is never shown.
    steps = np.linspace(0, 1, 11)
    images = []
    for i in steps:
        # Convex combination sweeping from l1 (i=0) to l2 (i=1).
        vec = i * l2 + (1 - i) * l1
        images.append(vec)
    decoded_images = decoder.predict(np.array(images))
    num = 10
    plt.figure(figsize=(20, 4))
    for i in range(0, num):
        # Display original
        ax = plt.subplot(1, num, i + 1)
        plt.imshow(decoded_images[i].reshape(32, 32))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.title(title, fontsize=15)
    plt.show()
def train_conditional_gan(training_data):
    """
    Train the generator and discriminator following the GAN procedure.
    This model learns to distinguish different digits by concatenating a
    one-hot digit vector onto the latent vectors/noise.
    Returns the generator.

    :param training_data: tf.data.Dataset yielding (image batch, one-hot labels)
    """
    generator = conditional_gen()
    generator_optimizer = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.4)
    generator_loss = tf.keras.metrics.BinaryCrossentropy(from_logits=True)
    generator_acc = tf.keras.metrics.BinaryAccuracy()
    discriminator = conditional_disc()
    discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.4)
    discriminator_acc = tf.keras.metrics.BinaryAccuracy()
    for epoch in range(1):
        for batch, labels in training_data:
            noise_matrix = tf.random.normal([BATCH_SIZE, NOISE_DIM])
            # Real samples are the encoder's latent codes of the batch.
            latent_encoder_vec = encoder.predict(batch)
            with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
                # NOTE(review): np.random.randint(0, 9) excludes digit 9 from
                # the fake conditioning labels -- confirm whether 0..9 was
                # intended (tf.one_hot / the plotting below use all 10).
                random_digits = np.random.randint(0, 9, BATCH_SIZE)
                fake_one_hot_vectors = tf.one_hot(random_digits, NUM_DIGITS)
                real_one_hot_vectors = labels
                # Condition the generator input and both discriminator inputs
                # on the digit's one-hot vector.
                noise_with_digits = tf.concat([noise_matrix, fake_one_hot_vectors], axis=1)
                generated_images_vec = generator(noise_with_digits, training=True)
                real_output = discriminator(tf.concat([latent_encoder_vec, real_one_hot_vectors], axis=1),
                                            training=True)
                fake_output = discriminator(tf.concat([generated_images_vec, fake_one_hot_vectors], axis=1),
                                            training=True)
                loss_gen = gen_loss(fake_output)
                loss_disc = disc_loss(real_output, fake_output)
                # calculating loss:
                generator_loss.update_state(tf.ones_like(fake_output), tf.sigmoid(fake_output))
                # calculating accuracy:
                true_labels = tf.concat([tf.ones_like(real_output), tf.zeros_like(fake_output)], 0)
                discriminator_labels = tf.concat([real_output, fake_output], 0)
                discriminator_acc.update_state(true_labels, tf.sigmoid(discriminator_labels))
                generator_acc.update_state(tf.ones_like(fake_output), tf.sigmoid(fake_output))
            gradients_of_generator = gen_tape.gradient(loss_gen, generator.trainable_variables)
            gradients_of_discriminator = disc_tape.gradient(loss_disc, discriminator.trainable_variables)
            generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
            discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
        # plot all digits in increasing order (5 rows of 0..9) using the decoder:
        noise = tf.random.normal([50, NOISE_DIM])
        fake_one_hot_vectors = tf.one_hot(list(range(0, 10)) * 5, 10)
        generated_latent_vector = generator(tf.concat([noise, fake_one_hot_vectors], axis=1), training=True)
        fake_images = decoder.predict(generated_latent_vector)
        num = 50
        plt.figure(figsize=(20, 4))
        for i in range(1, num + 1):
            # Display original
            ax = plt.subplot(5, 10, i)
            plt.imshow(fake_images[i - 1].reshape(32, 32))
            plt.gray()
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
        plt.show()
        print("discriminator accuracy: ", discriminator_acc.result())
        print("generator accuracy: ", generator_acc.result())
        print("generator loss: ", generator_loss.result())
        generator_loss.reset_states()
        generator_acc.reset_states()
        discriminator_acc.reset_states()
    return generator
# Question 3: train the unconditional GAN in the AE latent space.
training_data = create_batches(x_train, y_train)
gen_q3 = train(training_data)
# Interpolating between two random points in the GAN's noise space:
noise1 = tf.random.normal([1, NOISE_DIM])
noise2 = tf.random.normal([1, NOISE_DIM])
l1 = gen_q3.predict(noise1)
l2 = gen_q3.predict(noise2)
interpolating(l1, l2, "Interpolating from GAN's latent space")
# Note: we could have just taken x_test[0], x_test[1] since they differ, however
# we wanted to see many different options and therefore executed the following
# code many times (risking the fact that the two digits will be the same, but
# with high probability we get at least one "good" interpolation).
# BUGFIX: random.randint is inclusive on both ends, so randint(0, 10000) could
# index one past the end of x_test; derive the bound from len(x_test) instead.
first_img = x_test[random.randint(0, len(x_test) - 1)].reshape(1, 32, 32, 1)
second_img = x_test[random.randint(0, len(x_test) - 1)].reshape(1, 32, 32, 1)
l1 = encoder.predict(first_img)
l2 = encoder.predict(second_img)
interpolating(l1, l2, "Interpolating from AE's latent space")
# Question 4: train the digit-conditional GAN.
gen_q4 = train_conditional_gan(training_data)
|
import os
import csv
from django.core.management.base import BaseCommand
from geofr.models import Perimeter
# Field column indexes in the source csv
NAME = 2
DEPARTMENT = 0
CODE = 1
MEMBER = 9

# Official French drainage-basin codes -> display names.
# NOTE(review): 'Rhône- Méditérannée' contains a stray space and an unusual
# spelling (commonly 'Méditerranée'); confirm against the source data before
# changing it, as the string may be matched elsewhere.
DRAINAGE_BASINS = {
    'FR000001': 'Rhin-Meuse',
    'FR000002': 'Artois-Picardie',
    'FR000003': 'Seine-Normandie',
    'FR000004': 'Loire-Bretagne',
    'FR000005': 'Adour-Garonne',
    'FR000006': 'Rhône- Méditérannée',
    'FR000007': 'Corse',
    'FR000008': 'Guadeloupe',
    'FR000009': 'Martinique',
    'FR000010': 'Guyane',
    'FR000011': 'Réunion',
    'FR000012': 'Mayotte',
}

# Basin codes located overseas.
OVERSEAS_BASINS = ('FR000008', 'FR000009', 'FR000010', 'FR000011', 'FR000012')
class Command(BaseCommand):
    """Import the list of drainage basins.

    This task is highly inefficient (no batch saving, updating every row one
    by one, etc.) but it will be run only once, so it's not a big deal.

    The file can be downloaded at this address:
    http://www.data.eaufrance.fr/jdd/689a5b99-8d4e-488d-9305-c970b18ad64c
    """

    def add_arguments(self, parser):
        # Single positional argument: path to the downloaded csv file.
        parser.add_argument('csv_file', nargs=1, type=str)

    def handle(self, *args, **options):
        # Create basin perimeters
        basin_to_commune = {}  # basin code -> list of commune codes
        basin_to_epci = {}     # basin code -> list of EPCI codes
        for code, basin_name in DRAINAGE_BASINS.items():
            Perimeter.objects.get_or_create(
                scale=Perimeter.TYPES.basin,
                code=code,
                name=basin_name,
                is_overseas=code in OVERSEAS_BASINS)
            basin_to_commune[code] = list()
            basin_to_epci[code] = list()
        # Import data from csv file
        csv_path = os.path.abspath(options['csv_file'][0])
        with open(csv_path) as csv_file:
            reader = csv.DictReader(csv_file, delimiter=',')
            for row in reader:
                commune_code = row['CdCommune']
                # NOTE(review): an unknown CdComiteBassin raises KeyError on
                # the append below -- presumably the file only contains the
                # 12 known basin codes; verify before re-running.
                basin_code = row['CdComiteBassin']
                basin_to_commune[basin_code].append(commune_code)
        # Update communes with the correct basin codes
        for basin_code in basin_to_commune.keys():
            Perimeter.objects \
                .filter(scale=Perimeter.TYPES.commune) \
                .filter(code__in=basin_to_commune[basin_code]) \
                .update(basin=basin_code)
        # Update epcis with basin codes
        epcis = Perimeter.objects \
            .filter(scale=Perimeter.TYPES.commune) \
            .values_list('epci', 'basin')
        for epci_code, basin_code in epcis:
            # NOTE(review): a commune that received no basin above yields
            # basin_code=None and would raise KeyError here -- assumes every
            # commune was covered by the csv.
            basin_to_epci[basin_code].append(epci_code)
        for basin_code in basin_to_epci.keys():
            Perimeter.objects \
                .filter(scale=Perimeter.TYPES.epci) \
                .filter(code__in=basin_to_epci[basin_code]) \
                .update(basin=basin_code)
|
from openerp.osv import osv, fields
class res_company(osv.osv):
    """Extend res.company with default expense/revenue accounts
    (legacy OpenERP osv API)."""
    _inherit = 'res.company'
    _columns = {
        # Default expense account, restricted to 'other'-type accounts whose
        # user type is 'expense'.
        'account_expense_id': fields.many2one('account.account', u'Conta Despesa Padrao',domain="[('type','=','other'),('user_type.code','=','expense')]"),
        # Default revenue account, restricted to 'other'-type accounts whose
        # user type is 'income'.
        'account_revenue_id': fields.many2one('account.account', u'Conta Receita Padrao',domain="[('type','=','other'),('user_type.code','=','income')]"),
    }


res_company()  # legacy-API registration: instantiating registers the model
import multiprocessing
from multiprocessing import Queue, Pool
import cv2
from src.FPS import FPS
from src.WebcamVideoStream import WebcamVideoStream
from src.ObjectDetection import ObjectDetection
class Realtime:
    """
    Read and apply object detection to an input video stream, using a pool of
    worker processes fed through bounded input/output queues.
    """

    def __init__(self, args):
        # args: dict-like with keys "display", "logger_debug", "queue_size",
        # "num_workers" and "input_device".
        self.display = args["display"] == 1
        self.queue_input = None   # frames awaiting detection
        self.queue_output = None  # frames with detections applied
        self.pool = None          # worker process pool
        self.vs = None            # threaded webcam stream
        self.fps = None           # FPS counter
        self.start_queue(
            args["logger_debug"],
            args["queue_size"],
            args["num_workers"]
        )
        self.start_stream(args["input_device"])

    def start_queue(self, debugger, size, workers):
        """
        Starts the processing queues and the worker pool.
        """
        if debugger:
            # Route multiprocessing-internal logging to stderr (SUBDEBUG).
            logger = multiprocessing.log_to_stderr()
            logger.setLevel(multiprocessing.SUBDEBUG)
        self.queue_input = Queue(maxsize=size)
        self.queue_output = Queue(maxsize=size)
        # Each worker runs ObjectDetection().worker(queue_input, queue_output).
        self.pool = Pool(workers, ObjectDetection().worker, (self.queue_input, self.queue_output))

    def start_stream(self, device):
        """
        Create a threaded video stream and start the FPS counter.
        """
        self.vs = WebcamVideoStream(src=device).start()
        self.fps = FPS().start()

    def start(self):
        """
        Start processing the video feed until capture() signals a stop.
        """
        if self.display:
            print()
            print("=====================================================================")
            print("Starting video acquisition. Press 'q' (on the video windows) to stop.")
            print("=====================================================================")
            print()
        # Start reading and treating the video stream
        running = True
        while running:
            running = self.capture()
        self.destroy()

    def capture(self):
        """
        Capture and process one video frame.
        Returns False when the user pressed 'q', True otherwise.
        """
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return False
        # Capture frame-by-frame
        ret, frame = self.vs.read()
        # No new frame, try again
        if not ret:
            return True
        # Place frame in queue (blocks when full, throttling capture to
        # worker throughput).
        self.queue_input.put(frame)
        # Display the resulting frame (workers emit RGB; OpenCV expects BGR).
        if self.display:
            cv2.imshow('frame', cv2.cvtColor(self.queue_output.get(), cv2.COLOR_RGB2BGR))
        self.fps.update()
        return True

    def destroy(self):
        """
        Stop the workers and stream and close the OpenCV windows.
        """
        # When everything done, release the capture
        self.fps.stop()
        self.pool.terminate()
        self.vs.stop()
        cv2.destroyAllWindows()
|
import torch as tc
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.nn as nn
import matplotlib.pyplot as plt
import random
# Device selection: use the GPU when available.
USE_CUDA=tc.cuda.is_available()
device=tc.device("cuda"if USE_CUDA else "cpu")
print("asdf:",device)
# Seed all RNGs for reproducibility.
random.seed(777)
tc.manual_seed(777)
# NOTE(review): `device` is a torch.device object; whether `device=='cuda'`
# holds depends on the torch version -- comparing `device.type` would be
# unambiguous. Confirm before relying on this branch.
if device=='cuda':
    tc.cuda.manual_seed_all(777)
training_epochs=15
batch_size=100
# MNIST train/test datasets as tensors (downloaded on first run).
mnist_train=dsets.MNIST(root='MNIST_data/',train=True,
                        transform=transforms.ToTensor(),download=True)
mnist_test=dsets.MNIST(root='MNIST_data/',train=False,
                       transform=transforms.ToTensor(),download=True)
data_loader=DataLoader(dataset=mnist_train,
                       batch_size=batch_size,shuffle=True,drop_last=True)
# Softmax-regression model: a single linear layer 784 -> 10.
linear=nn.Linear(784,10,bias=True).to(device)
criterion=nn.CrossEntropyLoss().to(device)  # applies log-softmax internally
optimizer=tc.optim.SGD(linear.parameters(),lr=0.1)
for epoch in range(training_epochs):
    avg_cost=0
    total_batch=len(data_loader)
    for X,Y in data_loader:
        # Flatten each 28x28 image into a 784-dim vector.
        X=X.view(-1,28*28).to(device)
        Y=Y.to(device)
        optimizer.zero_grad()
        hypothesis=linear(X)
        cost=criterion(hypothesis,Y)
        cost.backward()
        optimizer.step()
        # Accumulate mean loss over the epoch.
        avg_cost+=cost/total_batch
    print('Epoch:','%04d'%(epoch+1),'cost=','{:.9f}'.format(avg_cost))
print('Learning finished')
# Evaluation: disable gradient tracking.
with tc.no_grad():
    # NOTE(review): .test_data / .test_labels are deprecated aliases in newer
    # torchvision (use .data / .targets) -- kept as-is here.
    X_test=mnist_test.test_data.view(-1,28*28).float().to(device)
    Y_test=mnist_test.test_labels.to(device)
    prediction=linear(X_test)
    correct_prediction=tc.argmax(prediction,1)==Y_test
    accuracy=correct_prediction.float().mean()
    print('Accuracy:',accuracy.item())
    # Spot-check a single random test sample.
    r=random.randint(0,len(mnist_test)-1)
    X_single_data=mnist_test.test_data[r:r+1].view(-1,28*28).float().to(device)
    Y_single_data=mnist_test.test_labels[r:r+1].to(device)
    print('Label:',Y_single_data.item())
    single_prediction=linear(X_single_data)
    print('Prediction:',tc.argmax(single_prediction,1).item())
# Show the spot-checked digit.
plt.imshow(mnist_test.test_data[r:r+1].view(28,28),
           cmap='Greys',interpolation='nearest')
plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from sanic.log import logger
from sanic.request import Request
from .specification.get_notification_type_specification import get_notification_type_list_query
__all__ = [
# SERVICES WORKING ON LANGUAGE TABLE
'get_notification_type_list'
]
async def get_notification_type_list(
        request: Request) -> list:
    """ Get notification type list ordered by notification id desc.

    :param request: Sanic request; ``request.app.pg`` is presumably an
        asyncpg connection pool -- confirm against the app setup.
    :return: list of dicts, one per fetched row; empty on error or no rows.
    """
    ret_val = []
    query_str = get_notification_type_list_query
    try:
        async with request.app.pg.acquire() as connection:
            rows = await connection.fetch(query_str)
            if rows is not None:
                ret_val = [dict(x) for x in rows]
    except Exception as gclerr:
        # Deliberate best-effort: any failure is logged and an empty list
        # is returned instead of propagating.
        logger.error('get_notification_list service erred with: {}'.format(gclerr))
    return ret_val
|
#!/usr/bin/python2
# -*- coding: ascii -*-
# Ledger Capture The Flag 2018 Challenge 3
# Copyright (C) 2018 Antoine FERRON - BitLogiK
from ECDSA_BTC import *
from ECDSA_256k1 import *
# Load precomputed multiples of the secp256k1 base point used by the ECDSA lib.
load_gtable('G_Table')
import base64
# Check the validity of the example
msgAlice = "Amount:42 From:1Ppecdv2jWjZjdSJjnQs5JaGhethCsdTCL To:1QFmDXuBr9QY5NoRdbYfZBFFP5cTS9rL4E"
sigAlice = "HKaLZ/jSgiehh7cyhP5A7AXfEEwuQudjJiJqQLn2qa6Rc9oH1uZ6LztNIFEnG1Lp4EJnNF/RhXgJcky28lD/j6U="
bitcoin_verify_message("1Ppecdv2jWjZjdSJjnQs5JaGhethCsdTCL", sigAlice, msgAlice)
# Load transactions file (alternating message/signature lines)
file = open("transactions.txt", "r")
txs = file.readlines()
file.close()
# Seek for 2 signatures with the same R = k.G
# (nonce reuse: two ECDSA signatures sharing R leak the private key)
Rsigs = [x[:42] for x in txs[1::2]]
Rsame = [x for x in Rsigs if Rsigs.count(x) > 1][0]
Rsame_idx = [i for i, x in enumerate(Rsigs) if x == Rsame]
# Load these 2 txs (message on the even line, signature on the odd line)
m1_idx = Rsame_idx[0]*2
m1 = txs[m1_idx][:-1].rstrip('\n')
sigi1 = txs[m1_idx+1].rstrip('\n')
m2_idx = Rsame_idx[1]*2
m2 = txs[m2_idx].rstrip('\n')
sigi2 = txs[m2_idx+1].rstrip('\n')
# Check proper loading
BoB_Adr_provided = "1Kx74VzYPdnJ9xxYQRAap4oNsqaAdUdNCA"
bitcoin_verify_message(BoB_Adr_provided, sigi1, m1)
bitcoin_verify_message(BoB_Adr_provided, sigi2, m2)
# Get the private key from these 2 txs (standard nonce-reuse key recovery)
BoB_pv = recoverk(m1,sigi1, m2, sigi2)
BoB_Adr = pvtoadr(BoB_pv, False)
print "Bob computed address :",BoB_Adr
# check the private key is the good one
assert BoB_Adr == BoB_Adr_provided
# sign the message with the recovered key and a fixed nonce k
mbob = "Amount:1000000 From:1Kx74VzYPdnJ9xxYQRAap4oNsqaAdUdNCA To:14pHuKrkRhLKsW6zxHKQ64DFGyKQffj7GW"
privkey = Private_key( BoB_pv )
hm = int(hash_msg(mbob),16)
k = 1521543600
signature = privkey.sign( hm , k )
signature_str = bitcoin_encode_sig( signature )
signature64 = base64.b64encode( signature_str )
print "Signature of",mbob
print signature64
|
################################################################################
# #
# CALCULATE TIME-DEPENDENT AND TIME-AVERAGED QUANTITIES #
# #
################################################################################
from __future__ import print_function, division
from analysis_fns import *
import hdf5_to_dict as io
import util
import os, sys
import multiprocessing
import psutil
import pickle
import itertools
import numpy as np
# Option to calculate fluxes at (just inside) r = 5
# This reduces interference from floors
floor_workaround_flux = False
# Option to ignore accretion at high magnetization (funnel)
# This also reduces interference from floors
floor_workaround_funnel = False

# Whether to calculate each expensive set of variables
# Once performed once, calculations will be ported to each new output file
calc_ravgs = True
calc_basic = True
calc_jet_profile = False
calc_jet_cuts = True
calc_lumproxy = True
calc_etot = True
calc_efluxes = False
calc_outfluxes = False

calc_pdfs = True
pdf_nbins = 200  # histogram resolution for the *_pdf outputs

if len(sys.argv) < 2:
    util.warn('Format: python eht_analysis.py /path/to/dumps [start time] [start radial averages] [stop radial averages] [stop time]')
    sys.exit()

# This doesn't seem like the _right_ way to do optional args
# Skips everything before tstart, averages between tavg_start and tavg_end
tstart = None
tavg_start = None
tavg_end = None
tend = None
# A leading "-d" flag enables debug and shifts positional args right by one.
if sys.argv[1] == "-d":
    debug = True
    path = sys.argv[2]
    if len(sys.argv) > 3:
        tstart = float(sys.argv[3])
    if len(sys.argv) > 4:
        tavg_start = float(sys.argv[4])
    if len(sys.argv) > 5:
        tavg_end = float(sys.argv[5])
    if len(sys.argv) > 6:
        tend = float(sys.argv[6])
else:
    debug = False
    path = sys.argv[1]
    if len(sys.argv) > 2:
        tstart = float(sys.argv[2])
    if len(sys.argv) > 3:
        tavg_start = float(sys.argv[3])
    if len(sys.argv) > 4:
        tavg_end = float(sys.argv[4])
    if len(sys.argv) > 5:
        tend = float(sys.argv[5])

# Load the dump list, run header, and grid geometry once, up front.
dumps = io.get_dumps_list(path)
ND = len(dumps)
hdr = io.load_hdr(dumps[0])
geom = io.load_geom(hdr, path)

if tstart is None:
    tstart = 0.

# If the time after which to average wasn't given, just use the back half of dumps
if tavg_start is None:
    tavg_start = io.get_dump_time(dumps[ND//2]) - 0.1
# Sometimes we don't know times (i.e. above will be 0) but want averages
# We always want to average over all dumps in these cases
if tavg_start < 0.:
    tavg_start = 0.

if tavg_end is None:
    tavg_end = io.get_dump_time(dumps[-1])
    if tavg_end == 0.:
        tavg_end = float(ND)
if tend is None:
    tend = io.get_dump_time(dumps[-1])
    if tend == 0.:
        tend = float(ND)

# Leave several extra zones if using MKS3 coordinates
if geom['metric'] == "MKS3":
    iEH = i_of(geom, hdr['r_eh'])+4
else:
    iEH = i_of(geom, hdr['r_eh'])

if floor_workaround_flux:
    iF = i_of(geom, 5) # Measure fluxes at r=5M
else:
    iF = iEH

# Max radius when computing "total" energy
iEmax = i_of(geom, 40)

# BZ luminosity
# 100M seems like the standard measuring spot (or at least, BHAC does it that way)
# L_BZ seems constant* after that, but much higher within ~50M
if geom['r_out'] < 100 or geom['r'][-1,geom['n2']//2,0] < 100: # If in theory or practice the sim is small...
    iBZ = i_of(geom, 40) # most SANEs
else:
    iBZ = i_of(geom, 100) # most MADs

# Theta indices bounding the disk band used by the EHT-style averages.
jmin, jmax = get_j_vals(geom)

print("Running from t={} to {}, averaging from {} to {}".format(tstart, tend, tavg_start, tavg_end))
print("Using EH at zone {}, Fluxes at zone {}, Emax within zone {}, L_BZ at zone {}".format(iEH, iF, iEmax, iBZ))
def avg_dump(n):
    """Process dump number *n* and return its diagnostics as a dict.

    Keys carry a suffix encoding their axes (_rt = radius x time sample,
    _r = contribution to the time average, _th/_phi/_pdf likewise).
    Quantities contributing to time averages are only produced when the
    dump time falls inside [tavg_start, tavg_end].

    Relies on module-level globals (dumps, hdr, geom, tstart, tend,
    tavg_start, tavg_end, iEH, iF, iBZ, jmin, jmax, the calc_* flags)
    because it is mapped over dumps by util.iter_parallel.
    """
    out = {}

    out['t'] = io.get_dump_time(dumps[n])
    # When we don't know times, fudge
    if out['t'] == 0 and n != 0:
        out['t'] = n

    if out['t'] < tstart or out['t'] > tend:
        #print("Loaded {} / {}: {} (SKIPPED)".format((n+1), len(dumps), out['t']))
        # Still return the time
        return out
    else:
        print("Loaded {} / {}: {}".format((n+1), len(dumps), out['t']))

    dump = io.load_dump(dumps[n], hdr, geom, extras=False)

    # EHT Radial profiles: special fn for profile, averaged over phi, 1/3 theta, time
    if calc_ravgs:
        for var in ['rho', 'Theta', 'B', 'Pg', 'Ptot', 'beta', 'u^phi', 'u_phi', 'sigma', 'FM']:
            out[var+'_rt'] = eht_profile(geom, d_fns[var](dump), jmin, jmax)
            # "jet" band = everything outside the disk range [jmin, jmax)
            out[var+'_jet_rt'] = eht_profile(geom, d_fns[var](dump), 0, jmin) + eht_profile(geom, d_fns[var](dump), jmax, geom['n2'])
            if out['t'] >= tavg_start and out['t'] <= tavg_end:
                out[var+'_r'] = out[var+'_rt']
                out[var+'_jet_r'] = out[var+'_jet_rt']

        if out['t'] >= tavg_start and out['t'] <= tavg_end:
            # CORRELATION FUNCTION
            for var in ['rho', 'betainv']:
                Rvar = corr_midplane(geom, d_fns[var](dump))
                out[var+'_cf_rphi'] = Rvar
                out[var+'_cf_10_phi'] = Rvar[i_of(geom,10),:]

            # THETA AVERAGES
            for var in ['betainv', 'sigma']:
                out[var+'_25_th'] = theta_av(geom, d_fns[var](dump), i_of(geom, 25), 5, fold=False)

            # These are divided averages, not average of division, so not amenable to d_fns
            Fcov01, Fcov13 = Fcov(geom, dump, 0, 1), Fcov(geom, dump, 1, 3)
            out['omega_hth'] = theta_av(geom, Fcov01, iEH, 1) / theta_av(geom, Fcov13, iEH, 1)
            out['omega_av_hth'] = theta_av(geom, Fcov01, iEH, 5) / theta_av(geom, Fcov13, iEH, 5)

            # This produces much worse results
            #out['omega_alt_hth'] = theta_av(Fcov(dump, 0, 2), iEH, 1) / theta_av(Fcov(dump, 2, 3), iEH, 1)
            #out['omega_alt_av_hth'] = theta_av(Fcov(dump, 0, 2), iEH-2, 5) / theta_av(Fcov(dump, 2, 3), iEH-2, 5)

    if calc_basic:
        # FIELD STRENGTHS
        # The HARM B_unit is sqrt(4pi)*c*sqrt(rho) which has caused issues:
        #norm = np.sqrt(4*np.pi) # This is what I believe matches T,N,M '11 and Narayan '12
        norm = 1 # This is what the EHT comparison uses?

        if geom['mixed_metrics']:
            # When different, B1 will be in the _vector_ coordinates. Must perform the integral in those instead of zone coords
            # Some gymnastics were done to keep in-memory size small
            dxEH = np.einsum("i,...ij->...j", np.array([0, geom['dx1'], geom['dx2'], geom['dx3']]), np.linalg.inv(geom['vec_to_grid'][iEH,:,:,:]))
            out['Phi_b'] = 0.5*norm * np.sum( np.fabs(dump['B1'][iEH,:,:]) * geom['gdet_vec'][iEH,:,None]*dxEH[:,None,2]*dxEH[:,None,3], axis=(0,1) )
        else:
            # Horizon magnetic flux, plus the radial profile it came from.
            out['Phi_sph_r'] = 0.5*norm*sum_shell(geom, np.fabs(dump['B1']))
            out['Phi_b'] = out['Phi_sph_r'][iEH]
            out['Phi_mid_r'] = np.zeros_like(out['Phi_sph_r'])
            for i in range(geom['n1']):
                out['Phi_mid_r'][i] = norm*sum_plane(geom, -dump['B2'], within=i)

        # FLUXES
        # Radial profiles of Mdot and Edot, and their particular values
        # EHT code-comparison normalization has all these values positive
        for var,flux in [['Edot','FE'],['Mdot','FM'],['Ldot','FL']]:
            if out['t'] >= tavg_start and out['t'] <= tavg_end:
                out[flux+'_r'] = sum_shell(geom, d_fns[flux](dump))
            out[var] = sum_shell(geom, d_fns[flux](dump), at_zone=iF)
        # Mdot and Edot are defined inward
        out['Mdot'] *= -1
        out['Edot'] *= -1

        # Maxima (for gauging floors)
        for var in ['sigma', 'betainv', 'Theta']:
            out[var+'_max'] = np.max(d_fns[var](dump))
        # Minima
        for var in ['rho', 'U']:
            out[var+'_min'] = np.min(d_fns[var](dump))
        # TODO KEL? plot in "floor space"? Full set of energy ratios?

    # Profiles of different fluxes to gauge jet power calculations
    if calc_jet_profile:
        for var in ['rho', 'bsq', 'FM', 'FE', 'FE_EM', 'FE_Fl', 'FL', 'FL_EM', 'FL_Fl', 'betagamma', 'Be_nob', 'Be_b', 'mu']:
            out[var+'_100_tht'] = np.sum(d_fns[var](dump)[iBZ], axis=-1)
            if out['t'] >= tavg_start and out['t'] <= tavg_end:
                out[var+'_100_th'] = out[var+'_100_tht']
                out[var+'_100_thphi'] = d_fns[var](dump)[iBZ,:,:]
                out[var+'_rth'] = d_fns[var](dump).mean(axis=-1)

    # Blandford-Znajek Luminosity L_BZ
    # This is a lot of luminosities!
    if calc_jet_cuts:
        # TODO cut on phi/t averages? -- needs 2-pass cut...
        cuts = {'sigma1' : lambda dump : (d_fns['sigma'](dump) > 1),
                #'sigma10' : lambda dump : (d_fns['sigma'](dump) > 10),
                'Be_b0' : lambda dump : (d_fns['Be_b'](dump) > 0.02),
                'Be_b1' : lambda dump : (d_fns['Be_b'](dump) > 1),
                'Be_nob0' : lambda dump : (d_fns['Be_nob'](dump) > 0.02),
                'Be_nob1' : lambda dump : (d_fns['Be_nob'](dump) > 1),
                #'mu1' : lambda dump : (d_fns['mu'](dump) > 1),
                #'mu2' : lambda dump : (d_fns['mu'](dump) > 2),
                #'mu3' : lambda dump : (d_fns['mu'](dump) > 3),
                'bg1' : lambda dump : (d_fns['betagamma'](dump) > 1.0),
                'bg05' : lambda dump : (d_fns['betagamma'](dump) > 0.5),
                'allp' : lambda dump : (d_fns['FE'](dump) > 0)}

        # Terminology:
        # LBZ = E&M energy only, any cut
        # Lj = full E flux, any cut
        # Ltot = Lj_allp = full luminosity wherever it is positive
        for lum,flux in [['LBZ', 'FE_EM'], ['Lj', 'FE']]:
            for cut in cuts.keys():
                out[lum+'_'+cut+'_rt'] = sum_shell(geom, d_fns[flux](dump), mask=cuts[cut](dump))
                if out['t'] >= tavg_start and out['t'] <= tavg_end:
                    out[lum+'_'+cut+'_r'] = out[lum+'_'+cut+'_rt']
                out[lum+'_'+cut] = out[lum+'_'+cut+'_rt'][iBZ]

    if calc_lumproxy:
        rho, Pg, B = d_fns['rho'](dump), d_fns['Pg'](dump), d_fns['B'](dump)
        # See EHT code comparison paper
        j = rho**3 / Pg**2 * np.exp(-0.2 * (rho**2 / ( B * Pg**2))**(1./3.))
        out['Lum_rt'] = eht_profile(geom, j, jmin, jmax)

    if calc_etot:
        # Total energy and current, summed by shells to allow cuts on radius
        for tot_name, var_name in [['Etot', 'JE0']]:
            out[tot_name+'_rt'] = sum_shell(geom, d_fns[var_name](dump))
        for tot_name, var_name in [['Jsqtot', 'jsq']]:
            # NOTE(review): 'jsq' is invoked with (geom, dump), unlike the
            # other d_fns entries — presumably its signature differs; confirm.
            out[tot_name+'_rt'] = sum_shell(geom, d_fns[var_name](geom, dump))

    if calc_efluxes:
        # Conserved (maybe; in steady state) 2D energy flux
        for var in ['JE0', 'JE1', 'JE2']:
            out[var+'_rt'] = sum_shell(geom, d_fns[var](dump))
            if out['t'] >= tavg_start and out['t'] <= tavg_end:
                out[var+'_rth'] = d_fns[var](dump).mean(axis=-1)

    # Total outflowing portions of variables
    if calc_outfluxes:
        for name,var in [['outflow', 'FM'], ['outEflow', 'FE']]:
            var_temp = d_fns[var](dump)
            out[name+'_rt'] = sum_shell(geom, var_temp, mask=(var_temp > 0))
            if out['t'] >= tavg_start and out['t'] <= tavg_end:
                out[name+'_r'] = out[name+'_rt']

    if calc_pdfs:
        # gdet-weighted histograms of log10(var) over the whole domain.
        for var in ['betainv', 'rho']:
            out[var+'_pdf'], _ = np.histogram(np.log10(d_fns[var](dump)),
                                              bins=pdf_nbins, range=(-3.5, 3.5),
                                              weights=np.repeat(geom['gdet'], geom['n3']).reshape((geom['n1'], geom['n2'], geom['n3'])),
                                              density=True)

    # Free the dump eagerly: each worker processes many dumps in sequence.
    dump.clear()
    del dump

    return out
def merge_dict(n, out, out_full):
    """Fold the per-dump dict *out* (for dump index *n*) into *out_full*.

    Time-series keys (suffix ending in 't', or plain scalars) are stored
    at index *n*; time-averaged keys (_r, _th, _phi, _pdf, ...) are
    accumulated as a running mean weighted by 1/my_avg_range.
    """
    # Merge the output dicts
    # TODO write to an HDF5 file incrementally?
    for key in list(out.keys()):
        if key not in out_full:
            # First occurrence of this key: allocate storage by suffix.
            # Trailing 't' marks a per-dump time series of the base shape.
            if key[-3:] == '_rt':
                out_full[key] = np.zeros((ND, hdr['n1']))
            elif key[-5:] == '_htht':
                out_full[key] = np.zeros((ND, hdr['n2']//2))
            elif key[-4:] == '_tht':
                out_full[key] = np.zeros((ND, hdr['n2']))
            elif key[-5:] == '_rtht':
                out_full[key] = np.zeros((ND, hdr['n1'], hdr['n2']))
            elif key[-7:] == '_thphit':
                out_full[key] = np.zeros((ND, hdr['n2'], hdr['n3']))
            elif key[-5:] == '_pdft':
                out_full[key] = np.zeros((ND, pdf_nbins))
            elif (key[-2:] == '_r' or key[-4:] == '_hth' or key[-3:] == '_th' or key[-4:] == '_phi' or
                  key[-4:] == '_rth' or key[-5:] == '_rphi' or key[-6:] == '_thphi' or key[-4:] == '_pdf'):
                # Time-averaged quantity: same shape as one dump's sample.
                out_full[key] = np.zeros_like(out[key])
            else:
                # Scalar per dump (fluxes, extrema, times, ...).
                out_full[key] = np.zeros(ND)
        if (key[-2:] == '_r' or key[-4:] == '_hth' or key[-3:] == '_th' or key[-4:] == '_phi' or
            key[-4:] == '_rth' or key[-5:] == '_rphi' or key[-6:] == '_thphi' or key[-4:] == '_pdf'):
            # Weight the average correctly for _us_. Full weighting will be done on merge w/'avg_w'
            if my_avg_range > 0:
                out_full[key] += out[key]/my_avg_range
        else:
            out_full[key][n] = out[key]
# Map the requested time window onto dump indices.
# TODO this, properly, some other day
if ND < 200:
    nstart, nmin, nmax, nend = 0, 0, ND-1, ND-1
elif ND < 300:
    nstart, nmin, nmax, nend = 0, ND//2, ND-1, ND-1
else:
    # assumes a dump cadence of 5M time units — TODO confirm vs. run config
    nstart, nmin, nmax, nend = int(tstart)//5, int(tavg_start)//5, int(tavg_end)//5, int(tend)//5
full_avg_range = nmax - nmin

# Clamp the averaging window into the processed range [nstart, nend].
if nmin < nstart: nmin = nstart
if nmin > nend: nmin = nend
if nmax < nstart: nmax = nstart
if nmax > nend: nmax = nend
my_avg_range = nmax - nmin

print("nstart = {}, nmin = {}, nmax = {} nend = {}".format(nstart,nmin,nmax,nend))

# Make a dict for merged variables, throw in what we know now to make merging easier
out_full = {}
for key in ['a', 'gam', 'gam_e', 'gam_p']:
    out_full[key] = hdr[key]

# Toss in the common geom lists and our weight in the overall average
out_full['r'] = geom['r'][:,hdr['n2']//2,0]

# For quick angular plots. Note most will need geometry to convert from dX2 to dth
out_full['th_eh'] = geom['th'][iEH,:,0]
out_full['th_bz'] = geom['th'][iBZ,:,0]

out_full['phi'] = geom['phi'][0,hdr['n2']//2,:]

out_full['avg_start'] = tavg_start
out_full['avg_end'] = tavg_end
# Fraction of the full averaging window this invocation covers.
out_full['avg_w'] = my_avg_range / full_avg_range
print("Will weight averages by {}".format(out_full['avg_w']))

# Fill the output dict with all per-dump or averaged stuff
# Hopefully in a way that doesn't keep too much of it around in memory
nthreads = util.calc_nthreads(hdr, pad=0.2)
util.iter_parallel(avg_dump, merge_dict, out_full, ND, nthreads)

# Add divBmax from HARM's own diagnostic output, if available. We can recompute the rest, but not this
diag = io.load_log(path)
if diag is not None:
    out_full['t_d'] = diag['t']
    out_full['divbmax_d'] = diag['divbmax']

# Deduce the name of the output file
if tstart > 0 or tend < 10000:
    outfname = "eht_out_{}_{}.p".format(tstart,tend)
else:
    outfname = "eht_out.p"

# See if there's anything already there we're not calculating, and import it
if os.path.exists(outfname):
    with open(outfname, "rb") as prev_file:
        out_old = pickle.load(prev_file)
        for key in out_old:
            if key not in out_full:
                out_full[key] = out_old[key]

# OUTPUT
with open(outfname, "wb") as outf:
    print("Writing {}".format(outfname))
    pickle.dump(out_full, outf)
|
import json

# Read one JSON object per line from movieplus.json, de-duplicate the
# records, and append the unique ones to movie.json (one object per line).
www = []
# BUG FIX: raw string — in a normal literal '\U' starts a unicode escape
# and is a SyntaxError on Python 3, so this path could never be parsed.
with open(r'C:\Users\lenovo\PycharmProjects\user\movieplus.json', 'r+', encoding='utf-8') as file:
    for line in file.readlines():
        we = json.loads(line)
        print(we)
        www.append(we)

# Keep first occurrence of each record, preserving input order.
result = []
for le in www:
    if le not in result:
        result.append(le)
print(len(result))
print(result[0])

# BUG FIX: open the output once instead of re-opening it for every record
# (the original leaked one file handle per item and never closed them).
with open('movie.json', 'a', encoding='utf-8') as tyu:
    for item in result:
        content = json.dumps(dict(item), ensure_ascii=False) + '\n'
        tyu.write(content)
import cv2

# Path of the source image to resize.
SRC_PATH = '/home/asimov/PycharmProjects/DataMining/深度学习/RNN/testimages/31.jpg'
# Where the resized image is written.
DST_PATH = '/home/asimov/PycharmProjects/DataMining/深度学习/RNN/cut_imgs/31.jpg'
# Target (width, height) in pixels.
TARGET_SIZE = (28, 28)

# Read, shrink with area interpolation (well suited to downscaling), save.
image = cv2.imread(SRC_PATH)
resized = cv2.resize(image, TARGET_SIZE, interpolation=cv2.INTER_AREA)
cv2.imwrite(DST_PATH, resized)
|
#!/opt/csw/bin/python3
"""
This script will move files to the FTP server.
"""
import argparse
from Ftp_Handler import Ftp_Handler
from lib import my_env

# Initialize Environment: project-standard config + logging bootstrap.
projectname = "mowdr"
modulename = my_env.get_modulename(__file__)
config = my_env.get_inifile(projectname, __file__)
my_log = my_env.init_loghandler(config, modulename)
my_log.info('Start Application')

# The single required argument is the full path of the file to upload.
parser = argparse.ArgumentParser(description="Put a file on the default FTP Location")
parser.add_argument("-f", "--fileName", type=str, required=True,
                    help="Please provide file (full path name) to be moved.")
args = parser.parse_args()
my_log.info("Arguments: {a}".format(a=args))
fn = args.fileName

# Upload through the project's FTP wrapper; connection details come
# from the ini file read above.
ftp = Ftp_Handler(config)
ftp.load_file(fn)
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def average(self, salary: List[int]) -> float:
        """Mean salary after discarding exactly one minimum and one maximum.

        Assumes ``salary`` has at least three entries (per the problem).
        """
        trimmed = sorted(salary)[1:-1]
        return sum(trimmed) / len(trimmed)
if __name__ == "__main__":
    # Smoke tests from the LeetCode problem statement (1491).
    solution = Solution()
    assert 2500.0 == solution.average([4000, 3000, 1000, 2000])
    assert 2000.0 == solution.average([1000, 2000, 3000])
    assert 3500.0 == solution.average([6000, 5000, 4000, 3000, 2000, 1000])
    assert 4750.0 == solution.average([8000, 9000, 2000, 3000, 6000, 1000])
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 11:17:20 2020
@author: anusk
"""
import numpy as np
import math
import cv2
def dftuv(M, N):
    """Return DFT frequency grids U, V of shape (M, N).

    Frequencies above M/2 (resp. N/2) wrap to their negative
    counterparts, matching the layout of an unshifted FFT.
    """
    u = np.arange(M)
    v = np.arange(N)
    # Wrap the upper half of the spectrum to negative frequencies.
    u[u > M / 2] -= M
    v[v > N / 2] -= N
    # meshgrid(v, u) yields (M, N) arrays: U varies down rows, V across columns.
    V, U = np.meshgrid(v, u)
    return U, V


def lpfilter(tipo, M, N, Do):
    """Build an M-by-N frequency-domain low-pass filter.

    :param tipo: 'ideal' (hard cutoff) or 'gaussian'.
    :param M: number of rows.
    :param N: number of columns.
    :param Do: cutoff distance from the DC component.
    :return: filter H as an (M, N) ndarray.
    :raises ValueError: for an unknown filter type.  BUG FIX: the original
        only printed a message and then hit UnboundLocalError on `return H`.
    """
    U, V = dftuv(M, N)
    D = np.sqrt(U**2 + V**2)  # distance of each frequency from DC
    if tipo == 'ideal':
        # float32 ndarray instead of the deprecated np.matrix.
        H = (D <= Do).astype(np.float32)
    elif tipo == 'gaussian':
        H = np.exp(-(D**2) / (2 * (Do**2)))
    else:
        raise ValueError('Unknown filter type: {!r}'.format(tipo))
    return H


if __name__ == "__main__":
    filt = lpfilter('gaussian', 20, 20, 6)
|
import numpy as np
from scipy.sparse import lil_matrix, csc_matrix
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
import matplotlib

# Define geometry: a single equilateral triangle of 2D truss bars.
# nodes[i] = (x, y); each row of `elements` is a bar between two nodes.
nodes = np.array([[0, 0], [1, 0], [0.5, np.sqrt(3)/2]])
elements = np.array([[0, 1], [1, 2], [2, 0]])

# Material properties
E = 210000 # Young's modulus in N/mm^2
A = 1 # cross-sectional area in mm^2

# Define global stiffness matrix (2 DOFs per node: x then y).
# lil_matrix allows cheap incremental assembly.
K = lil_matrix((2*len(nodes), 2*len(nodes)))

# Assembly of global stiffness matrix
for i, j in elements:
    xi, yi = nodes[i]
    xj, yj = nodes[j]
    # Bar length and direction cosines.
    L = np.sqrt((xj-xi)**2 + (yj-yi)**2)
    c = (xj-xi) / L
    s = (yj-yi) / L
    # 4x4 stiffness of a 2D truss bar expressed in global coordinates.
    k_local = E*A/L * np.array([[c**2, c*s, -c**2, -c*s],
                                [c*s, s**2, -c*s, -s**2],
                                [-c**2, -c*s, c**2, c*s],
                                [-c*s, -s**2, c*s, s**2]])
    # Scatter the four 2x2 blocks into the global matrix.
    K[2*i:2*i+2, 2*i:2*i+2] += k_local[:2, :2]
    K[2*i:2*i+2, 2*j:2*j+2] += k_local[:2, 2:]
    K[2*j:2*j+2, 2*i:2*i+2] += k_local[2:, :2]
    K[2*j:2*j+2, 2*j:2*j+2] += k_local[2:, 2:]

# Convert to CSC format for efficient solving
K = csc_matrix(K)

# Force vector
F = np.zeros(2*len(nodes))
F[4] = -1000 # force at the top node in y direction

# Boundary conditions (True = DOF constrained to zero displacement).
bc = np.zeros(2*len(nodes), dtype=bool)
bc[0] = bc[1] = bc[2] = True # node 0 fully fixed; node 1 fixed in x
# NOTE(review): the original comment said "free in y direction at the
# right end", but True here *constrains* node 1's y DOF — confirm intent.
bc[3] = True
# Apply boundary conditions: zero constrained rows/columns, unit diagonal,
# zero load — this enforces u = 0 at every constrained DOF.
K[:, bc] = K[bc, :] = 0
K[bc, bc] = 1
F[bc] = 0

# Solve Ku = F
u = spsolve(K, F)

# Print displacements
for i in range(len(nodes)):
    print(f"Node {i}: displacement = {u[2*i:2*i+2]}")

# Plot deformed shape
#deformed_nodes = nodes + u.reshape(-1, 2)
#plt.triplot(nodes[:, 0], nodes[:, 1], elements, label='Original')
#plt.triplot(deformed_nodes[:, 0], deformed_nodes[:, 1], elements, label='Deformed')
#plt.legend()
#plt.show()

# Plot deformed shape (each bar drawn as a separate line segment).
deformed_nodes = nodes + u.reshape(-1, 2)
for element in elements:
    # Original shape
    plt.plot(*nodes[element].T, 'b-', label='Original')
    # Deformed shape
    plt.plot(*deformed_nodes[element].T, 'r-', label='Deformed')

# Because each line gets its own label, we'll fix that here
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
plt.show()
|
"""check the weather"""
from typing import Union
import requests
from wechaty import Message, Contact, Room
from wechaty.plugin import WechatyPlugin
class WeatherPlugin(WechatyPlugin):
    """Wechaty plugin that replies with today's weather on request."""

    @property
    def name(self) -> str:
        """Identifier under which the plugin is registered."""
        return 'weather'

    async def on_message(self, msg: Message):
        """Answer with the current weather when the trigger phrase arrives."""
        talker = msg.talker()
        room = msg.room()
        if msg.text() != '今天天气如何':
            return
        # Reply in the room when the message came from one; otherwise
        # reply directly to the contact who sent it.
        conversation: Union[Room, Contact] = room if room is not None else talker
        await conversation.ready()
        response = requests.get('https://tianqiapi.com/api?version=v61&'
                                'appid=32896971&appsecret=5bR8Gs9x')
        result = response.json()
        result_msg = f'今天{result["wea"]} 最低温度{result["tem2"]}度 ' \
                     f'最高温度{result["tem1"]}度'
        await conversation.say(result_msg)
|
class Solution(object):
    def detectCapitalUse(self, word):
        """Return True iff the capitalization of *word* is valid.

        Valid patterns: all capitals, no capitals, or only the first
        letter capitalized.
        """
        uppercase_count = sum(1 for ch in word if ch == ch.upper())
        if uppercase_count == len(word) or uppercase_count == 0:
            return True
        return uppercase_count == 1 and word[0] == word[0].upper()
|
import cv2
import sklearn
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

# Tiny face-recognition demo: one 300x300 grayscale training photo per
# player, flattened into a 90000-element pixel vector for an MLP.
fotosRecopiladas = {}
BDFotos = []

fotoMessi = cv2.imread('messi3_rostro.jpg', cv2.IMREAD_GRAYSCALE)
fotoMessi = cv2.resize(fotoMessi, (300, 300))
fotoMessi = np.array(fotoMessi)

fotoRonaldo = cv2.imread('ronaldo1_rostro.jpg', cv2.IMREAD_GRAYSCALE)
fotoRonaldo = cv2.resize(fotoRonaldo, (300, 300))
fotoRonaldo = np.array(fotoRonaldo)

fotoHazard = cv2.imread('hazard1_rostro.jpg', cv2.IMREAD_GRAYSCALE)
fotoHazard = cv2.resize(fotoHazard, (300, 300))
fotoHazard = np.array(fotoHazard)

BDFotos = [fotoMessi, fotoRonaldo, fotoHazard]
target = [0, 1, 2]  # class label per photo, aligned with target_names

# Flatten each 2-D image into a flat list of pixel values.
arrayAuxiliar = []
arrayFotos = []
for foto in BDFotos:
    for elementos in foto:
        for elemento in elementos:
            arrayAuxiliar.append(elemento)
    arrayFotos.append(arrayAuxiliar)
    arrayAuxiliar = []

target_names = ['Messi', 'Ronaldo', 'Hazard']
#target_names.append('Messi')
#target_names.append('Ronaldo')
#target_names.append('Hazard')

# Bundle everything into an sklearn-style dataset dict.
fotosRecopiladas['data'] = arrayFotos
fotosRecopiladas['target'] = target
fotosRecopiladas['target_names'] = target_names

# NOTE(review): the split result is never used — the classifier below is
# fit on the full 3-sample dataset, so there is no held-out evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(fotosRecopiladas['data'], fotosRecopiladas['target'])

redNeuronal = MLPClassifier(max_iter=100, hidden_layer_sizes=(300, 300), alpha=0.003)
redNeuronal.fit(fotosRecopiladas['data'], fotosRecopiladas['target'])

# Classify a new photo of Hazard, using the same preprocessing pipeline.
fotoAEvaluar = cv2.imread('hazard2_rostro.jpg', cv2.IMREAD_GRAYSCALE)
fotoAEvaluar = cv2.resize(fotoAEvaluar, (300, 300))
arrayFotoAEvaluar = []
for elementos in fotoAEvaluar:
    for elemento in elementos:
        arrayFotoAEvaluar.append(elemento)
print(redNeuronal.predict([arrayFotoAEvaluar]))
# Web streaming example
# Source code from the official PiCamera package
# http://picamera.readthedocs.io/en/latest/recipes2.html#web-streaming
import io
import picamera
import logging
import socketserver
import sys
from http import server
from dotenv import dotenv_values
from threading import Condition
import asyncio
import datetime
import random
import websockets
# Runtime configuration comes from the project-level .env file.
config = dotenv_values("../.env")
# Banner shown in the page header, e.g. "AUTOBOT 1.2".
VERSION = "AUTOBOT {}".format(config["VERSION"])
# Host/port the browser-side JavaScript uses to open its websocket.
WEBSOCKET_HOST = config["WEBSOCKET_HOST"]
WEBSOCKET_PORT = config["WEBSOCKET_PORT"]
PAGE="""\
<html>
<head>
<title>Raspberry Pi - Camera</title>
</head>
<body style="font-family:Arial;">
<center><h1>Raspberry Pi - Camera</h1></center>
<center>
<div style="height:35px;width:200px;background-color:blue;color:white;">
<span id="logLine01"></span><br/>
<span id="logLine02"></span>
</div>
</center>
<center>
<table border="1" style="font-size:x-small">
<tr>
<td></td>
<td id="center"></td>
<td></td>
<tr>
<td id="centerLeft"></td>
<td></td>
<td id="centerRight"></td>
<tr>
<td colspan="3" align="center">Tank</td>
<tr>
<td></td>
<td id="back"></td>
<td></td>
</table>
</center>
<center><img src="stream.mjpg" width="640" height="480"></center>
</body>
<script>
var logLine01 = document.getElementById('logLine01');
var logLine02 = document.getElementById('logLine02');
logLine01.innerHTML = '{}';
logLine02.innerHTML = 'Use arrow keys'
var connection = new WebSocket('ws://{}:{}/websocket');
connection.onopen = function(){{
connection.send('ping');
}};
connection.onerror = function(error){{
console.log('Websocket error '+error);
}};
connection.onmessage = function(e){{
data = e.data;
data = data.replace("b'", "");
data = data.replace("'", "");
data = data.replace("b\\"", "");
data = JSON.parse(data);
if(data["type"] == "displayLog"){{
el = document.getElementById("logLine0"+data["contents"]["line"])
el.innerHTML = data["contents"]["message"];
}} else if(data["type"] == "distanceLog"){{
for(const [key, value] of Object.entries(data["contents"])){{
document.getElementById(key).innerHTML=value;
}}
}}
}};
</script>
</html>
""".format(VERSION, WEBSOCKET_HOST, WEBSOCKET_PORT)
class StreamingOutput(object):
    """File-like sink for the camera's MJPEG byte stream.

    Bytes accumulate in an internal buffer until the next JPEG
    start-of-image marker arrives; the completed frame is then published
    via ``self.frame`` and waiting readers are woken via ``self.condition``.
    """

    def __init__(self):
        self.frame = None             # last complete JPEG frame (bytes)
        self.buffer = io.BytesIO()    # accumulates the in-progress frame
        self.condition = Condition()  # readers wait on this for new frames

    def write(self, buf):
        jpeg_soi = b'\xff\xd8'  # JPEG start-of-image marker
        if buf.startswith(jpeg_soi):
            # A new frame begins: publish everything accumulated so far,
            # wake every waiting client, then rewind the buffer.
            self.buffer.truncate()
            with self.condition:
                self.frame = self.buffer.getvalue()
                self.condition.notify_all()
            self.buffer.seek(0)
        return self.buffer.write(buf)
class StreamingHandler(server.BaseHTTPRequestHandler):
    """HTTP handler for the control page and the MJPEG stream.

    Routes: '/' redirects to '/index.html'; '/index.html' serves the
    module-level PAGE; '/stream.mjpg' streams frames produced by the
    module-level `output` StreamingOutput instance.
    """
    def do_GET(self):
        if self.path == '/':
            # Redirect the bare root to the index page.
            self.send_response(301)
            self.send_header('Location', '/index.html')
            self.end_headers()
        elif self.path == '/index.html':
            content = PAGE.encode('utf-8')
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len(content))
            self.end_headers()
            self.wfile.write(content)
        elif self.path == '/stream.mjpg':
            # Endless multipart response: each part is one JPEG frame.
            self.send_response(200)
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
            self.end_headers()
            try:
                while True:
                    # Block until the camera publishes the next frame.
                    with output.condition:
                        output.condition.wait()
                        frame = output.frame
                    self.wfile.write(b'--FRAME\r\n')
                    self.send_header('Content-Type', 'image/jpeg')
                    self.send_header('Content-Length', len(frame))
                    self.end_headers()
                    self.wfile.write(frame)
                    self.wfile.write(b'\r\n')
            except Exception as e:
                # Client disconnects surface here; log and drop the client.
                logging.warning(
                    'Removed streaming client %s: %s',
                    self.client_address, str(e))
        else:
            self.send_error(404)
            self.end_headers()
class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    # Threaded HTTP server: one thread per client, so several browsers
    # can watch the stream concurrently.
    allow_reuse_address = True  # allow quick restarts on the same port
    daemon_threads = True       # don't block process exit on client threads
# Entry point: open the Pi camera, record MJPEG into `output`, and serve
# HTTP on port 8000 until interrupted.
with picamera.PiCamera(resolution='640x480', framerate=30) as camera:
    output = StreamingOutput()
    #Uncomment the next line to change your Pi's Camera rotation (in degrees)
    #camera.rotation = 90
    camera.start_recording(output, format='mjpeg')
    try:
        address = ('', 8000)  # listen on all interfaces, port 8000
        server = StreamingServer(address, StreamingHandler)
        server.serve_forever()
    finally:
        # Always stop the recorder, even if the server loop raises.
        camera.stop_recording()
import tensorflow as tf
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
# Load the Fashion-MNIST dataset (60k train / 10k test grayscale images).
data = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = data.load_data()

# Scale pixel intensities from [0, 255] into [0, 1] for stable training.
train_images = train_images / 255.0
test_images = test_images / 255.0

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Quick visual sanity check of one training sample.
plt.imshow(train_images[12])
plt.show()

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),   # 28x28 image -> 784 vector
    keras.layers.Dense(128, activation="relu"),   # hidden layer (ReLU zeroes negatives)
    keras.layers.Dense(10, activation="softmax")  # per-class probabilities
])

# BUG FIX: the loss name was misspelled "sparse_categorical_croessentropy",
# which makes model.compile raise a ValueError at runtime.
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_images, train_labels, epochs=10)

# Evaluate once on the held-out test set (the original evaluated twice,
# discarding the first result).
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('\n Accuracy:', test_acc)
|
import redis

# Demo of Redis hash commands against the hash stored at key "users:123".
client = redis.Redis()

client.hset('users:123', 'name', 'aan'.encode('utf-8'))
client.hset('users:123', 'email', 'aan@mail.com'.encode('utf-8'))
client.hset('users:123', 'dob', '1990-09-09'.encode('utf-8'))

# BUG FIX: the key was misspelled 'users:1234', so this always printed an
# empty dict instead of the hash written just above.
print(client.hgetall('users:123'))
print(client.hget('users:123', 'name'))
print(client.hget('users:123', 'email'))
print(client.hget('users:123', 'dob'))
print(client.hkeys('users:123'))   # field names only
print(client.hvals('users:123'))   # field values only
print(client.hlen('users:123'))    # number of fields
print(client.hexists('users:123', 'email'))    # True: field present
print(client.hexists('users:123', 'website'))  # False: never written
# (the original also called hexists('users:123','dob') once without
# printing the result — a redundant round-trip, removed)
print(client.hexists('users:123', 'dob'))
# Remove the whole hash when done.
client.delete('users:123')
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import subprocess
from typing import Iterable
BASH_COMPLETION_TEMPLATE = """# DO NOT EDIT.
# This script is autogenerated by build-support/bin/generate_completions.py
function pants_completions()
{{
local -r PANTS_GOALS="\\
{goals}"
{goal_options}
local -r PANTS_GLOBAL_OPTIONS="\\
{global_options}"
local current_word previous_word previous_goal
current_word=${{COMP_WORDS[COMP_CWORD]}}
previous_word=${{COMP_WORDS[COMP_CWORD-1]}}
previous_goal=$(get_previous_goal)
case "$previous_goal" in
{goal_cases}
*)
complete_options="$PANTS_GLOBAL_OPTIONS"
;;
esac
# Show goals, unless the tab completion has a hyphen - then show global/goal options instead
if [[ $current_word == -* ]]; then
COMPREPLY=( $( compgen -W "$complete_options" -- $current_word ))
else
COMPREPLY=( $( compgen -W "$PANTS_GOALS" -- $current_word ))
fi
return 0
}}
# Get the most recent goal in the command arguments, so options can be correctly applied
# This function will ignore hyphenated options when looking for the goal
# If this is empty, we're at the top-level Pants command
function get_previous_goal()
{{
local previous_goal i current_word
previous_goal=
for (( i=$COMP_CWORD; i > 0; --i )); do
current_word=${{COMP_WORDS[i]}}
if [[ $current_word != -* ]]; then
previous_goal=$current_word
break
fi
done
echo $previous_goal
}}
complete -o default -o bashdefault -F pants_completions pants
"""
GOAL_OPTIONS_TEMPLATE = """
local -r PANTS_{name}_OPTIONS="\\
{options}"
"""
GOAL_CASE_TEMPLATE = """
{goal})
complete_options="$PANTS_{bash_name}_OPTIONS"
;;
"""
def _run_pants_help():
    """Run ``./pants help-all`` and return its parsed JSON output.

    ``check=True`` already raises CalledProcessError on a non-zero exit,
    so the original's extra ``check_returncode()`` call was redundant
    (it could never fire) and has been dropped.
    """
    process = subprocess.run(["./pants", "help-all"], capture_output=True, check=True)
    return json.loads(process.stdout)
def _parse_help_for_options(help: dict, scope: str) -> tuple[frozenset[str], frozenset[str]]:
"""scope represents the goal or subsystem of interest Returns a tuple containing the scoped
options, followed by unscoped."""
scoped_help_info = help["scope_to_help_info"][scope]
scoped_options = []
unscoped_options = []
for option in scoped_help_info["basic"] + scoped_help_info["advanced"]:
scoped_options.extend(option["scoped_cmd_line_args"])
unscoped_options.extend(option["unscoped_cmd_line_args"])
return (
frozenset(scoped_options),
frozenset(unscoped_options),
)
def _bashify_name(name: str) -> str:
# Replace hyphens with underscores for variable names
# Use upper-case goal names
return name.replace("-", "_").upper()
def _hydrate_bash_template(
    goal_options: dict[str, Iterable[str]], global_options: Iterable[str]
) -> str:
    """Render the bash completion script from goal and global option data."""
    # Options/goals are joined with a line-continuation so the generated
    # shell variables stay readable.
    joiner = "\\\n "

    # Bodies of the PANTS_GOALS and PANTS_GLOBAL_OPTIONS variables.
    goal_names = sorted(goal_options)
    goals_str = joiner.join(goal_names)
    global_options_str = joiner.join(sorted(global_options))

    # One PANTS_{GOAL}_OPTIONS variable per goal.
    goal_options_templates = "".join(
        GOAL_OPTIONS_TEMPLATE.format(
            name=_bashify_name(goal), options=joiner.join(sorted(opts))
        )
        for goal, opts in sorted(goal_options.items())
    )

    # Case arms dispatching on the preceding goal; the no-match "*" arm
    # is hard-coded in the main template.
    goal_cases_templates = "".join(
        GOAL_CASE_TEMPLATE.format(goal=goal, bash_name=_bashify_name(goal))
        for goal in goal_names
    )

    return BASH_COMPLETION_TEMPLATE.format(
        global_options=global_options_str,
        goals=goals_str,
        goal_options=goal_options_templates,
        goal_cases=goal_cases_templates,
    )
def main() -> None:
    """Generate the pants bash completion script and print it to stdout."""
    help_results = _run_pants_help()
    all_scopes: frozenset[str] = frozenset(help_results["scope_to_help_info"].keys())
    goal_scopes: frozenset[str] = frozenset(help_results["name_to_goal_info"].keys())
    pants_scope = ""
    # Bug fix: the original called all_scopes.difference([goal_scopes, pants_scope]),
    # which treats the goal_scopes frozenset as a single *element* to remove, so
    # goal scopes were never actually excluded and were re-processed as
    # subsystems below. difference() accepts multiple iterables directly.
    subsystem_scopes = all_scopes.difference(goal_scopes, [pants_scope])
    # Holds the scoped options we will complete after "./pants" (i.e. at the global scope)
    # e.g. "./pants -<TAB>" could complete to "./pants --test-use-coverage"
    all_scoped_options: set[str] = set()
    # Holds the unscoped options we will complete after typing a goal
    # e.g. "./pants test -<TAB>" could complete to "./pants test --use-coverage"
    goal_options: dict[str, Iterable[str]] = {}
    for scope in goal_scopes:
        scoped_goal_options, unscoped_goal_options = _parse_help_for_options(help_results, scope)
        goal_options[scope] = unscoped_goal_options
        all_scoped_options.update(scoped_goal_options)
    # Subsystem completion options are applied at the global level
    # e.g. "./pants -<TAB>" could complete to "./pants --pytest-args"
    for name in subsystem_scopes:
        scoped_subsystem_options, _ = _parse_help_for_options(help_results, name)
        all_scoped_options.update(scoped_subsystem_options)
    # Special case for Pants options, the scope name is ""
    # e.g. "./pants -<TAB>" could complete to "./pants --loop"
    scoped_pants_options, _ = _parse_help_for_options(help_results, pants_scope)
    all_scoped_options.update(scoped_pants_options)
    print(_hydrate_bash_template(goal_options, global_options=all_scoped_options))
# Script entry point: regenerate and print the completion script.
if __name__ == "__main__":
    main()
|
# Tutorial script demonstrating Python sequence indexing and slicing on a
# list of names and on a string. Each print line intentionally shows one case.
emplist = ['John', 'David', 'Mark', 'Mike', 'James', 'Curry']
name = 'Maninder Singh'
# Indexing or Subscription operation (0-based positive indices)
print("Employee 0 is {}.".format(emplist[0]))
print("Employee 1 is {}.".format(emplist[1]))
print("Employee 2 is {}.".format(emplist[2]))
print("Employee 3 is {}.".format(emplist[3]))
# Negative indices count from the end (-1 is the last item)
print("\nEmployee -1 is {}.".format(emplist[-1]))
print("Employee -2 is {}.".format(emplist[-2]))
print("Employee -3 is {}.".format(emplist[-3]))
print("Employee -4 is {}.".format(emplist[-4]))
# Strings support the same indexing as lists
print("\nCharacter 0 is {}.".format(name[0]))
print("Character 1 is {}.".format(name[1]))
print("Character 2 is {}.".format(name[2]))
print("Character 3 is {}.".format(name[3]))
print("Character 4 is {}.".format(name[4]))
print("Character 5 is {}.".format(name[5]))
print("Character 6 is {}.".format(name[6]))
print("Character 7 is {}.".format(name[7]))
# Slicing on a list
# The start position is included but the end position is excluded from the sequence slice.
print("\nEmployee list from 1 to 3 is {}.".format(emplist[1:3]))
print("Employee list from 2 to end is {}.".format(emplist[2:]))
print("Employee list from 1 to -1 is {}.".format(emplist[1:-1]))
print("Employee list from start to end is {}.".format(emplist[:]))
# You can also provide a third argument for the slice called step size.
# The default value of step size is 1.
print("\nEmployee list with step size 1 is {}.".format(emplist[::1]))
print("Employee list with step size 2 is {}.".format(emplist[::2])) # When step size is 2, we get the items with position 0, 2,..
print("Employee list with step size 3 is {}.".format(emplist[::3])) # When step size is 3, we get the items with position 0, 3,..
print("Employee list with step size 4 is {}.".format(emplist[::4])) # When step size is 4, we get the items with position 0, 4,..
print("Employee list with step size 5 is {}.".format(emplist[::5])) # When step size is 5, we get the items with position 0, 5,..
print("Employee list with step size -1 is {}.".format(emplist[::-1])) # When step size is -1, we get the reverse list.
print("Employee list with step size -2 is {}.".format(emplist[::-2])) # When step size is -2, we get the items with position -1, -3, -5,...
print("Employee list with step size -3 is {}.".format(emplist[::-3])) # When step size is -3, we get the items with position -1, -4, -7,...
|
# Read one line from stdin and print it reversed using slice notation.
# Fix: input() already returns a str, so the redundant str() wrapper is removed.
line = input()
print(line[::-1])
|
# Generated by Django 3.0.5 on 2020-08-11 06:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: re-declares several Employee CharFields
    (lengths/primary-key flags) for the `register` app.

    NOTE: auto-generated by Django; do not edit operations by hand.
    """
    # Must run after the migration that introduced the Attendance model.
    dependencies = [
        ('register', '0002_attendance'),
    ]
    operations = [
        migrations.AlterField(
            model_name='employee',
            name='city',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='employee',
            name='designation',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='employee',
            name='empId',
            field=models.CharField(max_length=15, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='employee',
            name='fname',
            field=models.CharField(max_length=30),
        ),
        migrations.AlterField(
            model_name='employee',
            name='mobile',
            field=models.CharField(max_length=12),
        ),
        migrations.AlterField(
            model_name='employee',
            name='pwd',
            field=models.CharField(max_length=20),
        ),
    ]
|
from flask import Markup
# Demo of flask.Markup (markupsafe): safe strings, escaping, and tag stripping.
if __name__ == '__main__':
    # Markup() marks the template safe, but the .format() argument is NOT
    # escaped here — the <blink> tags pass through verbatim.
    print(Markup('<strong>Hello {}!</strong>'.format('<blink>hacker</blink>')))
    # escape() HTML-escapes the untrusted string (< becomes &lt; etc.).
    print(Markup.escape('<blink>hacker</blink>'))
    # striptags() removes markup and normalizes entities/whitespace.
    print(Markup('<em>Marked up</em> &raquo; HTML').striptags())
|
from django.contrib import admin
from .models import Profile, Technologie, App, Note, BuildLink
# Register your models here.
# Registering each model with the default site makes it manageable
# through the Django admin interface.
for model in (Profile, Technologie, App, Note, BuildLink):
    admin.site.register(model)
#!/usr/bin/env python
# -*-encoding:UTF-8-*-
from django.conf.urls import url
from ..views.vicqbpmssoj import ProblemTagAPI, ProblemAPI, ContestProblemAPI, PickOneAPI
# Regular-user endpoints: problem tags, problem detail, pick-one, and
# contest-problem views.
urlpatterns = [
    url(r"^problem/tags/?$", ProblemTagAPI.as_view(), name="problem_tag_list_api"),
    url(r"^problem/?$", ProblemAPI.as_view(), name="problem_api"),
    url(r"^contest/problem/?$", ContestProblemAPI.as_view(), name="contest_problem_api"),
    # Needs to be exercised with the help of the frontend
    url(r"^pickone/?$", PickOneAPI.as_view(), name="pick_one_api"),
]
|
from collections import namedtuple
from functools import lru_cache
from itertools import groupby
from operator import itemgetter
def groupdict(tupgen):
    """Group an iterable of (key, value) pairs into {key: [values]}.

    Pairs are sorted by key first so itertools.groupby sees each key as one
    contiguous run; value order within a key follows the sorted input order.
    (Converted from a lambda assignment — PEP 8 E731.)
    """
    ordered = sorted(tupgen, key=itemgetter(0))
    return {k: [v for _k, v in vs] for k, vs in groupby(ordered, key=itemgetter(0))}
class EntryType:
    """Interned dictionary-tag type (part of speech / usage marker).

    One instance exists per distinct description string thanks to the cached
    __new__, so tags can be compared cheaply. `short` is the JMdict-style
    code, `medium` a compact display form, `desc` the full description.
    """
    @lru_cache(None)
    def __new__(cls, desc):
        # Interning constructor: repeated EntryType(desc) calls for the same
        # desc return the same object. NOTE(review): __init__ still runs on
        # every call; it is idempotent here, so this is harmless.
        return super(EntryType, cls).__new__(cls)
    def __getnewargs__(self):
        # NOTE(review): pickling reconstructs from the *short* code, not the
        # original description — an "other" tag loses its desc on round-trip.
        # Confirm this is intended before relying on pickle.
        return (self.short,)
    def __init__(self, desc):
        # desc: full human-readable tag text as found in the dictionary data.
        self.desc = desc
        # Reverse lookup description -> code; unknown descriptions map to 'other'.
        self.short = type(self).SHORT.get(desc, 'other')
        # Compact display form; falls back to the full description.
        self.medium = type(self).MEDIUM.get(self.short, self.desc)
    def __repr__(self):
        if self.short != "other":
            return self.short
        else:
            return "other: " + self.desc
    # LONG: tag code -> full description (JMdict / JMnedict entity texts).
    LONG = {
        "ma" : "martial arts term",
        "x" : "rude or X-rated term (not displayed in educational software)",
        "abbr" : "abbreviation",
        "adj_i" : "adjective (keiyoushi)",
        "adj_na" : "adjectival nouns or quasi-adjectives (keiyodoshi)",
        "adj_no" : "nouns which may take the genitive case particle `no'",
        "adj_pn" : "pre-noun adjectival (rentaishi)",
        "adj_t" : "`taru' adjective",
        "adj_f" : "noun or verb acting prenominally",
        "adj" : "former adjective classification (being removed)",
        "adv" : "adverb (fukushi)",
        "adv_to" : "adverb taking the `to' particle",
        "arch" : "archaism",
        "ateji" : "ateji (phonetic) reading",
        "aux" : "auxiliary",
        "aux_v" : "auxiliary verb",
        "aux_adj" : "auxiliary adjective",
        "buddh" : "Buddhist term",
        "chem" : "chemistry term",
        "chn" : "children's language",
        "col" : "colloquialism",
        "comp" : "computer terminology",
        "conj" : "conjunction",
        "ctr" : "counter",
        "derog" : "derogatory",
        "eK" : "exclusively kanji",
        "ek" : "exclusively kana",
        "exp" : "expressions (phrases, clauses, etc.)",
        "fam" : "familiar language",
        "fem" : "female term or language",
        "food" : "food term",
        "geom" : "geometry term",
        "gikun" : "gikun (meaning) reading",
        "hon" : "honorific or respectful (sonkeigo) language",
        "hum" : "humble (kenjougo) language",
        "iK" : "word containing irregular kanji usage",
        "id" : "idiomatic expression",
        "ik" : "word containing irregular kana usage",
        "int" : "interjection (kandoushi)",
        "io" : "irregular okurigana usage",
        "iv" : "irregular verb",
        "ling" : "linguistics terminology",
        "m_sl" : "manga slang",
        "male" : "male term or language",
        "male_sl" : "male slang",
        "math" : "mathematics",
        "mil" : "military",
        "n" : "noun (common) (futsuumeishi)",
        "n_adv" : "adverbial noun (fukushitekimeishi)",
        "n_suf" : "noun, used as a suffix",
        "n_pref" : "noun, used as a prefix",
        "n_t" : "noun (temporal) (jisoumeishi)",
        "num" : "numeric",
        "oK" : "word containing out-dated kanji",
        "obs" : "obsolete term",
        "obsc" : "obscure term",
        "ok" : "out-dated or obsolete kana usage",
        "on_mim" : "onomatopoeic or mimetic word",
        "poet" : "poetical term",
        "pol" : "polite (teineigo) language",
        "pref" : "prefix",
        "prn" : "pronoun",
        "prt" : "particle",
        "physics" : "physics terminology",
        "rare" : "rare",
        "sens" : "sensitive",
        "sl" : "slang",
        "suf" : "suffix",
        "un" : "unclassified",
        "uK" : "word usually written using kanji alone",
        "uk" : "word usually written using kana alone",
        "v1" : "Ichidan verb",
        "v4r" : "Yondan verb with `ru' ending (archaic)",
        "v5" : "Godan verb (not completely classified)",
        "v5aru" : "Godan verb - -aru special class",
        "v5b" : "Godan verb with `bu' ending",
        "v5g" : "Godan verb with `gu' ending",
        "v5k" : "Godan verb with `ku' ending",
        "v5k_s" : "Godan verb - Iku/Yuku special class",
        "v5m" : "Godan verb with `mu' ending",
        "v5n" : "Godan verb with `nu' ending",
        "v5r" : "Godan verb with `ru' ending",
        "v5r_i" : "Godan verb with `ru' ending (irregular verb)",
        "v5s" : "Godan verb with `su' ending",
        "v5t" : "Godan verb with `tsu' ending",
        "v5u" : "Godan verb with `u' ending",
        "v5u_s" : "Godan verb with `u' ending (special class)",
        "v5uru" : "Godan verb - Uru old class verb (old form of Eru)",
        "v5z" : "Godan verb with `zu' ending",
        "vz" : "Ichidan verb - zuru verb (alternative form of -jiru verbs)",
        "vi" : "intransitive verb",
        "vk" : "Kuru verb - special class",
        "vn" : "irregular nu verb",
        "vs" : "noun or participle which takes the aux. verb suru",
        "vs_s" : "suru verb - special class",
        "vs_i" : "suru verb - irregular",
        "kyb" : "Kyoto-ben",
        "osb" : "Osaka-ben",
        "ksb" : "Kansai-ben",
        "ktb" : "Kantou-ben",
        "tsb" : "Tosa-ben",
        "thb" : "Touhoku-ben",
        "tsug" : "Tsugaru-ben",
        "kyu" : "Kyuushuu-ben",
        "rkb" : "Ryuukyuu-ben",
        "vt" : "transitive verb",
        "vulg" : "vulgar expression or word",
        "nokanji" : "Not a true reading of the Kanji",
        # begin of jmndict entry types
        "surname" : "family or surname",
        "place" : "place name",
        "unclassified" : "unclassified name",
        "company" : "company name",
        "product" : "product name",
        "work" : "work of art, literature, music, etc. name",
        "malegiven" : "male given name or forename",
        "femalegiven" : "female given name or forename",
        "person" : "full name of a particular person",
        "given" : "given name or forename, gender not specified",
        "station" : "railway station",
        "organization" : "organization name",
        "irregular" : "old or irregular kana form"
    }
    # MEDIUM: tag code -> compact display string; codes missing here fall
    # back to the full description in __init__.
    MEDIUM = {
        "abbr" : "abbr.",
        "adj_i" : "adj. verb",
        "adj_na" : "な-adj.",
        "adj_no" : "の-adj.",
        "adj" : "adj.",
        "adv" : "adverb",
        "adv_to" : "と-adverb",
        "aux_v" : "aux. verb",
        "aux_adj" : "aux. adj.",
        "ctr" : "ctr.",
        "exp" : "expression",
        "fam" : "familiar",
        "fem" : "feminine",
        "hon" : "honorific",
        "hum" : "humble",
        "id" : "idiomatic",
        "int" : "interjection",
        "iv" : "irregular verb",
        "male" : "masculine",
        "male_sl" : "male slang",
        "n" : "noun",
        "n_adv" : "adv. noun",
        "n_suf" : "suffix noun",
        "n_pref" : "prefix noun",
        "n_t" : "temporal noun",
        "num" : "numeric",
        "obs" : "obsolete",
        "obsc" : "obscure",
        "on_mim" : "onomatopoeia",
        "poet" : "poetical",
        "pol" : "polite",
        "uK" : "usually kanji",
        "uk" : "usually kana",
        "v1" : "ichidan",
        "v4r" : "yondan 〜る",
        "v5" : "godan",
        "v5aru" : "godan 〜ある",
        "v5b" : "godan〜ぶ",
        "v5g" : "godan 〜ぐ",
        "v5k" : "godan 〜く",
        "v5k_s" : "godan 〜いく/ゆく",
        "v5m" : "godan 〜む",
        "v5n" : "godan 〜ぬ",
        "v5r" : "godan 〜る",
        "v5r_i" : "godan 〜る (irregular)",
        "v5s" : "godan 〜す",
        "v5t" : "godan 〜つ",
        "v5u" : "godan 〜う",
        "v5u_s" : "godan 〜う (special)",
        "v5z" : "godan 〜ず",
        "vi" : "intransitive",
        "vk" : "くる-verb (special)",
        "vn" : "〜ぬ-verb (irregular)",
        "vs" : "する-verb",
        "vs_s" : "する-verb (special)",
        "vs_i" : "する-verb (irregular)",
        "vt" : "transitive",
        "vulg" : "vulgar",
        # begin of jmndict entry types
        "malegiven" : "male",
        "femalegiven" : "female",
        "person" : "full"}
    # SHORT: full description -> tag code (inverse mapping of LONG).
    SHORT = { v: k for k,v in LONG.items() }
class Entry(namedtuple('EntryBase', ['kanji', 'readings', 'translations', 'links'])):
    """One dictionary entry: kanji forms, readings, translations and links."""
    def pretty_print(self, newlines=False, lang='eng'):
        """Render the entry; translations are numbered starting at 1.

        With newlines=True each translation goes on its own indented line.
        """
        joined_readings = ', '.join(str(variant) for variant in self.readings)
        if self.kanji:
            joined_kanji = ', '.join(str(variant) for variant in self.kanji)
            heading = '{} ({})'.format(joined_kanji, joined_readings)
        else:
            heading = joined_readings
        numbered = ('{}. {}'.format(index + 1, translation.pretty_print(lang))
                    for index, translation in enumerate(self.translations))
        separator = '\n ' if newlines else '; '
        lead = '\n ' if newlines else ' '
        return '{}:{}{}'.format(heading, lead, separator.join(numbered))
    def __str__(self):
        return self.pretty_print()
class Variant(namedtuple('VariantBase', ['moji', 'info', 'prio'])):
    """A written or read form (moji) with optional usage info and a priority tag."""
    def pretty_print(self):
        """Return the form, with its info string appended when present."""
        return '{0.moji} {0.info}'.format(self) if self.info else self.moji
    def __str__(self):
        return self.pretty_print()
# Plain record for an external reference attached to an entry:
# tag (link kind), human-readable description, and target URI.
class Link(namedtuple('LinkBase', ['tag', 'description', 'uri'])):
    pass
class Translation(namedtuple('TranslationBase', ['gloss',
                                                 'gloss_dict',
                                                 'kanji_limited',
                                                 'reading_limited',
                                                 'pos_info',
                                                 'xrefs',
                                                 'antonyms',
                                                 'field_of_use',
                                                 'misc',
                                                 'dialect',
                                                 'info'])):
    """One sense of a dictionary entry with its glosses and usage metadata."""
    @property
    def usage_info(self):
        """Part-of-speech tags concatenated with the misc tags."""
        return self.pos_info + self.misc
    def pretty_print(self, lang='eng'):
        """Join the glosses for *lang*, prefixed by usage tags when any exist."""
        joined = ', '.join(self.gloss_dict[lang])
        tags = self.usage_info
        return str(tags) + ' ' + joined if tags else joined
    def __str__(self):
        return self.pretty_print()
class NameTranslation(namedtuple('NameTranslationBase', ['translations', 'types'])):
    """Translations of a proper name together with its name-type tags."""
    def pretty_print(self, lang='ignored'):
        """Join the translations, prefixed by the type tags when present.

        The lang argument is accepted for interface parity but unused.
        """
        joined = ', '.join(self.translations)
        return str(self.types) + ' ' + joined if self.types else joined
    def __str__(self):
        return self.pretty_print()
# Stroke-order data for one character from the KanjiVG project.
KanjiVGEntry = namedtuple('KanjiVGEntry', ['strokes', 'groups'])
# One KANJIDIC record: the character itself plus its codepoint, radicals,
# school grade, stroke count, variants, frequency rank, radical names,
# JLPT level, dictionary/query codes, readings+meanings (rms), name readings
# (nanori), decomposition, and the linked KanjiVG stroke data.
KanjidicEntry = namedtuple('KanjidicEntry', [
    'literal',
    'codepoint',
    'radicals',
    'grade',
    'stroke_count',
    'variants',
    'frequency',
    'rad_names',
    'jlpt_level',
    'dic_numbers',
    'query_codes',
    'rms',
    'nanori',
    'decomposition',
    'kanjivg'])
|
import random
class ComputerPlayer():
    """Simple backgammon AI that picks a move by scanning the board in a
    fixed direction and keeping the first/last valid die move it finds."""
    # Class-level defaults; every one is overwritten per instance in __init__.
    backgammon = None
    currentBoard = None
    dice = None
    colour = None
    howGood = 0
    playerMove = 0
    playerPosition = 0
    def __init__(self,backgammon,currentBoard,dice,colour,howGood):
        # backgammon: rules object providing validPlayerInstructions() and
        #             determineNewPosition()
        # currentBoard: board cells; cell[0] holds the occupying colour
        # dice: die values available this turn
        # howGood: strategy selector used by rankingMove (1, 2 or 3)
        self.backgammon = backgammon
        self.currentBoard = currentBoard
        self.dice = dice
        self.colour = colour
        self.howGood = howGood
    def determinePositionAndMove(self,forward):
        # Scan the board (front-to-back when `forward`, else back-to-front)
        # for a piece of our colour with a valid die move. The last valid
        # (position, die) pair seen is kept; the scan stops early when a
        # "good" destination is found.
        if forward:
            counter = 0
            direction = 1  # NOTE(review): never read afterwards — dead assignment
        else:
            counter = len(self.currentBoard) -1
        foundAGoodMove = False
        while 0 <= counter < len(self.currentBoard) and not foundAGoodMove:
            if self.currentBoard[counter][0] == self.colour:
                diceCounter = 0
                while diceCounter < len(self.dice) and not foundAGoodMove:
                    die = self.dice[diceCounter]
                    if self.backgammon.validPlayerInstructions(self.currentBoard,self.colour,self.dice,counter,die,True):
                        self.playerPosition = counter
                        self.playerMove = die
                        # NOTE(review): this compares the whole destination
                        # *cell* to 1; presumably the piece count was meant
                        # (e.g. cell[1] == 1) — confirm against the board
                        # representation before changing.
                        if self.currentBoard[self.backgammon.determineNewPosition(self.colour,counter,die)] == 1:
                            foundAGoodMove = True
                    diceCounter += 1
            if forward:
                counter += 1
            else:
                counter -= 1
    def rankingMove(self):
        # Map skill level to scan direction: 1 = forward, 2 = backward,
        # 3 = randomly one or the other.
        if self.howGood == 1:
            self.determinePositionAndMove(True)
        elif self.howGood == 2:
            self.determinePositionAndMove(False)
        elif self.howGood == 3:
            if random.randint(0,2) == 1:
                self.determinePositionAndMove(True)
            else:
                self.determinePositionAndMove(False)
    def getPlayerPosition(self,colour,maxPosition):
        # Recomputes the move, then reports the chosen board position.
        # (colour/maxPosition are part of the shared player interface; unused here.)
        self.rankingMove()
        return self.playerPosition
    def getPlayerDieToMove(self,colour,dice):
        # Returns the die chosen by the last rankingMove()/getPlayerPosition() call.
        return self.playerMove
    def displayToPlayer(self,message="",ending=""):
        # Computer players ignore UI messages.
        pass
|
#!/usr/bin/env python3
"""Includes execution of classification algorithms from skicit learn package.
Usage:
python3 words.py <URL>
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
def execute_classification(clf, data, target, split_ratio):
    """Train *clf* on a random split of the data and score it on the rest.

    Args:
        clf: Classifier implementing fit() and score().
        data: Available data attributes.
        target: Class attribute values.
        split_ratio: Fraction of the data used for training.
    Returns:
        Accuracy of the trained classifier on the held-out test split.
    """
    xs_train, xs_test, ys_train, ys_test = train_test_split(
        data, target, train_size=split_ratio)
    clf.fit(xs_train, ys_train)
    return clf.score(xs_test, ys_test)
def run_algorithm(clf, data, target, n_samples):
    """Run the classification *n_samples* times with fresh random splits.

    Args:
        clf: Classifier to evaluate.
        data: Available data attributes.
        target: Class attribute values.
        n_samples: Number of repetitions.
    Returns:
        Mean and standard deviation of the test-set accuracies.
    """
    scores = np.array(
        [execute_classification(clf, data, target, 0.8) for _ in range(n_samples)])
    return scores.mean(), scores.std()
def run_algorithm_with_pca(clf, data, target, n_pca_components_array, n_samples):
    """Evaluate *clf* on PCA-reduced data for each component count.

    For every entry of n_pca_components_array the data is projected onto that
    many principal components and the classification is repeated *n_samples*
    times with fresh random splits.

    Args:
        clf: Classifier to evaluate.
        data: Available data attributes.
        target: Class attribute values.
        n_pca_components_array: Numbers of PCA components to try.
        n_samples: Repetitions per component count.
    Returns:
        Arrays of the mean and standard deviation of the scores, one entry
        per component count.
    """
    score_mean_array = np.zeros(len(n_pca_components_array))
    score_std_array = np.zeros(len(n_pca_components_array))
    for idx, n_components in enumerate(n_pca_components_array):
        pca = PCA(n_components=n_components)
        pca.fit(data)
        reduced = pca.transform(data)
        samples = np.array(
            [execute_classification(clf, reduced, target, 0.8) for _ in range(n_samples)])
        score_mean_array[idx] = samples.mean()
        score_std_array[idx] = samples.std()
    return score_mean_array, score_std_array
def calculate_data_variance_ratio(n_pca_components_array, data):
    """Calculate the variance ratio PCA captures for each component count.

    Args:
        n_pca_components_array: Array of PCA component counts to try.
        data: Available data set.
    Returns:
        vr: Array of total explained-variance ratios, one per component count.
    """
    vr = np.zeros(len(n_pca_components_array))
    # enumerate replaces the manually-maintained `i` counter of the original.
    for i, n_pca_component in enumerate(n_pca_components_array):
        pca = PCA(n_components=n_pca_component)
        pca.fit(data)
        vr[i] = sum(pca.explained_variance_ratio_)
    return vr
|
# /*
# Copyright 2011, Lightbox Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# */
# MapReduce job configuration consumed by the framework: input keys are
# plain text, emitted values are serialized as JSON.
keyType = 'text'
valueType = 'json'
def mapper(key, entry, context):
    """Extract IE cookie fields from "Cookies" *.txt entries and emit them.

    Args:
        key: framework-provided record key, passed through to context.emit.
        entry: file-system entry object (extension(), ['path'], getStream()).
        context: framework context providing emit(key, value).
    """
    out = None
    try:
        # Only cookie text files: *.txt with "Cookies" somewhere in the path.
        if (entry.extension() == 'txt' and entry['path'].find("Cookies") > 0):
            data = entry.getStream() # can also pass in "Content" as specifier
            lines = data.readlines()
            if (len(lines) >= 4):
                # First four lines of an IE cookie file: name, value, domain, flags.
                keys = ['name', 'value', 'domain', 'flags']
                vals = [l.strip() for l in lines[:4]] # gets the first four items in list, and removes leading/trailing whitespace
                out = dict(zip(keys, vals)) # zip takes two lists and returns a list of pairs, which dict() turns into a dictionary
    except Exception:
        # Narrowed from a bare `except:`; malformed entries are still skipped
        # deliberately (best-effort extraction), but KeyboardInterrupt and
        # SystemExit now propagate.
        pass
    else:
        # Emit outside the try so errors raised by emit() itself propagate,
        # matching the original control flow.
        if out is not None:
            context.emit(key, out)
|
# Read the car's speed and fine R$7 for every km/h above the 80 km/h limit.
velocidade = int(input('Digite a velocidade do carro: '))
limite = 80
if velocidade <= limite:
    print('Você está dentro do limite de velocidade.')
else:
    valor = (velocidade - limite) * 7
    print('Você foi multado no valor de R${}.00'.format(valor))
# Generated by Django 2.2.6 on 2021-01-26 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional `division` CharField to
    Consumer.

    NOTE: generated by Django; do not edit operations by hand.
    """
    dependencies = [
        ('consumers', '0015_auto_20200602_1252'),
    ]
    operations = [
        migrations.AddField(
            model_name='consumer',
            name='division',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
|
import sympy
def error(f, err_vars=None):
    """Return LaTeX for the Gaussian error propagation of sympy expression *f*.

    Builds sqrt(sum((df/dv)^2 * sigma_v^2)) over the given symbols and renders
    each introduced error symbol as \\sigma_{v}.

    Args:
        f: sympy expression to propagate errors through.
        err_vars: symbols to propagate over; defaults to all free symbols of f.
    """
    from sympy import Symbol, latex
    s = 0
    latex_names = dict()
    # Fixed: identity comparison with None (PEP 8 / E711) instead of `== None`,
    # which can be hijacked by __eq__ on sympy objects.
    if err_vars is None:
        err_vars = f.free_symbols
    for v in err_vars:
        err = Symbol('latex_std_' + v.name)
        s += f.diff(v)**2 * err**2
        latex_names[err] = '\\sigma_{' + latex(v) + '}'
    return latex(sympy.sqrt(s), symbol_names=latex_names)
# Symbols: counts N_1, N_2, coincidence count N_{1+2}, total N and current I.
N1, N2, N12, N, I = sympy.var('N_1 N_2 N_{1+2} N, I')
# Dead-time estimate from the two-source measurement.
T = (N1 + N2 - N12) / (2 * N1 * N2)
print('\n\nT = ', T)
print(r'\sigma_T = ', error(T))
# Charge per event.
Q = I / N
print('\n\nQ = ', Q)
print(r'\sigma_Q = ', error(Q))
|
#!/usr/bin/env python3
import os
from ..lib import utils
'''
desc: get infomation of openssl
- NAME/SYNOPSIS/DESCRIPTION/RETURN VALUES/NOTES/BUGS/SEE ALSO
'''
# doc_dir - the storage directory of data
def handle_openssl(doc_dir):
    """Create the storage directory for OpenSSL man3 docs and print
    instructions for populating it.

    Args:
        doc_dir: root data directory; an "openssl" subdirectory is created in it.
    """
    print("===============================================")
    print("==== Handling openssl info =====")
    print("==== From official source =====")
    # Renamed from `dir`, which shadowed the builtin of the same name.
    target_dir = os.path.join(doc_dir, "openssl")
    utils.mkdir(target_dir)
    print("Please step into the root directory of Openssl, "\
        f"and then copy doc/man3/*.pod to {target_dir}. \n" \
        "Or you can crawl from website: https://www.openssl.org/docs/manmaster/man3/")
    print("===============================================")
|
# coding=utf-8
from ctypes import *
from ft232.dll_h import *
import logging
class FT232:
    """Thin ctypes wrapper around FTDI's D2XX driver (ftd2xx.dll).

    Every FT_* method mirrors the corresponding D2XX call and stores the raw
    driver return code in self.status (0 == FT_OK); callers check it via
    check_status()/status_is_ok(). Windows-only (uses windll).
    """
    def __init__(self):
        # Load the FTDI driver DLL; no device is opened yet.
        self.ft232 = windll.LoadLibrary('ftd2xx.dll')
        self.handle = None
    # description:str, serialnum:str, location:int
    def open(self, description=None, serialnum=None, location=None):
        """Open a device by description, serial number, location, or index 0.

        The first non-None selector wins; raises FT_Exception on failure.
        """
        self.description = description
        self.serialnum = serialnum
        self.location = location
        if self.description:
            self.FT_OpenEx_ByDesc(self.description.encode())
        elif self.serialnum:
            self.FT_OpenEx_BySerialNum(self.serialnum.encode())
        elif self.location:
            self.FT_OpenEx_ByLocation(self.location)
        else:
            self.FT_Open(0)
        if not self.status:
            logging.debug('device %s open successfully' % (self.description))
        else:
            raise FT_Exception('FT_Open fail', STATUS(self.status))
    def close(self):
        """Close the device handle; raises FT_Exception on failure."""
        self.FT_Close()
        if not self.status:
            logging.debug('device %s close successfully' % self.description)
        else:
            raise FT_Exception('FT_Close fail', STATUS(self.status))
    def FT_ListDevices(self):
        # Returns (status, number of connected FTDI devices).
        num = c_int()
        self.status = self.ft232.FT_ListDevices(
            byref(num), None, FT_LIST_NUMBER_ONLY)
        return (self.status, num.value)
    def FT_CreateDeviceInfoList(self):
        # Builds the driver-side device info list; returns the count as c_int.
        num = c_int()
        self.status = self.ft232.FT_CreateDeviceInfoList(byref(num))
        return num
    def FT_GetDeviceInfoList(self):
        # Returns raw FT_DEVICE_LIST_INFO_NODE structs.
        # NOTE(review): the buffer is fixed at 8 nodes — more than 8 attached
        # devices would overflow; get_devinfos() below sizes it correctly.
        infos = []
        num = c_int()
        nodes = (FT_DEVICE_LIST_INFO_NODE * 8)()
        pDest = nodes
        self.status = self.ft232.FT_GetDeviceInfoList(
            pDest, byref(num))
        for i in range(num.value):
            infos.append(pDest[i])
        return infos
    def FT_Open(self, iDevice):
        # Open by zero-based device index and store the handle.
        handle = FT_HANDLE()
        self.status = self.ft232.FT_Open(iDevice, byref(handle))
        self.handle = handle
    def FT_Close(self):
        self.status = self.ft232.FT_Close(self.handle)
    def FT_OpenEx_ByLocation(self, location):
        handle = FT_HANDLE()
        self.status = self.ft232.FT_OpenEx(
            location, FT_OPEN_BY_LOCATION, byref(handle))
        self.handle = handle
    def FT_OpenEx_ByDesc(self, Desc):
        # Desc must be a bytes string (see open()).
        handle = FT_HANDLE()
        self.status = self.ft232.FT_OpenEx(
            Desc, FT_OPEN_BY_DESCRIPTION, byref(handle))
        self.handle = handle
    def FT_OpenEx_BySerialNum(self, serialNum):
        # serialNum must be a bytes string (see open()).
        handle = FT_HANDLE()
        self.status = self.ft232.FT_OpenEx(
            serialNum, FT_OPEN_BY_SERIAL_NUMBER, byref(handle))
        self.handle = handle
    def FT_SetBaudRate(self, speed):
        self.status = self.ft232.FT_SetBaudRate(self.handle, speed)
    def FT_SetDataCharacteristics(self, uWordLength, uStopBits, uParity):
        # Word length / stop bits / parity are single-byte driver arguments.
        uWordLength = c_char(uWordLength)
        uStopBits = c_char(uStopBits)
        uParity = c_char(uParity)
        self.status = self.ft232.FT_SetDataCharacteristics(self.handle,
                                                           uWordLength, uStopBits, uParity)
    def FT_GetQueueStatus(self):
        # Number of bytes waiting in the receive queue.
        amountInRxQueue = c_int()
        self.status = self.ft232.FT_GetQueueStatus(
            self.handle, byref(amountInRxQueue))
        return amountInRxQueue.value
    def FT_GetComPortNumber(self):
        # COM port assigned by the VCP driver (-1 if none).
        ComPortNumber = c_int()
        self.status = self.ft232.FT_GetComPortNumber(
            self.handle, byref(ComPortNumber))
        return ComPortNumber.value
    def FT_GetDeviceInfo(self):
        # Returns (device type, device id, serial number, description).
        fttype = c_int()
        devid = c_int()
        serialnum = (c_char * 16)()
        description = (c_char * 64)()
        dummy = None
        self.status = self.ft232.FT_GetDeviceInfo(self.handle, byref(
            fttype), byref(devid), serialnum, description, dummy)
        # print(fttype.value, devid.value, serialnum.value, description.value)
        return (fttype.value, devid.value, serialnum.value, description.value)
    def FT_GetStatus(self):
        # Returns (rx queue bytes, tx queue bytes, event status word).
        AmountInRxQueue = c_int()
        AmountInTxQueue = c_int()
        EventStatus = c_int()
        self.status = self.ft232.FT_GetStatus(self.handle, byref(
            AmountInRxQueue), byref(AmountInTxQueue), byref(EventStatus))
        return AmountInRxQueue.value, AmountInTxQueue.value, EventStatus.value
    def FT_ResetDevice(self):
        self.status = self.ft232.FT_ResetDevice(self.handle)
    def FT_SetTimeouts(self, ReadTimeout, WriteTimeout):
        # timeout in ms
        self.status = self.ft232.FT_SetTimeouts(
            self.handle, ReadTimeout, WriteTimeout)
    def FT_SetUSBParameters(self, InTransferSize, wOutTransferSize):
        # USB request transfer sizes in bytes.
        self.status = self.ft232.FT_SetUSBParameters(
            self.handle, InTransferSize, wOutTransferSize)
    def FT_SetLatencyTimer(self, ucTimer):
        # ucTimer: latency in ms, valid range 2-255.
        ucTimer = c_char(ucTimer)
        self.status = self.ft232.FT_SetLatencyTimer(self.handle, ucTimer)
    def FT_GetLatencyTimer(self, ucTimer):
        # ucTimer 2-255 (ms)
        # NOTE(review): the driver writes the result through a pointer — the
        # caller must pass a ctypes byref/buffer here; confirm call sites.
        self.status = self.ft232.FT_GetLatencyTimer(self.handle, ucTimer)
    def FT_SetFlowControl(self, usFlowControl, uXon, uXoff):
        # uXon/uXoff: XON/XOFF characters used when software flow control is on.
        uXon = c_char(uXon)
        uXoff = c_char(uXoff)
        self.status = self.ft232.FT_SetFlowControl(
            self.handle, usFlowControl, uXon, uXoff)
    def FT_SetBitMode(self, ucMask, ucMode):
        # print(ucMask, ucMode)
        # ucMask: pin direction bitmask; ucMode: bit mode (both single bytes).
        ucMask = c_char(ucMask)
        ucMode = c_char(ucMode)
        self.status = self.ft232.FT_SetBitMode(self.handle, ucMask, ucMode)
        logging.debug('FT_SetBitMode :' + str(ucMask) + str(ucMode))
    # def FT_GetBitMode(self):
    #     mode = c_char()
    #     self.status = self.ft232.FT_GetBitmode(self.handle, bref(mode))
    #     return mode.value
    def FT_SetChars(self, uEventCh, uEventChEn, uErrorCh, uErrorChEn):
        # Event/error characters and their enable flags (single bytes).
        uEventCh = c_char(uEventCh)
        uEventChEn = c_char(uEventChEn)
        uErrorCh = c_char(uErrorCh)
        uErrorChEn = c_char(uErrorChEn)
        self.status = self.ft232.FT_SetChars(self.handle, uEventCh, uEventChEn,
                                             uErrorCh, uErrorChEn)
    def FT_Read(self, dwBytesToRead):
        # Reads up to dwBytesToRead bytes; raw data is stored in self.inbytes
        # and the number of bytes actually read is returned.
        # NOTE(review): unlike the other calls, self.status is not updated here.
        lpBuffer = create_string_buffer(dwBytesToRead)
        BytesReturned = c_int()
        self.ft232.FT_Read(self.handle, lpBuffer, dwBytesToRead,
                           byref(BytesReturned))
        self.inbytes = lpBuffer.raw
        return BytesReturned.value
    def FT_Write(self, outbytes):
        # Writes a bytes object; returns the number of bytes accepted.
        BytesWritten = c_int()
        self.status = self.ft232.FT_Write(self.handle, outbytes, len(outbytes),
                                          byref(BytesWritten))
        logging.debug('FT_Write :' + str(outbytes))
        return BytesWritten.value
    def check_status(self, msg=''):
        """Return True if the last driver call succeeded; log a warning otherwise."""
        if self.status:
            logging.warning(str(msg) + str(STATUS(self.status)))
            return False
        return True
    def status_is_ok(self):
        """Poll the device and report whether it is open and responding."""
        if self.handle is None:
            return False
        self.FT_GetStatus()
        return self.check_status()
    def echostruct(self, tar):
        # tar: FT_DEVICE_LIST_INFO_NODE struct — print all fields on one line.
        # NOTE(review): the inner and outer else branches print identically;
        # the special-casing only affects how a non-null ftHandle is shown.
        for name, value in tar._fields_:
            if name == 'ftHandle':
                if getattr(tar, name):
                    print(name, hex(getattr(tar, name)), end=';')
                else:
                    print(name, getattr(tar, name), end=';')
            else:
                print(name, getattr(tar, name), end=';')
        print()
    def get_devinfos(self):
        """Return one dict of struct fields per attached FTDI device."""
        devnums = self.FT_CreateDeviceInfoList().value
        infos = []
        num = c_int()
        # Buffer sized to the actual device count (unlike FT_GetDeviceInfoList).
        nodes = (FT_DEVICE_LIST_INFO_NODE * devnums)()
        pDest = nodes
        self.status = self.ft232.FT_GetDeviceInfoList(
            pDest, byref(num))
        for i in range(num.value):
            info = {}
            for name, value in pDest[i]._fields_:
                info[name] = getattr(pDest[i], name)
            infos.append(info)
        return infos
    def get_ft_status(self, info):
        # Decode the Flags bitfield of a device-info dict.
        return {
            'isOpend': bool(info['Flags'] & 0x01),
            'isHighSpeed': bool(info['Flags'] & 0x02)
        }
    def get_ft_name(self, info):
        # Human-readable device type from the Type field.
        return FT_DEVICE(info['Type']).name
if __name__ == '__main__':
    # Basic logging setup for ad-hoc manual testing of the wrapper.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s')
from bs4 import BeautifulSoup
from lxml import html
import pandas as pd
import webbrowser
import requests
import datetime
import json
import time
import csv
import sys
import re
import os
#---------------------InitializeValue--------------------------
# Module-level state shared by the scraping helpers below.
url = 'http://edu.kiau.ac.ir'   # student-portal base URL
user = ''                       # student username (filled in before login)
pasw = ''                       # student password (filled in before login)
page = 1                        # current results page of the course grid
table = []                      # accumulated course rows (filled by parsTable)
cookies = ''                    # session cookie dict used for authenticated requests
sessionId = ''                  # last ASP.NET session id seen
courseList = []
tableCource = []
courseTable = []
captchaCode = []                # history of OCR-recognized captcha values
SubjectCorse = ''
sessionIdHistory = []           # all session ids obtained so far (last one is active)
# ASP.NET WebForms login form fields; the hidden __* values are scraped from
# the login page by InitializeValue() before posting.
payload = {
    "__EVENTTARGET": '' ,
    "__EVENTARGUMENT": '' ,
    "__VIEWSTATE": '' ,
    "__VIEWSTATEGENERATOR": '' ,
    "__EVENTVALIDATION": '' ,
    "txtUserName": '' ,
    "txtPassword": '' ,
    "texttasvir": captchaCode ,
    "LoginButton0":"ورود دانشجو"
}
# Form fields for the course-search grid postback (filled by SetPayloadList).
payloadTable = {
    "ctl00$ScriptManager1": 'ctl00$UpdatePanel1|ctl00$ContentPlaceHolder1$btnSave4' ,
    "ctl00$ContentPlaceHolder1$a1": "RadioButton1",
    "ctl00$ContentPlaceHolder1$TextBox1": "",
    "__EVENTTARGET": '' ,
    "__EVENTARGUMENT": '' ,
    "__LASTFOCUS": '',
    "__VIEWSTATE": '' ,
    "__VIEWSTATEGENERATOR": '' ,
    "__VIEWSTATEENCRYPTED": '',
    "__EVENTVALIDATION": '' ,
    "__ASYNCPOST": True,
    "ctl00$ContentPlaceHolder1$btnSave4": " جــســتجــوی دروس"
}
# --------------------Functions---------------------
def ocr_space_file(filename, overlay=False, api_key='55c76e3c7d88957', language='eng'):
    """ OCR.space API request with local file.
    Python3.5 - not tested on 2.7
    :param filename: Your file path & name.
    :param overlay: Is OCR.space overlay required in your response.
                    Defaults to False.
    :param api_key: OCR.space API key.
                    Defaults to 'helloworld'.
    :param language: Language code to be used in OCR.
                    List of available language codes can be found on https://ocr.space/OCRAPI
                    Defaults to 'en'.
    :return: Result in JSON format.
    """
    form_fields = {'isOverlayRequired': overlay,
                   'apikey': api_key,
                   'language': language,
                   }
    with open(filename, 'rb') as image_file:
        response = requests.post('https://api.ocr.space/parse/image',
                                 files={filename: image_file},
                                 data=form_fields,
                                 )
    return response.content.decode()
def GetValueById(object, element, id):
    """Return the `value` attribute of the first <element id=...> node in the
    parsed page, or '' when the node is missing.

    NOTE: parameter names `object` and `id` shadow builtins; kept unchanged
    for backward compatibility with existing callers.
    """
    try:
        return object.xpath('//{}[@id="{}"]/@value'.format(element, id))[0]
    except Exception:
        # Narrowed from a bare `except:`; an absent node (IndexError) or a bad
        # query still yields '' as before, but KeyboardInterrupt propagates.
        return ''
def GetValueByName(object, element, name):
    """Return the `value` attribute of the first <element name=...> node in
    the parsed page, or '' when the node is missing.

    NOTE: parameter name `object` shadows a builtin; kept for compatibility.
    """
    try:
        # Bug fix: the original formatted the *builtin* `id` function into the
        # query instead of the `name` argument, so the XPath lookup could
        # never match and the function always returned ''.
        return object.xpath('//{}[@name="{}"]/@value'.format(element, name))[0]
    except Exception:
        # Narrowed from a bare `except:`; missing nodes still yield ''.
        return ''
def InitializeValue():
    # Fetch the login page to obtain a fresh ASP.NET session id and the
    # hidden WebForms state fields, then fill the login `payload` with them
    # plus the stored credentials.
    global user, pasw
    # Get Value And Session Temp
    BasePage = requests.get(url+"/login.aspx")
    sessionId = BasePage.cookies.get('ASP.NET_SessionId')
    sessionIdHistory.append(sessionId)
    tree = html.fromstring(BasePage.content)
    payload['__VIEWSTATE'] = GetValueById(tree,'input','__VIEWSTATE')
    payload['__VIEWSTATEGENERATOR'] = GetValueById(tree,'input','__VIEWSTATEGENERATOR')
    payload["txtUserName"] = user
    payload["txtPassword"] = pasw
    payload['__EVENTTARGET'] = GetValueById(tree,'input','__EVENTTARGET')
    payload['__EVENTARGUMENT'] = GetValueById(tree,'input','__EVENTARGUMENT')
    payload['__EVENTVALIDATION'] = GetValueById(tree,'input','__EVENTVALIDATION')
def GetNewSession():
    # Start a fresh portal session: scrape the login form, solve the captcha,
    # then post the login payload with the new session cookie.
    global cookies
    InitializeValue()
    GetNewCapcha()
    cookies = dict({'ASP.NET_SessionId':str(sessionIdHistory[-1])})
    # The trailing .cookies access discards the response; only the session
    # cookie set above is reused afterwards.
    requests.post(url+"/login.aspx",data=payload,cookies=cookies).cookies
def GetNewCapcha():
    # Download the current captcha image for the active session, OCR it via
    # ocr_space_file, and store the recognized digits in the login payload.
    try:
        cookiesTemp = dict({'ASP.NET_SessionId':str(sessionIdHistory[-1])})
        r = requests.get(url+"/captcha.aspx",cookies=cookiesTemp)
        f=open('yourcaptcha.png','wb')
        f.write(r.content)
        f.close()
        time.sleep(2)
        # NOTE(review): eval() of an external API response is unsafe — the OCR
        # service controls this text. json.loads would be the safe parser
        # (and would make the 'false'->'False' rewrite unnecessary).
        test_file = ocr_space_file(filename='yourcaptcha.png' ,language='pol').replace('false','False')
        captchaCode.append( eval(test_file)["ParsedResults"][0]["ParsedText"] )
        payload['texttasvir'] = str(int(captchaCode[-1]))
    except:
        # NOTE(review): bare except + unbounded recursion — any persistent
        # failure (network down, OCR returning non-digits) recurses until
        # RecursionError. Consider a retry limit.
        GetNewCapcha()
def SetPayloadList(object,page):
    """Prepare `payloadTable` to request page *page* of the course grid.

    :param object: previous requests.Response whose hidden fields are reused.
    :param page: 1-based page number of the ASP.NET GridView pager.
    """
    # Echo back the ASP.NET hidden fields from the previous response.
    tree = html.fromstring(object.content)
    payloadTable['__VIEWSTATE'] = GetValueById(tree,'input','__VIEWSTATE')
    payloadTable['__VIEWSTATEGENERATOR'] = GetValueById(tree,'input','__VIEWSTATEGENERATOR')
    payloadTable['__EVENTTARGET'] = 'ctl00$ContentPlaceHolder1$grdCourseList'
    payloadTable['__EVENTARGUMENT'] = 'Page${}'.format(page)
    payloadTable['__EVENTVALIDATION'] = GetValueById(tree,'input','__EVENTVALIDATION')
    # payloadTable['ctl00$ContentPlaceHolder1$btnSave4'] = GetValueById(tree,'input','ctl00_ContentPlaceHolder1_btnSave4')
    payloadTable['ctl00$ScriptManager1'] = 'ctl00$UpdatePanel1|ctl00$ContentPlaceHolder1$grdCourseList'
def SaveTable (DataTable):
    """Write the scraped course rows to KiauTables.csv (UTF-8).

    Rows shorter than the header are malformed scrapes and are skipped,
    matching the original behavior (which dropped them via a bare except).
    """
    with open('KiauTables.csv', 'w',encoding='utf-8', newline='') as csvfile:
        # Column headers (Persian), written exactly as displayed on the site.
        # Note the duplicated 'نوع عملی' key: as in the original dict literal,
        # both of those columns receive row[5].
        fieldnames = ['مشخصه', 'نام درس', 'مقطع کلاس', 'نظری', 'نوع عملی', 'نوع عملی', 'جنسیت', 'گروه کلاس',
                      'باقي مانده', 'ساعت کلاس', 'ساعت امتحان', 'ت امتحان', 'نام استاد', 'گروه بندی']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row in DataTable:
            # Explicit length check replaces the original silent `except: pass`.
            if len(row) < len(fieldnames):
                continue
            writer.writerow(dict(zip(fieldnames, row)))
def parsTable(soup):
    """Append the text of every cell of each grid row to the global `table`."""
    # Both row styles of the ASP.NET GridView carry course data.
    rows = soup.find_all('tr', {'class':'GridViewRow'})
    rows.extend(soup.find_all('tr', {'class':'GridViewAlternatingRow'}))
    for grid_row in rows:
        cells = grid_row.find_all("td")
        table.append([cell.text for cell in cells])
def GetTable():
    """Scrape the full course list, caching the result in PandaDBcourses.csv.

    If the cache file was already written today it is loaded instead of
    re-scraping. Returns a pandas DataFrame with one row per course.
    """
    global page, payload, payloadTable, cookies
    # Reuse today's cached CSV if present (dropping the saved index column).
    if os.path.exists("PandaDBcourses.csv"):
        # Compare (day, month, year) of now vs. the cache file's mtime.
        now = (lambda d=datetime.datetime.now(): (d.day, d.month, d.year))()
        lastModified = (lambda d=datetime.datetime.fromtimestamp(os.path.getmtime("PandaDBcourses.csv")): (d.day, d.month, d.year))()
        if now == lastModified:
            return (lambda temp=pd.read_csv("PandaDBcourses.csv") : temp.drop(columns = [temp.columns[0]]))()
    # Two passes: one per radio-button category on the search form.
    for i in range(2):
        page = 1
        lists = requests.post(url+"/list_ara.aspx",data=payload,cookies=cookies)
        # Submit the search form for the first result page.
        SetPayloadList(lists,1)
        payloadTable['ctl00$ScriptManager1'] = 'ctl00$UpdatePanel1|ctl00$ContentPlaceHolder1$btnSave4'
        payloadTable['ctl00$ContentPlaceHolder1$a1'] = 'RadioButton{}'.format(i+1)
        payloadTable.__setitem__('ctl00$ContentPlaceHolder1$btnSave4','جــســتجــوی دروس')
        content = requests.post(url+"/list_ara.aspx",cookies=cookies,data=payloadTable)
        payloadTable.__delitem__('ctl00$ContentPlaceHolder1$btnSave4')
        page += 1
        # Parse the first page into the global `table`.
        soup = BeautifulSoup(content.text, 'html.parser')
        parsTable(soup)
        # Walk the remaining pages until the pager shows the current page as
        # the last one.
        while(1):
            try:
                SetPayloadList(content,page)
                content = requests.post(url+"/list_ara.aspx",cookies=cookies,data=payloadTable)
                soup = BeautifulSoup(content.text, 'html.parser')
                parsTable(soup)
                # Last cell of the pager row holds the final page number.
                PageNumber = soup.find_all('tr',{"class": "pgr"})[0].find_all('td')[-1].text
                if(PageNumber != "..." and int(PageNumber) == page):
                    break
                page += 1
            except:
                # NOTE(review): a persistent parse/network failure here loops
                # forever on the same page -- confirm it cannot happen live.
                pass
    SaveTable(table)
    return SetPandas(table)
def SetPandas(dataTable):
    """Convert raw scraped rows into a pandas DataFrame and cache it as CSV.

    :param dataTable: iterable of 14-column rows; column 0 must parse as int.
    :return: the constructed DataFrame (also written to PandaDBcourses.csv).
    """
    # English column names, in the same order as the scraped cells.
    fieldnames = ['CourseId', 'CourseName', 'ClassSection', 'Theoretical', 'PracticalType', 'PracticalTypes',
                  'Gender', 'ClassGroup', 'Rest', 'timeClass', 'ExamTime', 'DateExam', 'ProfessorName', 'Grouping']
    # CourseId is numeric; every other column is kept as text. The dict
    # comprehension replaces 14 hand-written, error-prone column lines.
    columns = {name: [int(row[0]) if idx == 0 else row[idx] for row in dataTable]
               for idx, name in enumerate(fieldnames)}
    pdTable = pd.DataFrame(columns)
    # Cache to disk so GetTable() can skip re-scraping later the same day.
    pdTable.to_csv("PandaDBcourses.csv", sep=",", encoding='utf-8')
    return pdTable
def GetReportCard():
    """Find this term's report-card rows and match them to the course table.

    Appends one single-row DataFrame per enrolled course to the global
    `courseList` and returns it.
    """
    global cookies , courseList, courseTable
    # The first GET primes the server-side session for the totals page.
    requests.get(url+"/Karnameha.aspx",cookies=cookies)
    report = requests.get(url+"/totalkarnameh.aspx",cookies=cookies)
    # ctl00_term , ctl00_ContentPlaceHolder1_dataListTotalkarnameh_ctl???_riz_karnameh_Label1
    soup = BeautifulSoup( report.text , 'html.parser' )
    # Current term label; ' عادی' selects the regular (non-summer) entry.
    date = soup.find_all(id='ctl00_term')[0].text + ' عادی'
    # Scan the numbered term labels until the current term is found.
    for i in range(100) :
        dateTow = soup.find(id=str('ctl00_ContentPlaceHolder1_dataListTotalkarnameh_ctl0{}_riz_karnameh_Label1'.format(i)))
        if dateTow.text == date :
            # Climb to the enclosing table and harvest both grid row styles.
            tableRow = dateTow.parent.parent.parent
            rows = tableRow.find_all('tr',{'class':'GridViewRow'})
            rows.extend(tableRow.find_all('tr',{'class':'GridViewAlternatingRow'}))
            for row in rows :
                colsTemp = row.find_all("td")
                cols = list(map(lambda x: x.text , colsTemp))
                tableCource.append(cols)
            break
    # Look up each report-card course in the scraped course table by id.
    for row in tableCource: courseList.append(courseTable.where(courseTable['CourseId']==int(row[1])).dropna())
    return courseList
def convetToHtmlGrid(dataTable):
    """Concatenate the DataFrames and write them to CourseList.html.

    :param dataTable: iterable of pandas DataFrames (one per course).
    """
    html = pd.concat(dataTable).to_html()
    # `with` guarantees the handle is flushed and closed; the original opened
    # the file and never closed it.
    with open('CourseList.html', 'w', encoding='utf-8') as out:
        out.write(html)
# ----------------------Main------------------------
def main():
    """Log in with CLI credentials, scrape courses, and open the HTML result.

    Usage: python kiauRequest.py UserName PassWord
    """
    global cookies, ReportCard, courseTable, user, pasw
    # Require exactly the two credential arguments.
    if len(sys.argv) != 3:
        print('Please enter the correct parameter >>> Example: python kiauRequest.py UserName PassWord')
        exit()
    # SetUser Information
    user = sys.argv[1]
    pasw = sys.argv[2]
    try:
        # Log in (captcha included) and obtain a fresh session.
        print('Get New Session----> ', end='')
        GetNewSession()
        print('Done .')
        # Scrape (or load cached) full course table.
        print('Get courseTable----> ',end='')
        courseTable = GetTable()
        print('Done .')
        # Fetch this term's report card and match it against the table.
        print('Get courses----> ',end='')
        ReportCard = GetReportCard()
        print('Done .')
        # Render the matched courses to HTML and open in the browser.
        convetToHtmlGrid(ReportCard)
        print('Save File To: {}'.format(os.path.realpath('CourseList.html')))
        webbrowser.open(os.path.realpath('CourseList.html'),new=2)
    except:
        # NOTE(review): this bare except blames credentials for ANY failure
        # (network, parsing, captcha OCR) -- consider logging the exception.
        print('The username or password is incorrect')
# Entry point guard: run main() only when executed as a script, not on import.
if __name__ == '__main__': main()
|
# -*- coding: utf-8 -*-
# Module author: @dekftgmodules, @ftgmodulesbyfl1yd
# requires: pydub numpy requests
import io
import math
import os
import requests
import numpy as np
from pydub import AudioSegment, effects
from telethon import types
from .. import loader, utils
@loader.tds
class AudioEditorMod(loader.Module):
    """Модуль для работы со звуком"""
    # Display name used by the userbot loader. The Russian command docstrings
    # below are runtime help text read by the loader and are kept verbatim.
    strings = {"name": "Audio Editor"}
    async def basscmd(self, message):
        """.bass [уровень bass'а 2-100 (Default 2)] <reply to audio>
        BassBoost"""
        # Optional boost level argument; defaults to 2, must be 2..100.
        args = utils.get_args_raw(message)
        if not args:
            lvl = 2
        else:
            if args.isdigit() and (1 < int(args) < 101):
                lvl = int(args)
            else:
                return await message.edit(f"[BassBoost] Укажи уровень от 2 до 100...")
        audio = await get_audio(message, "BassBoost")
        if not audio: return
        # Derive a low-pass cutoff from the sample statistics, then overlay a
        # boosted low-passed copy onto the (unattenuated) original.
        sample_track = list(audio.audio.get_array_of_samples())
        est_mean = np.mean(sample_track)
        est_std = 3 * np.std(sample_track) / (math.sqrt(2))
        bass_factor = int(round((est_std - est_mean) * 0.005))
        attenuate_db = 0
        filtered = audio.audio.low_pass_filter(bass_factor)
        out = (audio.audio - attenuate_db).overlay(filtered + lvl)
        await go_out(message, audio, out, audio.pref, f"{audio.pref} {lvl}lvl")
    async def fvcmd(self, message):
        """.fv [уровень шакала 2-100 (Default 25)] <reply to audio>
        Шакалинг"""
        # Same argument handling as .bass, defaulting to 25.
        args = utils.get_args_raw(message)
        if not args:
            lvl = 25
        else:
            if args.isdigit() and (1 < int(args) < 101):
                lvl = int(args)
            else:
                return await message.edit(f"[Шакал] Укажи уровень от 2 до 100...")
        audio = await get_audio(message, "Шакал")
        if not audio:
            return
        # Raising the gain far past 0 dBFS clips the audio -- the distortion
        # is the intended effect here.
        out = audio.audio + lvl
        await go_out(message, audio, out, audio.pref, f"{audio.pref} {lvl}lvl")
    async def echoscmd(self, message):
        """.echos <reply to audio>
        Эхо эффект"""
        audio = await get_audio(message, "Эхо эффект")
        if not audio: return
        out = AudioSegment.empty()
        n = 200
        none = io.BytesIO()
        # NOTE(review): from_file() on an empty BytesIO looks like it cannot
        # decode -- confirm pydub tolerates this in practice.
        out += audio.audio + AudioSegment.from_file(none)
        for i in range(5):
            # NOTE(review): `echo` is computed but never used; the overlay
            # below reuses the original segment at growing offsets instead.
            echo = audio.audio - 10
            out = out.overlay(audio.audio, n)
            n += 200
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def volupcmd(self, message):
        """.volup <reply to audio>
        Увеличить громкость на 10dB"""
        audio = await get_audio(message, "+10dB")
        if not audio: return
        # pydub's + operator is a gain change in dB.
        out = audio.audio + 10
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def voldwcmd(self, message):
        """.voldw <reply to audio>
        Уменьшить громкость на 10dB"""
        audio = await get_audio(message, "-10dB")
        if not audio: return
        out = audio.audio - 10
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def revscmd(self, message):
        """.revs <reply to audio>
        Развернуть аудио"""
        audio = await get_audio(message, "Reverse")
        if not audio: return
        out = audio.audio.reverse()
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def repscmd(self, message):
        """.reps <reply to audio>
        Повторить аудио 2 раза подряд"""
        audio = await get_audio(message, "Повтор")
        if not audio: return
        # pydub's * operator concatenates the segment with itself.
        out = audio.audio * 2
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def slowscmd(self, message):
        """.slows <reply to audio>
        Замедлить аудио 0.5x"""
        audio = await get_audio(message, "Замедление")
        if not audio: return
        # Halve the frame rate, then restamp at the original rate: the same
        # samples play twice as long (hence duration * 2 below).
        s2 = audio.audio._spawn(audio.audio.raw_data, overrides={
            "frame_rate": int(audio.audio.frame_rate * 0.5)})
        out = s2.set_frame_rate(audio.audio.frame_rate)
        await go_out(message, audio, out, audio.pref, audio.pref, audio.duration * 2)
    async def fastscmd(self, message):
        """.fasts <reply to audio>
        Ускорить аудио 1.5x"""
        audio = await get_audio(message, "Ускорение")
        if not audio: return
        # Same frame-rate trick as .slows, but at 1.5x: playback speeds up.
        s2 = audio.audio._spawn(audio.audio.raw_data, overrides={
            "frame_rate": int(audio.audio.frame_rate * 1.5)})
        out = s2.set_frame_rate(audio.audio.frame_rate)
        await go_out(message, audio, out, audio.pref, audio.pref,
                     round(audio.duration / 2))
    async def rightscmd(self, message):
        """.rights <reply to audio>
        Весь звук в правый канал"""
        audio = await get_audio(message, "Правый канал")
        if not audio: return
        # Pan fully to the right channel.
        out = effects.pan(audio.audio, +1.0)
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def leftscmd(self, message):
        """.lefts <reply to audio>
        Весь звук в левый канал"""
        audio = await get_audio(message, "Левый канал")
        if not audio: return
        # Pan fully to the left channel.
        out = effects.pan(audio.audio, -1.0)
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def normscmd(self, message):
        """.norms <reply to audio>
        Нормализовать звук (Из тихого - нормальный)"""
        audio = await get_audio(message, "Нормализация")
        if not audio: return
        out = effects.normalize(audio.audio)
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def byrobertscmd(self, message):
        '''.byroberts <reply to audio>
        Добавить в конец аудио "Directed by Robert B Weide"'''
        audio = await get_audio(message, "Directed by...")
        if not audio: return
        # Download the outro clip and append it, boosted by 8 dB.
        out = audio.audio + AudioSegment.from_file(io.BytesIO(requests.get(
            "https://raw.githubusercontent.com/Daniel3k00/files-for-modules/master/directed.mp3").content)).apply_gain(
            +8)
        await go_out(message, audio, out, audio.pref, audio.pref)
    async def cutcmd(self, message):
        """Используй .cut <начало(сек):конец(сек)> <реплай на аудио/видео/гиф>."""
        args = utils.get_args_raw(message).split(':')
        reply = await message.get_reply_message()
        if not reply or not reply.media:
            return await message.edit('Нет реплая на медиа.')
        if reply.media:
            if args:
                if len(args) == 2:
                    try:
                        await message.edit('Скачиваем...')
                        smth = reply.file.ext
                        await message.client.download_media(reply.media,
                            f'uncutted{smth}')
                        # Empty start means "from 0"; empty end means "to the
                        # clip's full duration".
                        if not args[0]:
                            await message.edit(
                                f'Обрезаем с 0 сек. по {args[1]} сек....')
                            os.system(
                                f'ffmpeg -i uncutted{smth} -ss 0 -to {args[1]} -c copy cutted{smth} -y')
                        elif not args[1]:
                            end = reply.media.document.attributes[0].duration
                            await message.edit(
                                f'Обрезаем с {args[0]} сек. по {end} сек....')
                            os.system(
                                f'ffmpeg -i uncutted{smth} -ss {args[0]} -to {end} -c copy cutted{smth} -y')
                        else:
                            await message.edit(
                                f'Обрезаем с {args[0]} сек. по {args[1]} сек....')
                            os.system(
                                f'ffmpeg -i uncutted{smth} -ss {args[0]} -to {args[1]} -c copy cutted{smth} -y')
                        await message.edit('Отправляем...')
                        await message.client.send_file(message.to_id,
                                                       f'cutted{smth}',
                                                       reply_to=reply.id)
                        os.system('rm -rf uncutted* cutted*')
                        await message.delete()
                    except:
                        # Any failure (bad args, unsupported codec) lands
                        # here; clean the temp files either way.
                        await message.edit('Этот файл не поддерживается.')
                        os.system('rm -rf uncutted* cutted*')
                        return
                else:
                    return await message.edit('Неверно указаны аргументы.')
            else:
                return await message.edit('Нет аргументов')
async def get_audio(message, pref):
    """Download the replied-to audio into a pydub AudioSegment.

    Returns a small holder object (audio, duration, voice flag, prefix,
    reply message), or None (after editing the status message) when the
    reply is not an audio file.
    """
    class audio_ae_class():
        # Plain data holder for the downloaded track and its metadata.
        audio = None
        duration = None
        voice = None
        pref = None
        reply = None
    reply = await message.get_reply_message()
    if reply and reply.file and reply.file.mime_type.split("/")[0] == "audio":
        ae = audio_ae_class()
        ae.pref = pref
        ae.reply = reply
        # assumes attributes[0] is DocumentAttributeAudio -- TODO confirm
        # for every client that sends audio files.
        ae.voice = reply.document.attributes[0].voice
        ae.duration = reply.document.attributes[0].duration
        await message.edit(f"[{pref}] Скачиваю...")
        ae.audio = AudioSegment.from_file(
            io.BytesIO(await reply.download_media(bytes)))
        await message.edit(f"[{pref}] Работаю...")
        return ae
    else:
        await message.edit(f"[{pref}] reply to audio...")
        return None
async def go_out(message, audio, out, pref, title, fs=None):
    """Export the processed segment and send it back to the chat.

    Voice notes are exported as opus/ogg at 64k; everything else as mp3.
    `fs`, when given, overrides the duration stamped on the outgoing file.
    """
    o = io.BytesIO()
    o.name = "audio." + ("ogg" if audio.voice else "mp3")
    # NOTE(review): split_to_mono() returns new segments and mutates nothing,
    # so this call discards its result and is a no-op -- confirm intent.
    if audio.voice: out.split_to_mono()
    await message.edit(f"[{pref}] Экспортирую...")
    out.export(o, format="ogg" if audio.voice else "mp3",
               bitrate="64k" if audio.voice else None,
               codec="libopus" if audio.voice else None)
    o.seek(0)
    await message.edit(f"[{pref}] Отправляю...")
    # Voice notes must not carry DocumentAttributeAudio title/performer.
    await message.client.send_file(message.to_id, o, reply_to=audio.reply.id,
                                   voice_note=audio.voice, attributes=[
            types.DocumentAttributeAudio(duration=fs if fs else audio.duration,
                                         title=title,
                                         performer="AudioEditor")] if not audio.voice else None)
    await message.delete()
|
#!/usr/bin/python
import numpy as np
import pylab as py
import os,sys
from COMMON import mpc, light, grav, msun, yr, nanosec, week
import mpmath
#from time import time
############################################################################
#INPUT PARAMETERS:
case='detection' #For 'upper_limit', the FAP will be 40%, whereas for a 'detection' it will be 0.1%. The DP will be the same in both cases.
#case='detection'
#numtemvec=np.linspace(1000., 20000., 20) #Number of templates (this number should be justified!).
numtemvec=np.logspace(0., 5., 20)
#numtemvec=np.logspace(2., 5., 2)
minrange=0.001 #All values of the distribution of the statistics in the presence of the signal below this number will be disregarded.
intpoints=100 #Number of points that will draw the distribution to make the integral.
snrvec=np.linspace(1.,10.,100) #S/N values.
outputdir='../plots/'
dpt=0.95 #Target detection probability used in the single-template test below.
numtem_test=10000 #Just as an example (not useful for the plot).
#-----------------------------------------------------------------
# Map the analysis mode onto its false-alarm probability and output filename.
if case=='upper_limit':
    fap=0.4 #False alarm probability threshold for an upper limit.
    oplot='snr_threshold_more_UPP.png'
elif case=='detection':
    fap=0.001 #False alarm probability threshold for a detection.
    oplot='snr_threshold_more_DET.png'
def dicho(xmin,xmax,eps,Nmax,fx,pars):
    '''Bisection root search: return a point within relative tolerance "eps"
    of a root of fx(x, pars) inside ["xmin", "xmax"], using at most "Nmax"
    iterations. Written by A. Petiteau.'''
    lo, hi = xmin, xmax
    f_lo = fx(lo, pars)
    fx(hi, pars)  # evaluated (as in the original) but never needed afterwards
    steps = 0
    # Relative-width stopping criterion on the bracket [lo, hi].
    while abs(2. * (lo - hi) / (lo + hi)) > eps and steps < Nmax:
        mid = (hi + lo) / 2.
        f_mid = fx(mid, pars)
        steps += 1
        if f_lo * f_mid <= 0.:
            # Sign change between lo and mid: shrink from the right.
            hi = mid
        else:
            # Root lies in [mid, hi]: shrink from the left and carry f(lo).
            lo = mid
            f_lo = f_mid
    return (hi + lo) / 2.
def f0eq(f0,pars):
    '''Function of variable "f0" and parameters "pars" = [numtem, fap] whose
    root gives the F0 detection threshold.'''
    numtem, fap = pars
    # Probability that a single template stays below f0 (chi^2, 4 dof form).
    below = 1. - (1. + f0) * np.exp(-f0)
    return numtem * np.log(below) - np.log(1. - fap)
def f0thres(numtem,fap):
    '''Return the F0 threshold for "numtem" templates at false-alarm
    probability "fap", found by bisection of f0eq.'''
    # Bracket [0.01, 100], relative tolerance 1e-6, at most 1000 iterations.
    return dicho(0.01, 100., 1e-6, 1000, f0eq, [numtem, fap])
def dpint(f,snr):
    '''Integrand of the detection probability of single sources. The modified
    Bessel function grows huge, so the product is assembled in log space with
    mpmath and only exponentiated at the end.'''
    log_bessel = mpmath.log(mpmath.besseli(1, snr * np.sqrt(2. * f)))
    log_decay = mpmath.mpf(-f - 0.5 * snr ** 2.)
    log_prefactor = mpmath.log(np.sqrt(2. * f) * 1. / snr)
    value = mpmath.exp(mpmath.fsum([log_bessel, log_decay, log_prefactor]))
    # The final value is of ordinary magnitude, so a float suffices.
    return float(value)
def dpintold(f,snr):
    '''Integrand of the detection probability. This gives numerical problems because of the modified Bessel function. Larger number precision is needed.'''
    # NOTE(review): `iv` (modified Bessel I, presumably scipy.special.iv) is
    # not imported anywhere visible -- calling this raises NameError. Kept
    # only as the reference implementation superseded by dpint().
    return snr**(-1.)*np.sqrt(2.*f)*iv(1,snr*np.sqrt(2.*f))*np.exp(-f-0.5*snr**2.)
def dpfun(f0,snr,minrange,intpoints):
    '''This function calculates the detection probability. "f0" is the F statistics threshold. "snr" is the signal to noise ratio of a particular binary. "minrange" could be around 1 percent, and it is chosen in such a way that the integrand of the detection probability will be considered zero below this value. "intpoints" is the number of points in which the integral will be performed.'''
    # Mean (peak location) of the F statistic in the presence of signal.
    meanf=0.5*(4.+snr**2.)
    if dpint(f0,snr)<=(minrange*dpint(meanf,snr)):
        # Threshold sits in a negligible tail: DP is 1 or 0 depending on
        # which side of the peak it falls.
        if f0<=meanf:
            dp=1.
        elif f0>meanf:
            dp=0.
    elif dpint(f0,snr)>(minrange*dpint(meanf,snr)):
        # Extend the upper integration limit in steps of |f0-meanf| until the
        # integrand drops below the cutoff, then integrate with trapezoids.
        dif=abs(f0-meanf)
        j=1
        while dpint(f0+j*dif,snr)>(minrange*dpint(meanf,snr)):
            j+=1
        fvec=np.linspace(f0,f0+j*dif,intpoints)
        dpvec=np.zeros(intpoints)
        # xrange: this script targets Python 2.
        for fi in xrange(len(fvec)):
            dpvec[fi]=dpint(fvec[fi],snr)
        dp=np.trapz(dpvec,fvec)
        #sumi=np.sum(dpvec[:-1]*np.diff(fvec))
        #print 'SUMMING ',sumi
        #py.plot(fvec,dpvec)
        #py.show()
    return dp
def snrthres(f0,minrange):
    '''Minimum SNR in order to have an integrand of the detection probability that is larger than "minrange" (around 1percent) times the maximum of the distribution.'''
    minsnr=0.01 #Minimum snr that can be threshold.
    maxsnr=100. #Maximum snr that can be threshold.
    numpoints=100 #Number of snr points to search for a threshold between minsnr and maxsnr.
    # Log-spaced SNR candidates scanned from the smallest upward.
    snrvec=np.logspace(np.log10(minsnr),np.log10(maxsnr),numpoints)
    for snri in xrange(len(snrvec)):
        meanf=0.5*(4.+snrvec[snri]**2.)
        maxim=dpint(meanf,snrvec[snri])
        if dpint(f0,snrvec[snri])>=(maxim*minrange):
            snrt=snrvec[snri]
            break
    # NOTE(review): if the loop never breaks, `snrt` is unbound and the
    # return below raises NameError; the warnings only cover near-miss cases.
    if snri==0:
        print 'The SNR threshold is probably not well calculated. I should set minsnr smaller.'
    if snri==(len(snrvec)-1):
        print 'The SNR threshold is probably not well calculated. I should set maxsnr larger.'
    return snrt
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
    })
left, right, top, bottom, cb_fraction=0.14, 0.94, 0.96, 0.16, 0.145 #Borders of the plot.
#Create a plot.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
#Single-template sanity check: SNR threshold at numtem_test templates.
f0=f0thres(numtem_test, fap)
dpvec=np.zeros(len(snrvec))
for snri in xrange(len(snrvec)):
    dpvec[snri]=dpfun(f0,snrvec[snri],minrange,intpoints)
#Threshold SNR = candidate whose detection probability is closest to dpt.
snrt=snrvec[abs(dpvec-dpt).argmin()]
print 'For a test of %i templates, the snr threshold is %e .' %(numtem_test, snrt)
print
#Main computation: F0 threshold as a function of the number of templates.
py.ion()
f0vec=np.zeros(len(numtemvec))
for numtemi in xrange(len(numtemvec)):
    numtem=numtemvec[numtemi]
    f0vec[numtemi]=f0thres(numtem, fap)
ax.semilogx(numtemvec, f0vec, color='black')
ax.grid()
#ax.plot(snrvec, np.ones(len(snrvec))*dpt, '--', color='black')
#ax.hlines(dpt, min(snrvec), max(snrvec), linestyle='dashed', color='black')
#ax.vlines(min(snrtvec), 0., 1., linestyle='', color='black')
#ax.vlines(max(snrtvec), 0., 1., '--', color='black')
#ax.fill_betweenx(np.ones(len(snrvec)), min(snrtvec), max(snrtvec), color='grey')
#ax.axvspan(min(snrtvec), max(snrtvec), ymin=0., ymax=1., alpha=0.5, color='grey', zorder=1)
ax.set_xlabel('$\\mathrm{Number\ of\ templates}$')
ax.set_ylabel('$\\mathcal{F}_e\\mathrm{\ threshold}$')
#ax.set_xscale('log')
#ax.set_yscale('log')
#ax.set_xlim(min(snrvec), max(snrvec))
#ax.set_xticks([ 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3])
#ax.set_xticklabels(["$-2$", "$ -1$", "$0$", "$1$", "$2$", "$3$"])
#ax.set_ylim(0., 1.)
#ax.set_yticks([1e-16, 1e-15, 1e-14, 1e-13])
#ax.set_yticklabels(["$-16$", "$-15$", "$-14$", "$-13$"])
#ax.text(10,1e-14,'$h^\\textrm{LSO}$',fontsize=9)
#ax.legend(loc='lower right',handlelength=3.5)
#fig.savefig(outputdir+oplot, transparent=True)
fig.savefig(outputdir+oplot, dpi=600)
|
#!/usr/bin/python
from glob import glob
from math import pi, sqrt
import numpy as np
import pickle
from pyrosetta import *
from pyrosetta.rosetta.core.scoring.dssp import Dssp
from pyrosetta.rosetta.core.scoring import rmsd_atoms
from pyrosetta.rosetta.core.scoring import superimpose_pose
from pyrosetta.rosetta.core.select.residue_selector import ResidueIndexSelector
from pyrosetta.rosetta.core.simple_metrics.metrics import RMSDMetric
from pyrosetta.rosetta.core.simple_metrics.per_residue_metrics import PerResidueRMSDMetric
from pyrosetta.rosetta.protocols.simple_moves import SuperimposeMover
#init('-mute all, -ignore_zero_occupancy false')
#hcv_pose = pose_from_pdb('a_to_s_ly104_WT.pdb')
#hcv_cat_res = {72:'H', 96:'D', 154:'S'}
#tev = pose_from_pdb('tev.pdb')
# TEV protease sequence used as the structural query.
tev_seq = 'GESLFKGPRDYNPISSTICHLTNESDGHTTSLYGIGFGPFIITNKHLFRRNNGTLLVQSLHGVFKVKNTTTLQQHLIDGRDMIIIRMPKDFPPFPQKLKFREPQREERICLVTTNFQTKSMSSMVSDTSCTFPSSDGIFWKHWIQTKDGQCGSPLVSTRDGFIVGIHSASNFTNTNNYFTSVPKNFMELLTNQEAQQWVSGWRLNADSVLWGGHKVFMSKP'
# Catalytic triad PDB numbers for TEV. In pose numbering: H39, D74, C144.
tev_cat_res = {'H': 46, 'A': 81, 'N': 151} # In pose: H39, D74, C144
# Map lists all loop regions, catalytic triad, all in PDB (not pose) numbers
# Had to generate manually since loops might contain beta-fingers
tev_map = { 'N': range( 8, 18),
            1: range( 26, 28),
            2: range( 38, 40),
            3: range( 44, 54),
            4: range( 59, 63),
            5: range( 67, 74),
            6: range( 78, 82),
            7: range( 87, 109),
            8: range(117, 121),
            9: range(126, 139),
            10: range(143, 152),
            11: range(158, 161),
            12: range(172, 176),
            'C': range(182, 221)}
# Corresponding loop map for HtrA1, same keying scheme as tev_map.
htra1_map = { 'N': range(160, 183),
            1: range(192, 198),
            2: range(209, 214),
            3: range(218, 226),
            4: range(233, 235),
            5: range(240, 241),
            6: range(247, 250),
            7: range(256, 276),
            8: range(284, 290),
            9: range(300, 317),
            10: range(320, 329),
            11: range(335, 337),
            12: range(349, 351),
            'C': range(357, 370)}
def get_distance(c1, c2):
    """ Euclidean distance between two Rosetta XYZ coordinate vectors """
    dx = c2.x - c1.x
    dy = c2.y - c1.y
    dz = c2.z - c1.z
    return sqrt(dx * dx + dy * dy + dz * dz)
def find_res_ca_coords(pose, resnum):
    """ Return the CA coordinates of residue *resnum* in *pose* """
    return pose.residue(resnum).atom('CA').xyz()
def get_vector_obj_for_rmsa(pose, residue_number):
    """
    Return the CA->N and CA->C bond vectors (as lists) for the given residue.
    """
    res = pose.residue(residue_number)
    ca = res.atom('CA').xyz()
    ca_to_n = list(res.atom('N').xyz() - ca)
    ca_to_c = list(res.atom('C').xyz() - ca)
    return [ca_to_n, ca_to_c]
def get_section_RMSD(pose_1, selector_1, pose_2, selector_2):
    """
    Backbone-CA RMSD between regions of two poses. Passing None for either
    selector makes the metric use the whole corresponding pose.
    """
    metric = RMSDMetric()
    metric.set_rmsd_type(rmsd_atoms.rmsd_protein_bb_ca)
    metric.set_comparison_pose(pose_1)
    if selector_1:
        metric.set_residue_selector_reference(selector_1)
    if selector_2:
        metric.set_residue_selector(selector_2)
    return metric.calculate(pose_2)
#def get_rmsd(pose_1, pose_2, residues_1, residues_2):
#assert len(residues_1) == len(residues_2)
#n = len(residues_1)
#difs = 0
#for i in range(n):
# r1_coords = find_res_ca_coords(pose_1, residues_1[i])
# r2_coords = find_res_ca_coords(pose_2, residues_2[i])
# difs += (r2_coords.x - r1_coords.x) ** 2
# difs += (r2_coords.y - r1_coords.y) ** 2
# difs += (r2_coords.z - r1_coords.z) ** 2
#return sqrt(difs / n)
def find_cat_res(pose):
    """
    For a given pose, checks the coordinates of each CA for closest match to
    the catalytic residues of HCV protease. Returns the corresponding residue
    for each of the three members of the triad.
    """
    # NOTE(review): relies on module globals hcv_pose and cat_res, whose
    # definitions are commented out at the top of this file -- confirm they
    # are initialized before calling.
    HCV_coords = [find_res_ca_coords(hcv_pose, x) for x in cat_res]
    # BUG FIX: the original initialized the distances with the string 'none';
    # `'none' > distance` raises TypeError on Python 3. float('inf') makes
    # the first measured distance always win instead.
    matching_residue_dists = [float('inf'), float('inf'), float('inf')]
    matching_residue_numbers = ['none', 'none', 'none']
    chain_length = pose.total_residue()
    # Checking each residue against all members of the catalytic triad
    for resnum in range(1, chain_length + 1):
        res_coord = find_res_ca_coords(pose, resnum)
        for n, hcv_coord in enumerate(HCV_coords):
            distance = get_distance(hcv_coord, res_coord)
            if matching_residue_dists[n] > distance:
                matching_residue_dists[n] = distance
                matching_residue_numbers[n] = resnum
    # Listing matched residue numbers and residue types
    catalytic_matches = {}
    for res in matching_residue_numbers:
        catalytic_matches[res] = str(pose.residue(res).name1())
    return catalytic_matches
def get_secstruct(pose):
    """ Run DSSP on the pose and return its secondary-structure string """
    dssp = Dssp(pose)
    # Annotate the pose in place, then read the assignment back.
    dssp.insert_ss_into_pose(pose)
    return str(pose.secstruct())
def align_protein_sections(pose_1, selector_1, pose_2, selector_2):
    """
    Superimpose the selected region of pose_2 onto the selected region of
    pose_1 using backbone-CA atoms.
    """
    metric = PerResidueRMSDMetric()
    metric.set_rmsd_type(rmsd_atoms.rmsd_protein_bb_ca)
    metric.set_comparison_pose(pose_1)
    metric.set_residue_selector_reference(selector_1)
    metric.set_residue_selector(selector_2)
    # The per-residue metric supplies the atom pairing for the superposition.
    atom_map = metric.create_atom_id_map(pose_2)
    return superimpose_pose(pose_2, pose_1, atom_map)
def make_subpose(pose, start=1, end=1):
    """ Return pose that is a section of another pose """
    if end == 1:
        # Sentinel default: take everything through the final residue.
        end = pose.total_residue()
    return Pose(pose, start, end)
def get_b_factor(pose, residue):
    """
    Average b-factor of the backbone atoms (N, CA, C) of a residue.
    *residue* must be given in pose numbering, not PDB numbering.
    """
    bfactor_of = pose.pdb_info().bfactor
    index_of = pose.residue(residue).atom_index
    backbone_atoms = ('N', 'CA', 'C')
    total = sum(bfactor_of(residue, index_of(atom)) for atom in backbone_atoms)
    return total / 3
#def align_proteins(pose_1, pose_2):
"""
Aligns two proteins usinf the Superimpose mover. The second protein will be
aligned to the first. Accommodates proteins of different sizes by aligning
only the number of residues in the smaller protein. This assumes that the
difference between the sizes is much smaller than the total number of
residues in each.
"""
# Determining the size of the smaller pose
#min_length = min(pose_1.total_residue(), pose_2.total_residue())
# Creating mover from start to the smaller end, aligning with all BB atoms
#sim = SuperimposeMover(pose_1, min_length, 190, min_length, 190, False)
#sim.apply(pose_2)
#return
#def insert_loop(scaffold, insert, )
################################################################################
# RMSA functions provided by Will Hansen
#INPUTS: list of pairable vectors. Obj1 should be a list of vectors
#that you want to match to the corresponding obj2.
def vector_angle(v1, v2):
    """Angle in radians between v1 and v2, clipped for numerical safety."""
    unit1 = v1 / np.linalg.norm(v1)
    unit2 = v2 / np.linalg.norm(v2)
    # Clip guards against cosines marginally outside [-1, 1] from round-off.
    cosine = np.clip(np.dot(unit1, unit2), -1.0, 1.0)
    return np.arccos(cosine)
def zero_vector_pair(v_pair):
    """Translate every element of v_pair by the negation of its first three
    components (the pair is indexed as if it had exactly three entries)."""
    shift = np.array([0. - v_pair[0], 0. - v_pair[1], 0. - v_pair[2]])
    return [np.array(point) + shift for point in v_pair]
def calc_rmsa(obj1, obj2, ratio=(pi/6.0)):
    """RMS angle between corresponding vector pairs of obj1 and obj2,
    normalized by *ratio* (default pi/6)."""
    assert len(obj1) == len(obj2)
    angle_sum = 0.0
    for pair_a, pair_b in zip(obj1, obj2):
        zeroed_a = zero_vector_pair(pair_a)
        zeroed_b = zero_vector_pair(pair_b)
        # Only the second element of each zeroed pair enters the angle.
        angle_sum += vector_angle(np.array(zeroed_a[1]), np.array(zeroed_b[1]))
    return ((angle_sum / ratio) ** 2 / len(obj1)) ** 0.5
################################################################################
class protease_info():
def __init__(self, qname, qpose, sname, spose):
self.query_name = qname.upper()
self.query_pose = qpose
self.subject_name = sname.upper()
self.subject_pose = spose
# Dali info
self.Z_score = None
self.rmsd = None
self.lali = None
self.nres = None
self.pID = None
self.description = None
# Alignment
self.alignment = None
self.aligned_residues = []
# Catalytic triad
self.nucleophile_res = None
self.nucleophile_type = None
self.catalytic_his = None
self.catalytic_his_type = None
self.catalytic_acid = None
self.catalytic_acid_type = None
# Loops
self.loop_maps = {}
self.auto_calculate()
def auto_calculate(self, dali_file='aligned_pdbs/0000_dali_pdb90_tev.txt', align_file='aligned_pdbs/0000_seq_align.txt', q_cat_res=tev_cat_res, structure_map=tev_map):
""" Run all the calculation functions """
self.get_dali_info(dali_file=dali_file)
self.get_alignment(align_file=align_file)
self.map_aligned_residues()
self.map_cat_res(q_cat_res=q_cat_res)
self.map_structure_elements(structure_map=structure_map)
return
def get_dali_info(self, dali_file='aligned_pdbs/0000_dali_pdb90_tev.txt'):
"""
Read in appropriate summary from Dali download about this protein,
including Z score (indicating structural similarity to the query
structure), RMSD to TEV protease (the original query), lali (the number
of structurally equivalent CA atoms), nres (the total number of
residues in the chain), pID (percentage of identical amino acids in
equivalent residues), and PDB description.
Header line:
Chain Z rmsd lali nres %id Description
"""
# Read in Dali summary
with open(dali_file, 'r') as r:
match_summaries = r.readlines()
# Find appropriate line in the summary by PDB name, stopping when found
summary_line = None
for ms in match_summaries:
if self.subject_name in ms.upper():
summary_line = ms.split()
break
# If no appropriate line is found, print error message and exit
if summary_line == None:
print("No matching protein identified in Dali summary")
return
# If line was found, read in its values
self.Z_score = summary_line[1]
self.rmsd = summary_line[2]
self.lali = summary_line[3]
self.nres = summary_line[4]
self.pID = summary_line[5]
self.description = ' '.join(summary_line[6:])
return
def get_alignment(self, align_file='aligned_pdbs/0000_seq_align.txt'):
"""
Read in sequence alignment file as a set of contiguous strings
Hacky--may need tweaking to generalize.
Alignment file has sets of five lines, with each set covering 60
residues in the alignment. The first line (0) is the secondary
structure of the query (TEV). The second (1) is the query sequence. The
third (2) is the identity match (indicated as positive by |). The
fourth (3) is the subject sequence. The fifth (4) is the subject
secondary structure.
"""
# Read in sequence alignment
with open(align_file, 'r') as r:
seq_aligns = r.readlines()
# Find appropriate lines in the summary by PDB name
begin_block = None
end_block = None
for n, sa in enumerate(seq_aligns):
# Only check starting lines
if 'Z-score' in sa:
# Stop capture at the next block after finding the start
if begin_block != None:
end_block = n
break
# Find beginning of block, where start line includes name
if self.subject_name in sa.upper():
begin_block = n
# Extracting relevant text block
alignment_block = seq_aligns[begin_block:end_block]
# Cleaning block
abclean = [i.strip() for i in alignment_block[2:] if i != '\n']
# Chack that there are the right number of lines
assert len(abclean) % 5 == 0
# Concatenating data portions of each alignment line. See docstring.
align_lines = {0: '', 1: '', 2: '', 3: '', 4: ''}
for n, line in enumerate(abclean):
which_set = n % 5
# Cut off before residue numbers
if which_set == 0:
max_len = len(line)
# Pad short lines
line_info = line[6:max_len]
while len(line_info) < max_len - 6:
line_info += ' '
# Adding to appropriate set
align_lines[which_set] += line_info
# Verifying all lines are equal length
line_lengths = [len(i) for i in align_lines.values()]
assert all([elem == line_lengths[0] for elem in line_lengths])
self.alignment = list(align_lines.values())
return
def map_aligned_residues(self):
    """
    Feed alignment data into a list of aligned_residues, each with
    corresponding information about the position in both query and the
    protease being analyzed.

    Start by finding the first five residues of the alignment in the pose
    sequence for both query and the subject (adding 1 because Rosetta is
    1-indexed). This is necessary because Dali only includes aligned
    regions, whereas some subjects might have large N-terminal domains.

    NOTE(review): the docstring says "first five residues" but the code
    anchors on the first six ([:6]) -- confirm which is intended.
    """
    # Get query residue number for beginning of alignment.
    # Gaps '-' and masked residues 'x'/'X' are removed before matching
    # against the pose sequence.
    quer_match_seq = self.alignment[1].replace('-','').upper().replace('X','')
    quer_pose_seq = self.query_pose.sequence()
    assert quer_match_seq[:6] in quer_pose_seq
    # find() is 0-based; the counter is pre-incremented before each use,
    # which converts it to Rosetta's 1-based pose numbering
    quer_pose_num = quer_pose_seq.find(quer_match_seq[:6])
    # Get subject residue number for beginning of alignment
    subj_match_seq = self.alignment[3].replace('-','').upper().replace('X','')
    subj_pose_seq = self.subject_pose.sequence()
    assert subj_match_seq[:6] in subj_pose_seq
    subj_pose_num = subj_pose_seq.find(subj_match_seq[:6])
    # Loop through each residue in the alignment, adding aligned_residue
    # objects to the list for each
    quer_info = self.query_pose.pdb_info()
    subj_info = self.subject_pose.pdb_info()
    for i in range(len(self.alignment[0])):
        # Check whether there is a query residue in the alignment
        if self.alignment[1][i] not in ['-', 'x', 'X']:
            # Increment query pose number
            quer_pose_num += 1
            temp_quer_pose_num = quer_pose_num
            # Get query PDB number (pose2pdb returns "num chain")
            quer_pdb_num = int(quer_info.pose2pdb(quer_pose_num).split()[0])
            # Get query secondary structure
            quer_dss = self.alignment[0][i].upper()
            # Get query residue letter
            quer_sequence = self.alignment[1][i] # Case left to check align
        else:
            # Gap in the query row: no query-side data for this column
            temp_quer_pose_num = None
            quer_pdb_num = None
            quer_dss = None
            quer_sequence = None
        # Check whether there is a subject residue in the alignment
        if self.alignment[3][i] not in ['-', 'x', 'X']:
            # Increment subject pose number
            subj_pose_num += 1
            temp_subj_pose_num = subj_pose_num
            # Get subject PDB number
            subj_pdb_num = int(subj_info.pose2pdb(subj_pose_num).split()[0])
            # Get subject secondary structure
            subj_dss = self.alignment[4][i].upper()
            # Get subject residue letter
            subj_sequence = self.alignment[3][i] # Case left to check align
        else:
            # Gap in the subject row: no subject-side data for this column
            temp_subj_pose_num = None
            subj_pdb_num = None
            subj_dss = None
            subj_sequence = None
        # Collect residue identity ('|' marks identical residues)
        res_identity = self.alignment[2][i]
        # Collect B-factors (only where a pose residue exists)
        if temp_quer_pose_num:
            qbfac = get_b_factor(self.query_pose, temp_quer_pose_num)
        else:
            qbfac = None
        if temp_subj_pose_num:
            sbfac = get_b_factor(self.subject_pose, temp_subj_pose_num)
        else:
            sbfac = None
        # Populating aligned_residue object
        a_residue = aligned_residue(
            temp_quer_pose_num, quer_pdb_num,
            quer_dss, quer_sequence,
            temp_subj_pose_num, subj_pdb_num,
            subj_dss, subj_sequence,
            res_identity, qbfac, sbfac)
        # Adding aligned_residue object to self.aligned_residues
        self.aligned_residues.append(a_residue)
    return
def map_cat_res(self, q_cat_res=tev_cat_res):
    """
    Identify the subject residues aligned to the query's catalytic triad.

    q_cat_res maps the triad roles to query PDB numbers -- H: histidine,
    A: acid residue, N: nucleophile. For each role, the aligned subject
    residue (if any) is recorded, and the corresponding attributes on self
    are set only when the matched residue type is chemically plausible
    for that role.
    """
    # Matched subject PDB numbers and residue letters, by role
    matched_nums = {'H': None, 'A': None, 'N': None}
    matched_types = {'H': None, 'A': None, 'N': None}
    # Alignment columns whose query residue is one of the catalytic ones
    triad_hits = [res for res in self.aligned_residues
                  if res.query_pdb_number in q_cat_res.values()]
    # Fill in the role tables; entries stay None when nothing matches
    for role, pdb_num in q_cat_res.items():
        for hit in triad_hits:
            if hit.query_pdb_number == pdb_num and hit.subject_pdb_number:
                matched_nums[role] = hit.subject_pdb_number
                matched_types[role] = hit.subject_res_type
    # Nucleophile must be Ala/Cys/Ser
    if matched_types['N'] in ['A', 'C', 'S']:
        self.nucleophile_res = matched_nums['N']
        self.nucleophile_type = matched_types['N']
    # Catalytic histidine must actually be a histidine
    if matched_types['H'] == 'H':
        self.catalytic_his = matched_nums['H']
        self.catalytic_his_type = matched_types['H']
    # Catalytic acid must be Asp/Glu
    if matched_types['A'] in ['D', 'E']:
        self.catalytic_acid = matched_nums['A']
        self.catalytic_acid_type = matched_types['A']
    return
def map_structure_elements(self, structure_map=tev_map):
    """
    Build a matched_loop object for every element of the structure map.

    structure_map is a dict keyed by loop identifier -- integers for the
    numbered loops plus the strings 'N' and 'C' for the terminal regions --
    whose values are lists of query PDB residue numbers for that element.
    For each element, the search window passed to matched_loop runs from
    just past the end of the upstream element to just before the start of
    the downstream one (None at the termini). Results are stored in
    self.loop_maps, keyed the same way as structure_map.
    """
    loop_maps = {}
    # Highest numbered loop; the 'N'/'C' string keys are excluded
    last_loop = max([x for x in structure_map.keys() if not isinstance(x, str)])
    for loop in structure_map:
        # Get boundaries
        # One past the last residue of upstream loop
        # One before the first residue of downstream loop
        if loop == 'N': # Edge case for N-terminal region (not a loop)
            n_bound = None
            c_bound = structure_map[1][0] - 1
            # ^ = Everything up to first res of first loop
        elif loop == 'C': # Edge case for C-terminal region (not a loop)
            n_bound = structure_map[last_loop][-1] + 1
            # ^ = Everything after last res of last loop
            c_bound = None
        elif loop == 1: # Edge case for N-terminal loop
            n_bound = structure_map['N'][-1] + 1
            c_bound = structure_map[loop + 1][0] - 1
        elif loop == last_loop: # Edge case for C-terminal loop
            n_bound = structure_map[loop - 1][-1] + 1
            c_bound = structure_map['C'][0] - 1
        else: # General case for all interior loops
            n_bound = structure_map[loop - 1][-1] + 1
            c_bound = structure_map[loop + 1][0] - 1
        loop_map = matched_loop(self.query_pose, self.subject_pose,
            self.aligned_residues, self.subject_name, loop, structure_map[loop], n_bound, c_bound)
        loop_maps[loop] = loop_map
    self.loop_maps = loop_maps
    return
class aligned_residue():
    """
    Data storage structure for a single residue. Includes information about
    both the target residue in its own protein and the corresponding aligned
    residue in the query structure. Information includes secondary structure,
    whether residues are structurally matched (as opposed to unaligned), and
    whether the residues are identical. Also stores residue numbers (both PDB
    and pose) for both residues.

    Dali encodes structural alignment in letter case: both residues uppercase
    means structurally aligned, both lowercase means not aligned.

    Raises ValueError if the two residue letters have inconsistent case, or
    if the identity marker '|' is present but the residue types differ.
    (The original used `assert`, which is silently stripped under `python -O`.)
    """
    def __init__(self, qpnum, qrnum, qdss, qseq, spnum, srnum, sdss, sseq, rid, qbfac, sbfac):
        # Query residue: pose number, PDB number, secondary structure, type
        self.query_pose_number = qpnum
        self.query_pdb_number = qrnum
        self.query_sec_struct = qdss
        self.query_res_type = qseq.upper() if qseq else None
        # Subject residue: pose number, PDB number, secondary structure, type
        self.subject_pose_number = spnum
        self.subject_pdb_number = srnum
        self.subject_sec_struct = sdss
        self.subject_res_type = sseq.upper() if sseq else None
        # Determine whether residues are structurally aligned, based on case
        if qseq and sseq:
            if qseq == qseq.upper() and sseq == sseq.upper():
                self.residues_align = True
            elif qseq == qseq.lower() and sseq == sseq.lower():
                self.residues_align = False
            else:
                # Mixed case should never come out of a well-formed alignment
                raise ValueError(
                    'Residue cases do not match: subject {} {}, query {} {}'.format(
                        spnum, sseq, qpnum, qseq))
        else:
            # One side is a gap: cannot be aligned
            self.residues_align = False
        # Determine res identity, based on whether connection line was drawn
        if rid == '|':
            self.residues_equal = True
            if self.query_res_type != self.subject_res_type:
                raise ValueError(
                    "Identity marker '|' but residue types differ: {} vs {}".format(
                        self.query_res_type, self.subject_res_type))
        else:
            self.residues_equal = False
        # Store B-factors (may be None where a residue is absent)
        self.query_b_factor = qbfac
        self.subject_b_factor = sbfac
class matched_loop():
    """
    Data storage structure for loops. When taking in a loop of a query
    structure, finds the edges bordering it (usually B-sheets) and looks for
    residue matches within the given boundaries, which should be the starts of
    the next loops. Input residues should use PDB (not pose) numbers.
    """
    def __init__(self, query_pose, subject_pose, aligned_residues, source, l_name, l_range, n_bound, c_bound):
        # Provenance: subject (PDB) name and the loop's key in the map
        self.loop_source = source
        self.loop_name = l_name
        # Boundary aligned_residue objects, set by get_loop_range
        self.n_boundary = None
        self.c_boundary = None
        # Trim loop alignment to the range between adjacent loops/termini
        trimmed_residue_alignment = \
            self.get_loop_range(aligned_residues, n_bound, c_bound)
        # Flanking residues, set by collect_loop_match: nearest/farthest
        # aligned residues on each side of the loop, plus the same
        # restricted to B-sheet ('E') residues
        self.nearest_n_match = None
        self.nearest_n_b_match = None
        self.farthest_n_match = None
        self.farthest_n_b_match = None
        self.nearest_c_match = None
        self.nearest_c_b_match = None
        self.farthest_c_match = None
        self.farthest_c_b_match = None
        self.subject_nearest_c_match = None
        loop_res = self.collect_loop_match(trimmed_residue_alignment, l_range[0], l_range[-1])
        # Best matched residues for loop swap
        self.N_splice_res = None
        self.C_splice_res = None
        self.query_loop_size = None
        self.subject_loop_size = None
        self.simple_pick_splice()
        # Overlaps
        self.n_overlap_is_b = False
        self.n_overlap_size = None
        self.c_overlap_is_b = False
        self.c_overlap_size = None
        self.check_overlaps()
        # Loop proximity to peptide substrate
        self.loop_near_substrate = None
        self.closest_residue_distance = None
        self.close_substrate_residues = []
        self.residue_count = None
        self.feature_size = None
        # Second chain, when present, is assumed to be the substrate peptide
        if query_pose.num_chains() == 2:
            self.find_proximity_to_substrate(query_pose, subject_pose, loop_res)
        # Check loop similarity (if same size, take RMSD)
        self.subject_loop_matches_query_length = None
        self.rmsd = None
        self.check_loop_rmsd(query_pose, subject_pose)
        # Evaluate whether loop is a suitable target
        self.is_near_target = None
        self.is_not_domain = None
        self.is_n_match = None
        self.is_c_match = None
        self.is_different_from_original = None
        self.is_continuous = None
        self.is_possible_target = None
        self.evaluate_suitability()

    def get_loop_range(self, aligned_residues, n_bound, c_bound):
        """
        Take subset of aligned residues near loop between given boundary
        residues. If no boundary is given (so the region includes a terminus),
        collects everything to the end. Returns the shortened list.

        Side effect: sets self.n_boundary / self.c_boundary to the chosen
        boundary aligned_residue objects.

        NOTE(review): if no subject residue and no aligned residue exists
        within a boundary, n_term_index / c_term_index are never assigned and
        the final slice raises UnboundLocalError -- confirm inputs always
        contain at least one subject residue per window.
        """
        # Determine N-terminal boundary of subject loop based on most N-terminal
        # residue matching the query. If no boundary is given (i.e. N terminus),
        # takes first subject residue in the alignment
        in_n_bound = False
        for n, ar in enumerate(aligned_residues):
            # Ignore all residues before N-boundary
            if n_bound:
                if ar.query_pdb_number:
                    if ar.query_pdb_number < n_bound:
                        continue
                    if ar.query_pdb_number == n_bound:
                        in_n_bound = True
                else:
                    # Query-gap columns are skipped until the boundary is hit
                    if not in_n_bound:
                        continue
            # Defaults to the first subject residue within the boundary if no
            # matching residues are found
            if not self.n_boundary:
                if ar.subject_pdb_number:
                    self.n_boundary = ar
                    n_term_index = n
            # Moves in from boundary if a matching residue is found closer
            if ar.residues_align:
                self.n_boundary = ar
                n_term_index = n
                break
        # Determine C-terminal boundary of subject loop based on most C-terminal
        # residue matching the query. If no boundary is given (i.e. C terminus),
        # takes first subject residue in the alignment (scanning from the end)
        in_c_bound = False
        for n, ar in enumerate(aligned_residues[::-1]):
            # Ignore all residues after C-boundary
            if c_bound:
                if ar.query_pdb_number:
                    if ar.query_pdb_number > c_bound:
                        continue
                    if ar.query_pdb_number == c_bound:
                        in_c_bound = True
                else:
                    if not in_c_bound:
                        continue
            # Defaults to the first subject residue within the boundary if no
            # matching residues are found
            if not self.c_boundary:
                if ar.subject_pdb_number:
                    self.c_boundary = ar
                    # Convert reversed-enumeration index back to forward index
                    c_term_index = len(aligned_residues) - n - 1
            # Moves in from boundary if a matching residue is found closer
            if ar.residues_align:
                self.c_boundary = ar
                c_term_index = len(aligned_residues) - n - 1
                break
        # Return subset of aligned residues
        # NOTE(review): the slice end is exclusive, so the C-boundary residue
        # itself is dropped while the N-boundary residue is kept -- verify
        # this asymmetry is intentional.
        return aligned_residues[n_term_index: c_term_index]

    def collect_loop_match(self, aligned_residues, query_loop_start, query_loop_end):
        """
        Locate the loop (by query PDB numbers) inside the trimmed alignment
        and record the nearest/farthest aligned residues flanking it on both
        sides (with B-sheet variants). Returns the loop's own columns.
        """
        # Determine regions of aligned_residues that are N-terminal of the loop,
        # within the loop, and C-terminal of the loop
        for n, ar in enumerate(aligned_residues):
            if ar.query_pdb_number:
                if ar.query_pdb_number == query_loop_start:
                    first_loop_res = n
                if ar.query_pdb_number == query_loop_end:
                    last_loop_res = n
        # Find matching residues on N-terminal side, going away from loop
        for nn in aligned_residues[first_loop_res::-1]:
            if nn.residues_align:
                # farthest_* keeps being overwritten while scanning outward;
                # nearest_* is only set once (closest to the loop)
                self.farthest_n_match = nn
                if not self.nearest_n_match:
                    self.nearest_n_match = nn
                if nn.subject_sec_struct == 'E':
                    self.farthest_n_b_match = nn
                    if not self.nearest_n_b_match:
                        self.nearest_n_b_match = nn
        # Find matching residues on C-terminal side, going away from loop
        for cn in aligned_residues[last_loop_res + 1:]:
            if cn.residues_align:
                self.farthest_c_match = cn
                if not self.nearest_c_match:
                    self.nearest_c_match = cn
                if cn.subject_sec_struct == 'E':
                    self.farthest_c_b_match = cn
                    if not self.nearest_c_b_match:
                        self.nearest_c_b_match = cn
        return aligned_residues[first_loop_res:last_loop_res + 1]

    def simple_pick_splice(self):
        """
        Choose the splice residues on each side of the loop and compute the
        loop sizes (in PDB numbering) for both query and subject.
        """
        # Setting loop boundaries at closest matching residues, prioritizing
        # b-sheet residues over unstructured ones
        if self.nearest_n_b_match:
            self.N_splice_res = self.nearest_n_b_match
        else:
            self.N_splice_res = self.nearest_n_match
        if self.nearest_c_b_match:
            self.C_splice_res = self.nearest_c_b_match
        else:
            self.C_splice_res = self.nearest_c_match
        # Determining length of query loop from identified boundaries,
        # falling back to the window boundaries when no match was found
        if self.N_splice_res:
            query_N_splice = self.N_splice_res.query_pdb_number
            subject_N_splice = self.N_splice_res.subject_pdb_number
        else:
            query_N_splice = self.n_boundary.query_pdb_number
            subject_N_splice = self.n_boundary.subject_pdb_number
        if self.C_splice_res:
            query_C_splice = self.C_splice_res.query_pdb_number
            subject_C_splice = self.C_splice_res.subject_pdb_number
        else:
            query_C_splice = self.c_boundary.query_pdb_number
            subject_C_splice = self.c_boundary.subject_pdb_number
        self.query_loop_size = query_C_splice - query_N_splice
        self.subject_loop_size = subject_C_splice - subject_N_splice
        return

    def check_overlaps(self):
        """
        Determines range of overlapping matched residues from the nearest
        matched residue flanking the loop to the farthest, on both N-terminal
        and C-terminal sides of the loop.
        """
        # Check N-term side for identified matching beta residues, taking that
        # difference if available, difference of aligned loop residues otherwise
        if self.nearest_n_b_match:
            self.n_overlap_is_b = True
            n_near_res = self.nearest_n_b_match.query_pdb_number
            n_far_res = self.farthest_n_b_match.query_pdb_number
            self.n_overlap_size = 1 + n_near_res - n_far_res
        else:
            # Only assign overlap if aligned residues are found
            if self.nearest_n_match:
                n_near_res = self.nearest_n_match.query_pdb_number
                n_far_res = self.farthest_n_match.query_pdb_number
                self.n_overlap_size = 1 + n_near_res - n_far_res
        # Check C-term side for identified matching beta residues, taking that
        # difference if available, difference of aligned loop residues otherwise
        if self.nearest_c_b_match:
            self.c_overlap_is_b = True
            c_far_res = self.farthest_c_b_match.query_pdb_number
            c_near_res = self.nearest_c_b_match.query_pdb_number
            self.c_overlap_size = 1 + c_far_res - c_near_res
        else:
            # Only assign overlap if aligned residues are found
            if self.nearest_c_match:
                c_far_res = self.farthest_c_match.query_pdb_number
                c_near_res = self.nearest_c_match.query_pdb_number
                self.c_overlap_size = 1 + c_far_res - c_near_res

    def find_proximity_to_substrate(self, query_pose, subject_pose, loop_residues):
        """
        Finds all CA-CA distances between the coordinates of the substrate
        peptide (chain 2 of the query pose) and the subject loop residues.
        Records the closest distance, which substrate residues come within
        8 A of the loop, the loop residue count, and the loop's largest
        internal CA-CA distance (a proxy for feature size).
        """
        substrate_coords = []
        loop_coords = []
        # Populating list of CA coordinates of substrate peptide
        substrate_chain = query_pose.split_by_chain()[2]
        for i in range(1, substrate_chain.total_residue() + 1):
            substrate_coords.append(find_res_ca_coords(substrate_chain, i))
        # Populating list of CA coordinates of loop
        for lr in loop_residues:
            if lr.subject_pose_number:
                loop_coords.append(find_res_ca_coords(subject_pose, lr.subject_pose_number))
        # Finding close residues
        closest_distance = 1000 # Arbitrary large number
        nearby_substrate_residues = []
        for n, sr in enumerate(substrate_coords):
            for lr in loop_coords:
                substrate_dist = get_distance(sr, lr)
                if substrate_dist < closest_distance:
                    closest_distance = substrate_dist
                # 8 A CA-CA cutoff for "close" substrate contact
                if substrate_dist <= 8:
                    if (n + 1) not in nearby_substrate_residues:
                        nearby_substrate_residues.append(n + 1)
        # Determine largest 1D length of feature
        max_interres_distance = 0
        for lc in loop_coords:
            for partner in [x for x in loop_coords if x != lc]:
                interres_dist = get_distance(lc, partner)
                if interres_dist > max_interres_distance:
                    max_interres_distance = interres_dist
        # Updating attributes
        self.loop_near_substrate = bool(nearby_substrate_residues)
        self.closest_residue_distance = closest_distance
        self.close_substrate_residues = nearby_substrate_residues
        self.residue_count = len(loop_coords)
        self.feature_size = max_interres_distance
        return

    def check_loop_rmsd(self, query_pose, subject_pose):
        """
        If the subject and query loops are the same size, take CA RMSD
        """
        if self.query_loop_size == self.subject_loop_size:
            self.subject_loop_matches_query_length = True
            # Get pose-numbered residues for loop termini
            qn = self.N_splice_res.query_pose_number
            sn = self.N_splice_res.subject_pose_number
            qc = self.C_splice_res.query_pose_number
            sc = self.C_splice_res.subject_pose_number
            # Make residue selectors for loops
            stemp = '{}-{}'
            query_selector = ResidueIndexSelector(stemp.format(qn, qc))
            subject_selector = ResidueIndexSelector(stemp.format(sn, sc))
            # Calculate RMSD
            self.rmsd = align_protein_sections(query_pose, query_selector,
                subject_pose, subject_selector)
        else:
            self.subject_loop_matches_query_length = False
        return

    def evaluate_suitability(self):
        """
        Runs a set of checks to determine whether the loop may be viable for
        substitution. Rejects loops on the basis of being too far away from the
        substrate to interact, being too large (avoiding domain insertions),
        being of the same size as the original loop with a small RMSD, lacking
        matched residues flanking the loop (cannot graft), or having gaps in the
        crystal structure.
        """
        # Check that there are residues within range of substrate
        proximity_check = bool(self.close_substrate_residues)
        # Check that loop residue count is not too large (50-residue cap)
        # NOTE(review): subject_loop_size could be None if splice picking
        # failed, which would raise TypeError here -- confirm unreachable.
        res_count_check = self.subject_loop_size <= 50
        # Check that if loop is the same size as the query, that it is
        # structurally different, based on RMSD (0.2 A threshold)
        similarity_check = not (self.rmsd and self.rmsd < 0.2)
        # Check that there is a matched residue on the N-terminal side
        # (the N-terminal region itself has no upstream side to match)
        if self.loop_name == 'N':
            n_match_check = True
        else:
            n_match_check = bool(self.N_splice_res)
        # Check that there is a matched residue on the C-terminal side
        if self.loop_name == 'C':
            c_match_check = True
        else:
            c_match_check = bool(self.C_splice_res)
        # Discontinuity check: a mismatch between PDB-number span and
        # pose-number span indicates unresolved residues in the crystal
        if self.N_splice_res and self.C_splice_res:
            pdb_N_splice = self.N_splice_res.subject_pdb_number
            pose_N_splice = self.N_splice_res.subject_pose_number
            pdb_C_splice = self.C_splice_res.subject_pdb_number
            pose_C_splice = self.C_splice_res.subject_pose_number
            pdb_len = pdb_C_splice - pdb_N_splice
            pose_len = pose_C_splice - pose_N_splice
            discontinuity_check = pdb_len == pose_len
        else:
            discontinuity_check = True
        # Updating attributes
        self.is_near_target = proximity_check
        self.is_not_domain = res_count_check
        self.is_different_from_original = similarity_check
        self.is_n_match = n_match_check
        self.is_c_match = c_match_check
        self.is_continuous = discontinuity_check
        self.is_possible_target = all([proximity_check, res_count_check,
                                       similarity_check, n_match_check,
                                       c_match_check, discontinuity_check])
        return
"""
pdb_list = glob('aligned_pdbs/*.pdb')
pdb_list.sort()
pdb_list.remove('aligned_pdbs/0000_master_pdb.pdb')
db_collect = []
for i in pdb_list:
print(i)
try:
subj_name = i.replace('aligned_pdbs/','').replace('.pdb', '')
subj_pose = pose_from_pdb(i)
pinf = protein_loop_alignment.protease_info('TEV', tev, subj_name, subj_pose)
pinf.auto_calculate()
db_collect.append(pinf)
except:
print(i, 'failed')
outfile = 'protease_db.pkl'
with open(outfile, 'wb') as o:
pickle.dump(db_collect, o)
with open('protease_database.csv', 'w') as w:
header = ['query', 'subject', 'Z_score', 'rmsd', 'lali', 'nres', 'pID']
header += ['cat_nucleophile', 'cat_his', 'cat_acid']
for k in tev_map.keys():
header += ['loop', 'length', 'potential_target', 'query_range', 'subject_range', 'reasons_rejected']
w.write(', '.join(header) + '\n')
with open('protease_database.csv', 'a') as w:
for i in db_collect:
line_info = []
line_info = [i.query_name, i.subject_name, i.Z_score, i.rmsd, i.lali, i.nres, i.pID]
if i.nucleophile_res:
line_info.append(i.nucleophile_type + str(i.nucleophile_res))
else:
line_info.append('None')
if i.catalytic_his:
line_info.append(i.catalytic_his_type + str(i.catalytic_his))
else:
line_info.append('None')
if i.catalytic_acid:
line_info.append(i.catalytic_acid_type + str(i.catalytic_acid))
else:
line_info.append('None')
for k in i.loop_maps.keys():
header += ['loop', 'length', 'swap_target', 'query_range', 'subject_range']
line_info += [k, i.loop_maps[k].residue_count, i.loop_maps[k].is_possible_target]
if i.loop_maps[k].is_possible_target:
line_info.append('-'.join([str(x) for x in [i.loop_maps[k].query_N_splice_res, i.loop_maps[k].query_C_splice_res]]))
line_info.append('-'.join([str(x) for x in [i.loop_maps[k].subject_N_splice_res, i.loop_maps[k].subject_C_splice_res]]))
line_info += ['']
else:
line_info += ['', '']
reject_reasons = []
if not i.loop_maps[k].is_near_target:
reject_reasons.append('Distance')
if not i.loop_maps[k].is_not_domain:
reject_reasons.append('Size')
if not i.loop_maps[k].is_n_match:
reject_reasons.append('No N match')
if not i.loop_maps[k].is_c_match:
reject_reasons.append('No C match')
line_info.append('; '.join(reject_reasons))
line_info = [str(i) for i in line_info]
w.write(', '.join(line_info) + '\n')
"""
"""
only allow D&E for acids
""" |
import classes as Classes
from copy import deepcopy
# Tiny control-flow demo: the loop body runs exactly once, because the first
# iteration clears the sentinel.
aux = 90
while aux is not None:
    print("1")
    aux = None

# A plain str instance: the negated type test prints False, then the type.
a = 'aa'
print(not isinstance(a, str))
print(type(a))
from django import forms
from .models import data, steps
class creatation(forms.Form):
    """
    Recipe-creation form: a title, an author, and four instruction steps.
    """
    title = forms.CharField(label='Tiêu đề', max_length=100)
    author = forms.CharField(label='Tác giả', max_length=20)
    step1 = forms.CharField(label='Bước 1', max_length=300)
    #img1 = forms.ImageField(label='Ảnh minh họa')
    step2 = forms.CharField(label='Bước 2', max_length=300)
    #img2 = forms.ImageField(label='Ảnh minh họa')
    step3 = forms.CharField(label='Bước 3', max_length=300)
    #img3 = forms.ImageField(label='Ảnh minh họa')
    step4 = forms.CharField(label='Bước 4', max_length=300)
    #img4 = forms.ImageField(label='Ảnh minh họa')

    def update_data(self):
        """
        Persist the cleaned form data: save each step as a `steps` row, then
        save one `data` row referencing the step ids.

        Returns the space-separated id string stored on data.steps_address
        (with a trailing space, e.g. "7 8 9 10 ").
        """
        record = data()
        record.title = self.cleaned_data['title']
        record.author_name = self.cleaned_data['author']
        record.num_of_steps = 4
        # BUG FIX: the original read the last id from steps.objects.all()
        # and assumed steps 2-4 would get id+1, id+2, id+3. Auto primary
        # keys are not guaranteed to be consecutive (deletions, concurrent
        # writers), so read each id from the saved instance instead --
        # Django populates .id on save().
        step_ids = []
        for i in range(1, 5):
            row = steps()
            row.step = self.cleaned_data['step%d' % i]
            row.save()
            step_ids.append(row.id)
        address = ''.join(str(pk) + ' ' for pk in step_ids)
        record.steps_address = address
        record.save()
        return address
|
class EmailService(object):
    """
    Static lookup table of well-known email providers' server settings.

    EMAIL_SERVER maps PROVIDER -> PROTOCOL -> [host, port, use_ssl].
    All lookups are case-insensitive on the provider name; unknown
    providers yield None instead of raising (the original chained
    `.get(...).get(...)`, which raised AttributeError on unknown names).
    """
    EMAIL_SERVER={
        "QQ":{"SMTP":["smtp.qq.com",465,True],"POP3":["pop.qq.com",995,True],"IMAP":["imap.qq.com",993,True]},
        "GMAIL":{"SMTP":["smtp.gmail.com",465,True],"POP3":["pop.gmail.com",995,True],"IMAP":["imap.gmail.com",993,True]},
        "FOXMAIL":{"SMTP":["SMTP.foxmail.com",465,True],"POP3":["POP3.foxmail.com",995,True],"IMAP":["imap.foxmail.com",993,True]},
        "SINA":{"SMTP":["smtp.sina.com.cn",465,True],"POP3":["pop3.sina.com.cn",995,True],"IMAP":["imap.sina.com",993,True]},
        "163":{"SMTP":["smtp.163.com",465,True],"POP3":["pop.163.com",995,True],"IMAP":["imap.163.com",993,True]},
        "HOTMAIL":{"SMTP":["smtp.live.com",25,False],"POP3":["pop.live.com",995,True],"IMAP":["imap-mail.outlook.com",993,True]},
        "OUTLOOK":{"SMTP":["smtp-mail.outlook.com",25,False],"POP3":["pop-mail.outlook.com",995,True],"IMAP":["imap-mail.outlook.com",993,True]},
        "AOL":{"SMTP":["smtp.aol.com",25,False],"POP3":["pop.aol.com",110,False],"IMAP":["imap.aol.com",993,True]},
        "YAHOO":{"SMTP":["smtp.mail.yahoo.com",465,True],"POP3":["pop.mail.yahoo.com",995,True],"IMAP":["imap.mail.yahoo.com",993,True]},
        "21CN": {"SMTP": ["smtp.21cn.com", 465, True], "POP3": ["pop.21cn.com", 995, True],"IMAP": ["imap.21cn.com", 143, False]},
        "GMX": {"SMTP": ["smtp.gmx.com", 465, True], "POP3": ["pop.gmx.com", 995, True],"IMAP": ["imap.gmx.com", 993, False]},
        "WEB": {"SMTP": ["smtp.web.com", 465, True], "POP3": ["pop.gmx.com", 995, True],"IMAP": ["imap.web.com", 993, False]},
    }

    @staticmethod
    def __getEmailServer(emailtype, servertype):
        """Return [host, port, ssl] for a provider/protocol, or None."""
        servers = EmailService.EMAIL_SERVER.get(emailtype.upper())
        if servers is None:
            return None
        return servers.get(servertype)

    @staticmethod
    def getSMTPServer(emailtype):
        """SMTP settings for the provider (case-insensitive), or None."""
        return EmailService.__getEmailServer(emailtype.upper(), 'SMTP')

    @staticmethod
    def getPOPServer(emailtype):
        """POP3 settings for the provider (case-insensitive), or None."""
        return EmailService.__getEmailServer(emailtype.upper(), 'POP3')

    @staticmethod
    def getIMAPServer(emailtype):
        """IMAP settings for the provider (case-insensitive), or None."""
        return EmailService.__getEmailServer(emailtype.upper(), 'IMAP')
#!/usr/bin/env python3
'''
leetCode
1071. Greatest Common Divisor of Strings
https://leetcode.com/problems/greatest-common-divisor-of-strings/
'''
class Solution(object):
    def gcdOfStrings(self, str1, str2):
        """
        Return the largest string t such that both str1 and str2 are
        whole-number repetitions of t, or '' if no such string exists.

        :type str1: str
        :type str2: str
        :rtype: str

        Two strings have a common divisor string iff they commute under
        concatenation (str1 + str2 == str2 + str1); the greatest divisor
        then has length gcd(len(str1), len(str2)). This replaces the
        original O(n^2) candidate-by-candidate split check, and also
        handles the empty-string edge case correctly.
        """
        from math import gcd  # local import keeps module dependencies unchanged
        if str1 + str2 != str2 + str1:
            return ''
        return str1[:gcd(len(str1), len(str2))]
if __name__ == "__main__":
    # Quick manual check of the string-GCD routine
    first = "ABABAB"
    second = "ABAB"
    solver = Solution()
    print(" The result is ", solver.gcdOfStrings(first, second))
'''
'''
import gtk
import vwidget.main as vw_main
import vwidget.memview as vw_memview
import visgraph.layouts.dynadag as vg_dynadag
import visgraph.renderers.gtkrend as vg_rend_gtk
import vwidget.menubuilder as vw_menu
import vivisect.gui as viv_gui
import vivisect.base as viv_base
import vivisect.renderers as viv_rend
import vivisect.tools.graphutil as viv_graphutil
class VivGraphMemView(viv_gui.VivMemoryView, viv_base.VivEventCore):
    """
    Memory-view widget for a single basic block inside a function graph.
    Registers itself as a vivisect event core so node colors update live
    when function metadata changes. (Python 2 / PyGTK code.)
    """
    def __init__(self, vw, gui, memwin):
        viv_gui.VivMemoryView.__init__(self, vw, gui, memwin)
        viv_base.VivEventCore.__init__(self, vw)
        # Subscribe to workspace events (VWE_SETFUNCMETA below)
        gui.addEventCore(self)

    def renderMemory(self, va, size, rend=None):
        # Remember the block's start address; used as the key in BlockColors
        self.beginva = va
        vw_memview.MemoryView.renderMemory(self, va, size, rend=rend)

    def vwGetPopup(self, textview, menu, vwfaddr=None):
        # Extend the right-click menu with a node-coloring entry
        va = self.selectva
        vwmenu = vw_menu.FieldAdder(menu, splitchar='/')
        vwmenu.addField('Graph/Color Node', self.popColorGraphNode, (self.beginva,), stockid=gtk.STOCK_SELECT_COLOR)
        # NOTE(review): the menu builder is forwarded via the vwfaddr
        # keyword -- confirm the base-class signature expects that.
        viv_gui.VivMemoryView.vwGetPopup(self, textview, menu, vwfaddr=vwmenu)

    def VWE_SETFUNCMETA(self, vw, event, einfo):
        # Workspace callback: recolor this node's frame when the function's
        # BlockColors metadata changes (default green '#0F0')
        fva, key, val = einfo
        if key == 'BlockColors':
            colorstr = val.get(self.beginva, '#0F0')
            color = gtk.gdk.color_parse(colorstr)
            self._hack_frame.modify_bg(gtk.STATE_NORMAL, color)

    def popColorGraphNode(self, item, cbva):
        # Let the user pick a color for this basic block and persist it
        # into the owning function's BlockColors metadata
        dialog = gtk.ColorSelectionDialog("Select Color")
        if dialog.run() == gtk.RESPONSE_OK:
            col = dialog.colorsel.get_current_color()
            fva = self.vw.getFunction(cbva)
            cols = self.vw.getFunctionMeta(fva, 'BlockColors', default={})
            cols[self.beginva] = str(col)
            self.vw.setFunctionMeta(fva, 'BlockColors', cols)
        dialog.destroy()

    def goto(self, va, size=None, rend=None):
        # Navigation within a single graph node is not supported; just log
        print 'GOTO 0x%.8x' % va
def addWidgets(vw, fva, graph):
    """
    Attach a GTK widget to every node of the function graph: a framed
    memory view rendering the basic block, or an empty label for ghost
    nodes. The widget is stored under the node's 'widget' key.
    """
    rend = viv_rend.WorkspaceRenderer(vw)
    # Per-block colors previously saved on the function, if any
    colors = vw.getFunctionMeta(fva, 'BlockColors', default={})
    tagtable = None
    for nid,ninfo in graph.getNodes():
        if ninfo.get('ghost'):
            # Ghost nodes (e.g. back-edge placeholders) get an empty label
            widget = gtk.Label()
            print 'LABEL SIZE',widget.size_request()
        else:
            memview = VivGraphMemView(vw, vw._viv_gui, None)
            # Share one tag table / tag selector across all node views
            if tagtable == None:
                tagtable = memview.vwGetTagTable()
                tagselect = memview.vaTagSelector
            memview.vaTagSelector = tagselect # HACK
            memview.vwSetTagTable(tagtable)
            memview.set_policy(gtk.POLICY_NEVER, gtk.POLICY_NEVER)
            memview.addRenderer('Viv', rend)
            memview.renderMemory(ninfo['cbva'],ninfo['cbsize'])
            # Frame the view, tinted with the block's saved color
            color = gtk.gdk.color_parse(colors.get(ninfo['cbva'], '#0f0'))
            widget = gtk.Frame()
            widget.modify_bg(gtk.STATE_NORMAL, color)
            widget.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
            # Back-reference so VWE_SETFUNCMETA can recolor the frame later
            memview._hack_frame = widget
            widget.add(memview)
        widget.show_all()
        ninfo['widget'] = widget
def destwin(widget, win):
    # Destroy-signal handler: close the companion window along with its peer
    win.destroy()
@vw_main.idlethread
def makeFuncGraphWindow(vw, fva):
    """
    Build and show two windows for the function at fva: a scrollable
    dynadag-laid-out graph of its basic blocks, and a small overview
    window tied to it. Runs on the GTK main loop (idlethread decorator).
    """
    graph = viv_graphutil.buildFunctionGraph(vw, fva, revloop=True)
    # Give each graph node its memory-view widget before layout
    addWidgets(vw, fva, graph)
    rend = vg_rend_gtk.GtkVisGraphRenderer(graph)
    layout = vg_dynadag.DynadagLayout(graph)
    layout.renderGraph(rend)
    s = gtk.ScrolledWindow()
    s.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    s.add(rend)
    # Overview window tracks the scrolled main graph
    oview = vg_rend_gtk.GtkVisGraphOverview(graph, layout, scrollwin=s)
    w1 = gtk.Window()
    w1.set_title('Overview')
    w1.add(oview)
    w1.show_all()
    w1.resize(100,100)
    # Close the overview window when its widget goes away
    oview.connect('destroy', destwin, w1)
    # Main graph window
    w = gtk.Window()
    w.set_title('Function Graph: 0x%.8x (%s)' % (fva, vw.getName(fva)))
    w.add(s)
    w.show_all()
    w.resize(800, 800)
    #def button_press_event(self, widget, item, event):
    #print event.x
    #print event.y
    #print event.button
    #w.connect('button_press_event', button_press_event)
|
class RC4:
    """
    RC4 stream cipher (keystream generator) with a fixed 16-byte key.

    NOTE(review): encrypt()/decrypt() return successive keystream bytes and
    ignore their argument entirely -- callers apparently XOR externally.
    Kept as-is to avoid breaking existing callers.
    """
    byte_to_int_mask = 255   # mask emulating unsigned-byte arithmetic
    max_key = 16             # required key length in bytes

    def __init__(self):
        # Per-instance cipher state. BUG FIX: S, x and y were class-level
        # attributes, so every RC4 instance shared one permutation table
        # and one pair of PRGA counters.
        self.S = [None] * 256
        self.x = 0
        self.y = 0

    def set_key(self, key, offset=0):
        """
        Run the key-scheduling algorithm over 16 key bytes starting at
        `offset`. Raises ValueError for a missing or too-short key.
        (Note: x/y are intentionally not reset here, matching the
        original behavior.)
        """
        if key is None or len(key) < self.max_key:
            raise ValueError("Key length must be "+str(self.max_key))
        # Initialize S as the identity permutation
        for i in range(256):
            self.S[i] = i
        # Key-scheduling mix. BUG FIX: the original wrote
        # key[i & 15 + offset], which parses as key[i & (15 + offset)]
        # because '+' binds tighter than '&'; the intent is to read the
        # 16 key bytes starting at `offset`.
        j = 0
        for i in range(256):
            j = (j + self.S[i] + key[(i & 15) + offset]) & self.byte_to_int_mask
            self.swap(i, j)

    def swap(self, i, j):
        """Exchange two entries of the state table."""
        self.S[i], self.S[j] = self.S[j], self.S[i]

    def encrypt(self, b):
        return self.encryptOrDecrypt(b)

    def decrypt(self, b):
        return self.encryptOrDecrypt(b)

    def encryptOrDecrypt(self, b):
        """Advance the PRGA one step and return the next keystream byte."""
        self.x = (self.x + 1) & self.byte_to_int_mask
        self.y = (self.y + self.S[self.x]) & self.byte_to_int_mask
        self.swap(self.x, self.y)
        return self.S[(self.S[self.x] + self.S[self.y]) & self.byte_to_int_mask]
|
# Field-name -> value-generator-function-name tables used by the form filler.
# Keys are matched against form field names; values list candidate generator
# functions, in order.
Input_Config ={
    'firstname' : ['get_name_real'],
    'first_name' : ['get_name_real'],
    'lastname' : ['get_name_real'],
    'last_name' : ['get_name_real'],
    'address' : ['apt_get'],
    'homephone' : ['get_phone','get_phone_plus1','get_phone_3','get_phone_6','get_phone_10'],
    'home_phone' : ['get_phone','get_phone_3','get_phone_6','get_phone_10'],
    'workphone' : ['get_workphone','get_workphone_3','get_workphone_6','get_workphone_10','get_workphone_unique'],
    'work_phone' : ['get_workphone','get_workphone_3','get_workphone_6','get_workphone_10','get_workphone_unique'],
    'zip' : ['get_zip'],
    'dateofbirth' : ['get_birthday_mm','get_birthday_dd','get_birthday_year','get_birthday_all'],
    # NOTE(review): this 'email' entry is silently shadowed by the duplicate
    # 'email' key further down -- confirm which mapping is intended.
    'email' : ['select_email_type'],
    'cvv' : ['cvv_get'],
    'katou' : ['get_zip'],
    'year' : ['get_expiration_date'],
    'fullname' : ['get_fullname'],
    'phone' : ['get_phone_dadao','get_phone_de','get_phone_de_pre','get_phone_de_last'],
    'date_of_birth' : ['get_birthday_mm','get_birthday_dd','get_birthday_year','get_birthday_all','get_birthday_all_2'],
    'ssn' : ['get_ssn','ssn_first_3','ssn_mid_2','ssn_last4'],
    'drivers_license': ['get_drivers_license'],
    'routing_number' : ['get_routing_number'],
    'account_number' : ['get_account_number'],
    'net_monthly_income' : ['get_income','get_income_other'],
    'employer':['get_employer_info'],
    'occupation':['get_occupation_info'],
    'name':['get_firstname','get_lastname','get_name_de'],
    # Duplicate key: overrides the 'email' entry above (last one wins in a
    # dict literal).
    'email':['get_email'],
    'id_number':['get_id_number']
}
# Generators for <select> elements, keyed by field name.
Select_Config ={
    'state' : ['get_name_real'],
    'date_of_birth' : ['get_birthday_mm','get_birthday_dd','get_birthday_year','get_birthday_all'],
    'requested_loan_amount':['get_next_payday_dd','get_next_payday_mm','get_next_payday_mm_str','get_next_payday_year','get_next_payday_all','get_next_payday_bi_str','get_next_payday2_dd','get_next_payday2_mm','get_next_payday2_mm_str','get_next_payday2_year','get_next_payday2_all','get_next_payday2_bi_str','get_next_payday_all_str'],
    'pay_period':['get_next_payday_dd','get_next_payday_mm','get_next_payday_mm_str','get_next_payday_year','get_next_payday_all','get_next_payday_bi_str','get_next_payday2_dd','get_next_payday2_mm','get_next_payday2_mm_str','get_next_payday2_year','get_next_payday2_all','get_next_payday2_bi_str','get_next_payday_all_str'],
    'katou':['get_state_byzip'],
    'month':['month_change'],
    'year':['year_change','year_change_short'],
}
# Generators for values synthesized without a specific form element.
Generate_Config = {
    'dateofbirth' : ['get_birthday_mm','get_birthday_dd','get_birthday_year','get_birthday_all','get_birthday_all_2'],
    'pwd' : ['password_get','password_get_Nostale','get_pwd_real','get_pwd_real2','get_pwd_real3','get_pwd_real4'],
    'height' : ['get_height_ft','get_height_inch','get_height_weight'],
    'fullname' : ['get_fullname'],
    'firstname' : ['get_name_real'],
    'next_pay_day':['get_next_payday_bi_str','get_next_payday2_bi_str','get_next_payday_all','get_next_payday2_all'],
    'income' :['get_random_income'],
    'nextpayday_dd' :['get_next_payday_dd'],
    'nextpayday2_dd' :['get_next_payday2_dd'],
    'nextpayday_mm' :['get_next_payday_mm'],
    'nextpayday2_mm' :['get_next_payday2_mm'],
    'nextpayday_mm_str' :['get_next_payday_mm_str'],
    'nextpayday2_mm_str' :['get_next_payday2_mm_str'],
    'nextpayday_yy' :['get_next_payday_year'],
    'nextpayday2_yy' :['get_next_payday2_year'],
    'nextpayday_all' :['get_next_payday_all'],
    'nextpayday2_all' :['get_next_payday2_all'],
    'nextpayday_bi_str' :['get_next_payday_bi_str'],
    'nextpayday2_bi_str' :['get_next_payday2_bi_str'],
    'hire_date':['hire_date'],
    'year_at_residence_gen':['get_year_atresidence'],
    'monthly_payment':['monthly_payment','rent_payment'],
    'course':['get_courses'],
    'email':['get_email'],
}
def get_input_config(key):
    """Return the list of input-handler function names for form field *key*.

    Unknown keys yield an empty list.
    """
    # dict.get replaces the membership-test-then-index pattern and drops the
    # leftover debug prints that dumped the whole config table on every call.
    return Input_Config.get(key, [])
def get_select_config(key):
    """Return the list of select-handler function names for form field *key*.

    Unknown keys yield an empty list.
    """
    # Drops the debug print, which was also mislabelled ('Input_Config[key]'
    # although this function queries Select_Config).
    return Select_Config.get(key, [])
def get_generate_config(key):
    """Return the list of generator function names for form field *key*.

    Unknown keys yield an empty list.
    """
    # Drops the debug print, which was also mislabelled ('Input_Config[key]'
    # although this function queries Generate_Config).
    return Generate_Config.get(key, [])
def get_generate_items():
    """Return the whole Generate_Config mapping (field -> generator names)."""
    # Read-only access; a `global` declaration is not needed for reads.
    return Generate_Config
class Solution:
    """
    https://leetcode.com/problems/reverse-bits/
    """
    def reverseBits(self, n: int) -> int:
        """Reverse the bits of a 32-bit unsigned integer."""
        reversed_bits = 0
        consumed = 0
        # Peel bits off the low end of n and push them onto reversed_bits.
        while n:
            reversed_bits = (reversed_bits << 1) | (n & 1)
            n >>= 1
            consumed += 1
        # Shift left for the high-order zero bits that the loop never saw,
        # so the result always occupies the full 32-bit width.
        return reversed_bits << max(0, 32 - consumed)
# The loop variable keeps the last value after the loop is over.
# You may want to know about this behaviour, but it is better not to count
# on it in real programs.
a = ["din", "kum", "dha"]
for i in a:
    print(i)
print(i)  # still bound to the last element after the loop
|
from unittest.mock import Mock
from model.event.event_bus import EventBus
class TestEventBus:
    """Behavioural tests for EventBus: bind, unbind, trigger, unregister."""

    def test_bind_registers_event(self):
        """Callbacks bound without an owner land under the default owner."""
        bus = EventBus()
        name = 'event 1'
        handlers = [Mock(), Mock(), Mock()]
        for handler in handlers:
            bus.bind(name, handler)
        assert bus.events[bus.default_owner][name] == handlers

    def test_bind_registers_event_with_owner(self):
        """Callbacks bound with an explicit owner land under that owner."""
        bus = EventBus()
        name = 'event 1'
        handlers = [Mock(), Mock(), Mock()]
        owner = Mock()
        for handler in handlers:
            bus.bind(name, handler, owner)
        assert bus.events[owner][name] == handlers

    def test_unbind_unregisters_event(self):
        """Unbinding removes the callback from the default owner's list."""
        bus = EventBus()
        name = 'event 1'
        handler = Mock()
        bus.bind(name, handler)
        assert bus.events[bus.default_owner][name] == [handler]
        bus.unbind(name, handler)
        assert bus.events[bus.default_owner][name] == []

    def test_unbind_unregisters_event_with_owner(self):
        """Unbinding with an owner removes the callback from that owner's list."""
        bus = EventBus()
        name = 'event 1'
        handler = Mock()
        owner = Mock()
        bus.bind(name, handler, owner)
        assert bus.events[owner][name] == [handler]
        bus.unbind(name, handler, owner)
        assert bus.events[owner][name] == []

    def test_trigger_triggers_event(self):
        """Triggering an event invokes the bound callback."""
        bus = EventBus()
        name = 'event 1'
        handler = Mock()
        bus.bind(name, handler)
        bus.trigger(name)
        assert handler.called

    def test_trigger_triggers_events_from_multiple_owners(self):
        """A trigger reaches callbacks bound under every owner."""
        bus = EventBus()
        name = 'event 1'
        handlers = [Mock(), Mock(), Mock()]
        bus.bind(name, handlers[0])
        bus.bind(name, handlers[1], Mock())
        bus.bind(name, handlers[2], Mock())
        bus.trigger(name)
        assert all(handler.called for handler in handlers)

    def test_trigger_triggers_event_with_arguments(self):
        """Positional and keyword trigger arguments reach the callback intact."""
        bus = EventBus()
        name = 'event 1'
        handler = Mock()
        positional = [1, 'test', ';']
        keyword = {'size': (4, 5)}
        bus.bind(name, handler)
        bus.trigger(name, *positional, **keyword)
        handler.assert_called_with(*positional, **keyword)

    def test_unregister_removes_owners_events(self):
        """Unregistering an owner drops its events but keeps default-owner ones."""
        bus = EventBus()
        first = 'event 1'
        second = 'event 2'
        owned = [Mock(), Mock()]
        owner = Mock()
        unowned = [Mock(), Mock()]
        bus.bind(first, owned[0], owner)
        bus.bind(second, owned[1], owner)
        bus.bind(first, unowned[0])
        bus.bind(second, unowned[1])
        bus.unregister(owner)
        assert bus.events.get(owner, None) is None
        assert bus.events.get(bus.default_owner, None) is not None
|
## Create directory path
# Collect one sub-directory path per song in the Bach10 dataset.
import os
dataset_path = '/home/aaditya/Bach10/'
song_paths = []
for song_name in os.listdir(dataset_path) :
    if (song_name != '.DS_Store') : song_paths.append(dataset_path + song_name + '/')
length = len(song_paths)
song_paths = sorted(song_paths)
print(song_paths)
## Create directories
# For each song, pair its ground-truth F0 .mat file with its .wav file.
# [21:-1] strips the 21-character '/home/aaditya/Bach10/' prefix and the
# trailing '/' to recover the bare song name — breaks if dataset_path changes.
mats_and_wavs = [[] for k in range(len(song_paths))]
for i in range(len(song_paths)):
    mats_and_wavs[i].append(song_paths[i]+song_paths[i][21:-1]+'-GTF0s.mat')
    mats_and_wavs[i].append(song_paths[i]+song_paths[i][21:-1]+'.wav')
print(mats_and_wavs[0][1])
## Read wavfile for the first song
import librosa
import IPython
# librosa.load resamples to its default 22050 Hz unless told otherwise.
song, sample_rate = librosa.load(mats_and_wavs[0][1])
# IPython.display.Audio(data = clipped_song, rate = sample_rate)
# sample_rate = 22050
## Compute STFT
# 84 ms analysis window with a 10 ms hop.
window_size = int(0.084*sample_rate)
hop_size = int(0.01*sample_rate)
# Stft = librosa.stft(song, n_fft = window_size, hop_length=hop_size)
# Vft, phase = librosa.magphase(Stft)
# Plot
import numpy as np
#import matplotlib.pyplot as plt
#import librosa.display
#plt.figure(figsize=(10,5))
#librosa.display.specshow(librosa.amplitude_to_db(Vft,ref=np.max), x_axis='time', y_axis='log',
#                         sr=sample_rate, hop_length=hop_size)
#plt.colorbar()
#plt.tight_layout()
## Clip STFT
array = librosa.fft_frequencies(sr=sample_rate, n_fft=window_size)
# Bin 171 — presumably the highest frequency bin kept by the model (F = 172).
print(array[171])
## Define gausian and triangular filters
import math
# Module-level aliases used by the filter functions below.
sqrt = math.sqrt
pi = math.pi
exp = math.exp
def gaussian(x, mu, sigma=3/8):
    """Gaussian-like bump centered at mu; returns 0 for negative x.

    NOTE: the normalisation uses sqrt(2*pi*sigma) while the exponent divides
    by sigma**2, matching the original definition exactly.
    """
    if x < 0:
        return 0
    normalisation = 1 / sqrt(2 * pi * sigma)
    return normalisation * exp(-((x - mu) ** 2) / sigma ** 2)
## Define a single trinagular filter, exponentially distributed
def triangle_filter(f,a,fr=50,r=2):
# amplitude of the filter is 1
if(a==0) :
if(f<0) : return 0
else : return max(0,1-f/fr)
elif(a==1) :
if(f<fr) : return max(0,f/fr)
else : return max(0,(2*fr-f)/fr)
else :
f1 = r**(a-2)*fr
f2 = r**(a-1)*fr
f3 = r**a*fr
if(f<f2) : return max(0,(f-f1)/(f2-f1))
else : return max(0,(f3-f)/(f3-f2))
## Define K multiples of gaussian filter at the multiples of f0
def multiple_gaussian(K,f,f0,sigma=0.5) :
if f<0 : return 0
y=0
for k in range(K+1): y+=gaussian(f,k*f0,sigma)
return y
## Define basic block
def basic_block(K,f,a,f0,fr=50,r=2,sigma=0.5) :
return multiple_gaussian(K,f,f0,sigma)*triangle_filter(f,a,fr,r)
song_index=0
# Model dimensions: N songs, K harmonics, A filter bands, S sources,
# P simultaneous pitches, Z components per (source, pitch).
N = len(song_paths)
K = 10
A = 7
S = 4
P = 4
Z = 3
# Hyperparameters
sigma=0.5
fr = 50
r = 2
F = 172
T = 10000
from scipy.io import loadmat
# Clamp T to the shortest ground-truth pitch track across all songs.
for i in range(len(song_paths)):
    matrix = np.array(loadmat(mats_and_wavs[i][0])['GTF0s']).astype('int')
    T = min(T, matrix[0].shape[0])
print(T)
## Create f0_from_pt
def create_f0_from_pt(f0_from_pt):
    """Fill f0_from_pt[song, t, :] with the active (non-zero, < F) ground-truth
    pitches at frame t, compacted to the front of the last axis (in place)."""
    global N,T
    for song_index in range(N):
        matrix = np.array(loadmat(mats_and_wavs[song_index][0])['GTF0s']).astype('int')
        print(matrix.shape)
        for t in range(T):
            index=0
            for p in range(P):
                # Keep only voiced pitches inside the modelled frequency range.
                if matrix[p][t]!=0 and matrix[p][t]<F:
                    f0_from_pt[song_index][t][index]=matrix[p][t]
                    index+=1
f0_from_pt = np.zeros((N,T,P))
create_f0_from_pt(f0_from_pt)
## Update Pt_fbypa
def update_Pt_fbypa(song_index, Pt_fbypa):
    """Fill Pt_fbypa[t, p, a, :] with the normalised spectral template
    P_t(f | p, a) built from the gaussian-comb/triangle basic blocks (in place)."""
    global T,P,A,F,K
    # from p to f0
    for t in range(T):
        print('Updating Pt_fbypa, iteration : '+str(t))
        for p in range(P):
            #print('Updating')
            for a in range(A):
                array = np.array([0.0 for k in range(F)])
                total_basics=0
                for f in range(F):
                    # f0 is loop-invariant over f; the pitch slot for (t, p).
                    f0=f0_from_pt[song_index,t,p]
                    basic = basic_block(K,f,a,f0,fr,r,sigma)
                    total_basics += basic
                    array[f] = basic
                    #if basic!=0 : print(array[f])
                # Normalise to a probability distribution over f; all-zero
                # templates (e.g. silent pitch slots) stay all-zero.
                if total_basics!=0 : array = array/total_basics
                else : array = np.array([0.0 for k in range(F)])
                Pt_fbypa[t,p,a,:]=array
    print('Updated Pt_fbypa')
#Pt_fbypa = np.random.uniform(low=0.1, high=0.9, size=(T,P,A,F))
#update_Pt_fbypa(song_index, Pt_fbypa)
## Save files
directory = '/home/aaditya/store/'
def filepath(filename):
    """Return *filename* prefixed with the shared output directory."""
    return directory + filename
#np.save(filepath('Pt_fbypa'), Pt_fbypa)
# Pt_p = np.random.uniform(low=0.1, high=0.9, size=(T,P))
# Pt_sbyp = np.random.uniform(low=0.1, high=0.9, size=(T,P,S))
# Pt_zbyps = np.random.uniform(low=0.1, high=0.9, size=(T,P,S,Z))
# P_abysz = np.random.uniform(low=0.1, high=0.9, size=(S,Z,A))
## Normalization rules
def normalize_Pt_p(T, Pt_p):
    """Scale each row Pt_p[t, :] so it sums to one (in place)."""
    for t in range(T):
        row = Pt_p[t, :]
        Pt_p[t, :] = row / sum(row)
def normalize_Pt_sbyp(T, P, Pt_sbyp):
    """Scale each slice Pt_sbyp[t, p, :] so it sums to one (in place)."""
    for t in range(T):
        for p in range(P):
            vec = Pt_sbyp[t, p, :]
            Pt_sbyp[t, p, :] = vec / sum(vec)
def normalize_Pt_zbyps(T, P, S, Pt_zbyps):
    """Scale each slice Pt_zbyps[t, p, s, :] so it sums to one (in place)."""
    for t in range(T):
        for p in range(P):
            for s in range(S):
                vec = Pt_zbyps[t, p, s, :]
                Pt_zbyps[t, p, s, :] = vec / sum(vec)
def normalize_P_abysz(S, Z, P_abysz):
    """Scale each slice P_abysz[s, z, :] so it sums to one (in place)."""
    for s in range(S):
        for z in range(Z):
            vec = P_abysz[s, z, :]
            P_abysz[s, z, :] = vec / sum(vec)
## Normalize
# normalize_Pt_p(T,Pt_p)
# normalize_Pt_sbyp(T,P,Pt_sbyp)
# normalize_Pt_zbyps(T,P,S,Pt_zbyps)
# normalize_P_abysz(S,Z,P_abysz)
## Update Pt_f
def update_Pt_f(Pt_f,Pt_p,Pt_sbyp,Pt_zbyps,Pt_fbypa,P_abysz) :
    """Recompute the marginal P_t(f) by summing the factored model over
    (p, s, z, a) for every time frame and frequency bin (in place)."""
    global T,F
    for t in range(T):
        print('Updating Pt_f, iteration : ' + str(t))
        for f in range(F):
            # NOTE(review): Pt_p[t,:] has shape (P,) and Pt_sbyp[t,:,:] has
            # shape (P, S); broadcasting aligns (P,) with the trailing S axis,
            # which only works because P == S (both 4) — unlike
            # update_Pt_pszabyf, which reshapes Pt_p to (P, 1) first. Confirm
            # this is intended.
            matrix=Pt_p[t,:]*Pt_sbyp[t,:,:]
            matrix=np.reshape(matrix, list(matrix.shape) + [1])
            matrix=matrix*Pt_zbyps[t,:,:,:]
            # tensordot over the A axis: (P, A) x (S, Z, A) -> (P, S, Z).
            matrix*=np.tensordot(Pt_fbypa[t,:,:,f],P_abysz[:,:,:],axes=(1,2))
            Pt_f[t,f]=matrix.sum()
    print('Updated Pt_f')
#update_Pt_f(Pt_f,Pt_p,Pt_sbyp,Pt_zbyps,Pt_fbypa,P_abysz)
#print(Pt_f.shape)
## Update rules
def three_dimensional_product(M1, M2):
    """Return M3 with M3[a, b, c, d] = M1[a, b, c] * M2[b, c, d].

    M1 has shape (A, B, C) and M2 has shape (B, C, D); the result is a
    float64 array of shape (A, B, C, D).
    """
    A, B, C = M1.shape
    _, _, D = M2.shape
    # Broadcasting performs exactly the same elementwise products as the
    # original per-(a, d) Python loops, but at C speed. Assigning into a
    # zeros() buffer preserves the original float64 output dtype even for
    # integer inputs.
    M3 = np.zeros((A, B, C, D))
    M3[:] = M1[:, :, :, None] * M2[None, :, :, :]
    return M3
def four_dimensional_product(M1, M2):
    """Return M3 with M3[a, b, c, d, e] = M1[a, b, c] * M2[b, c, d, e].

    M1 has shape (A, B, C) and M2 has shape (B, C, D, E); the result is a
    float64 array of shape (A, B, C, D, E).
    """
    A, B, C = M1.shape
    _, _, D, E = M2.shape
    # Same broadcasting trick as three_dimensional_product: identical products
    # to the original per-(a, d, e) loops, with the float64 dtype preserved.
    M3 = np.zeros((A, B, C, D, E))
    M3[:] = M1[:, :, :, None, None] * M2[None, :, :, :, :]
    return M3
def update_Pt_pszabyf(Pt_pszabyf,Pt_f,Pt_p,Pt_sbyp,Pt_zbyps,Pt_fbypa,P_abysz):
    """E-step: recompute the posterior P_t(p, s, z, a | f) for every frame
    and write it into Pt_pszabyf (in place), zeroing NaNs from 0/0."""
    global T,F,P,S,Z,A
    for t in range(T):
        print('Updating Pt_pszabyf, iteration : ' + str(t))
        # Likelihood ratio per (p, a, f); division by Pt_f can produce NaN
        # where Pt_f is 0, which is zeroed immediately below.
        x = Pt_fbypa[t,:,:,:]/Pt_f[t,:] #x.shape = (P, A, F) = (4, 7, 172)
        x[np.isnan(x)]=0
        x = np.swapaxes(x,0,2) #x.shape = (F, A, P) = (172, 7, 4)
        x = np.swapaxes(x,1,2) #x.shape = (F, P, A) = (172, 4, 7)
        # x
        # Prior chain P(p) * P(s|p) * P(z|p,s), built up by explicit reshapes.
        y=np.reshape(Pt_p[t,:], list(Pt_p[t,:].shape)+[1]) #y.shape = (P, 1)
        y=y*np.ones((1,S)) #y.shape = (P, S)
        y=y*Pt_sbyp[t,:,:] #y.shape = (P, S)
        y=np.reshape(y, list(y.shape) + [1]) #y.shape = (P, S, 1)
        y=y*Pt_zbyps[t,:,:,:] #y.shape = (P, S, Z)
        y.shape  # no-op; leftover from debugging
        #y
        z = three_dimensional_product(y,P_abysz) #z.shape = (P, S, Z, A)
        z = np.swapaxes(z,1,3) #z.shape = (P, A, Z, S)
        #z
        w = four_dimensional_product(x,z) #w.shape = (F, P, A, Z, S)
        w = np.swapaxes(w,2,4) #w.shape = (F, P, S, Z, A)
        Pt_pszabyf[t,:,:,:,:,:]=w
    print('Updated')
## Update rules
def six_dimensional_product(Vft, Pt_pszabyf):
    """Weight the posterior tensor by the observed spectrogram magnitudes:
    out[t, f, ...] = Vft[f, t] * Pt_pszabyf[t, f, ...]."""
    # Reads module-level F and T to crop the spectrogram to the modelled
    # frequency/time range; trailing singleton axes let it broadcast over
    # the (p, s, z, a) dimensions.
    x = np.reshape(Vft[:F,:T].T, list(Vft[:F,:T].T.shape)+[1,1,1,1])
    x = x*Pt_pszabyf
    return x
def update_Pt_p(Pt_p, Vft_into_Pt_pszabyf):
    """M-step: P_t(p) = (posterior mass with pitch p at time t) / (total mass
    at time t); writes into Pt_p in place, zero when the frame is empty."""
    # Vft, Pt_pszabyf
    global T, F, P, S, Z, A
    for t in range(T):
        print('Updating Pt_p, iteration : '+str(t))
        # Denominator: total weighted posterior mass in frame t.
        total = np.sum(Vft_into_Pt_pszabyf[t, :, :, :, :, :])
        for p in range(P):
            if total != 0:
                Pt_p[t, p] = np.sum(Vft_into_Pt_pszabyf[t, :, p, :, :, :]) / total
            else:
                Pt_p[t, p] = 0
    print('Updated')
def update_Pt_sbyp(Pt_sbyp, Vft_into_Pt_pszabyf):
    """M-step: P_t(s|p) = (mass with source s and pitch p) / (mass with
    pitch p) per frame; writes into Pt_sbyp in place, zero when empty."""
    # Vft, Pt_pszabyf
    global T, F, P, S, Z, A
    for t in range(T):
        print('Updating Pt_sbyp, iteration : '+str(t))
        for p in range(P):
            # print('Updating')
            # Denominator: mass attributed to pitch p in frame t.
            total = np.sum(Vft_into_Pt_pszabyf[t, :, p, :, :, :])
            for s in range(S):
                if total != 0:
                    Pt_sbyp[t, p, s] = np.sum(Vft_into_Pt_pszabyf[t, :, p, s, :, :]) / total
                else : Pt_sbyp[t, p, s]=0
    print('Updated')
def update_Pt_zbyps(Pt_zbyps, Vft_into_Pt_pszabyf):
    """M-step: P_t(z|p,s) = (mass with component z, source s, pitch p) /
    (mass with source s, pitch p) per frame; in place, zero when empty."""
    # Vft, Pt_pszabyf
    global T, F, P, S, Z, A
    for t in range(T):
        print('Updating Pt_zbyps, iteration : '+str(t))
        for p in range(P):
            for s in range(S):
                # Denominator: mass attributed to the (p, s) pair in frame t.
                total = np.sum(Vft_into_Pt_pszabyf[t, :, p, s, :, :])
                for z in range(Z):
                    if total != 0:
                        Pt_zbyps[t, p, s, z] = np.sum(Vft_into_Pt_pszabyf[t, :, p, s, z, :]) / total
                    else : Pt_zbyps[t, p, s, z]=0
    print('Updated')
def update_P_abysz(P_abysz, Vft_into_Pt_pszabyf):
    """M-step: P(a|s,z) = (mass with band a, source s, component z) /
    (mass with source s, component z), summed over all frames; in place.

    Bug fix: the numerator previously summed over ALL bands (the same slice
    as the denominator), so every entry of P_abysz became 1.0 and the filter
    distribution was never actually estimated. The numerator now slices the
    band axis at `a`, mirroring update_Pt_zbyps.
    """
    global T, F, P, S, Z, A
    for s in range(S):
        for z in range(Z):
            print('Updating P_abysz, iteration : ' + str(s)+str(z))
            # Denominator: total mass for the (s, z) pair across all frames.
            total = np.sum(Vft_into_Pt_pszabyf[:, :, :, s, z, :])
            for a in range(A):
                if total != 0:
                    P_abysz[s, z, a] = np.sum(Vft_into_Pt_pszabyf[:, :, :, s, z, a]) / total
                else:
                    P_abysz[s, z, a] = 0
    print('Updated')
# Create Pt_fbys
def create_Pt_fbys(Pt_fbys, Pt_fbypa, Pt_p, Pt_sbyp, Pt_zbyps, P_abysz):
    """Marginalize the factored model down to P_t(f|s) for each source s,
    writing into Pt_fbys (in place)."""
    global T,S,F,P,A
    for t in range(T):
        print('Creating Pt_fbys, iteration : ' + str(t))
        for s in range(S):
            for f in range(F):
                # P(p) * P(s|p): both slices have shape (P,) here.
                matrix=Pt_p[t,:]*Pt_sbyp[t,:,s]
                matrix=np.reshape(matrix, list(matrix.shape) + [1])
                matrix=matrix*Pt_zbyps[t,:,s,:]
                # tensordot over the A axis: (P, A) x (Z, A) -> (P, Z).
                matrix*=np.tensordot(Pt_fbypa[t,:,:,f],P_abysz[s,:,:],axes=(1,1))
                Pt_fbys[t,s,f]=matrix.sum()
    print("Created Pt_fbys")
# Create Vft_s
def create_Vft_s(Vft_s, Vft, Pt_fbys, Pt_f):
    """Split the magnitude spectrogram into per-source spectrograms with a
    ratio mask P_t(f|s)/P_t(f); writes into Vft_s (in place)."""
    global S
    for s in range(S):
        # NaNs from 0/0 divisions are zeroed after the loop.
        Vft_s[s,:,:]=(Pt_fbys[:,s,:]/Pt_f).T*Vft[:F,:T]
    Vft_s[np.isnan(Vft_s)]=0
## Update
def update(upto_index, max_iter):
    """Run max_iter EM iterations per song for the first upto_index songs,
    then save all estimated factors and per-source spectrograms to disk.

    Loads each song's audio and its precomputed Pt_fbypa templates, runs
    alternating E/M steps, and writes .npy files via filepath().
    """
    global mats_and_wavs,T,F,P,S,Z,A
    for i in range(upto_index):
        ## Spectrogram
        song, sample_rate = librosa.load(mats_and_wavs[i][1])
        Stft = librosa.stft(song, n_fft=window_size, hop_length=hop_size)
        Vft, phase = librosa.magphase(Stft)
        #Pt_fbypa = np.random.uniform(low=0.1, high=0.9, size=(T,P,A,F))
        #update_Pt_fbypa(song_index, Pt_fbypa)
        # Spectral templates are precomputed and cached on disk per song.
        Pt_fbypa = np.load(filepath('Pt_fbypa_')+str(i)+'.npy')
        ## Initialize
        # Random (0.1, 0.9) initialisation, then normalised to distributions.
        Pt_f = np.random.uniform(low=0.1, high=0.9, size=(T, F))
        Pt_p = np.random.uniform(low=0.1, high=0.9, size=(T, P))
        Pt_sbyp = np.random.uniform(low=0.1, high=0.9, size=(T, P, S))
        Pt_zbyps = np.random.uniform(low=0.1, high=0.9, size=(T, P, S, Z))
        P_abysz = np.random.uniform(low=0.1, high=0.9, size=(S, Z, A))
        Pt_fbys = np.random.uniform(low=0.1, high=0.9, size=(T, S, F))
        Vft_s = np.random.uniform(low=0.1, high=0.9, size=(S, F, T))
        ## Normalize
        normalize_Pt_p(T, Pt_p)
        normalize_Pt_sbyp(T, P, Pt_sbyp)
        normalize_Pt_zbyps(T, P, S, Pt_zbyps)
        normalize_P_abysz(S, Z, P_abysz)
        ## Update
        # NOTE(review): this posterior tensor has T*F*P*S*Z*A float64 entries —
        # very large for the full T; confirm memory headroom.
        Pt_pszabyf = np.random.uniform(low=0.1, high=0.9, size=(T, F, P, S, Z, A))
        for iteration in range(max_iter) :
            # E_step
            print(iteration)
            update_Pt_pszabyf(Pt_pszabyf,Pt_f,Pt_p,Pt_sbyp,Pt_zbyps,Pt_fbypa,P_abysz)
            Vft_into_Pt_pszabyf = six_dimensional_product(Vft, Pt_pszabyf)
            # M_step
            update_Pt_p(Pt_p, Vft_into_Pt_pszabyf)
            update_Pt_sbyp(Pt_sbyp, Vft_into_Pt_pszabyf)
            update_Pt_zbyps(Pt_zbyps, Vft_into_Pt_pszabyf)
            update_P_abysz(P_abysz, Vft_into_Pt_pszabyf)
            update_Pt_f(Pt_f,Pt_p,Pt_sbyp,Pt_zbyps,Pt_fbypa,P_abysz)
        # Save
        #np.save(filepath('Pt_fbypa_') + str(i) + '.npy', Pt_fbypa)
        np.save(filepath('Pt_pszabyf_')+str(i)+'.npy', Pt_pszabyf)
        np.save(filepath('Pt_f_')+str(i)+'.npy', Pt_f)
        np.save(filepath('Pt_p_')+str(i)+'.npy', Pt_p)
        np.save(filepath('Pt_sbyp_')+str(i)+'.npy', Pt_sbyp)
        np.save(filepath('Pt_zbyps_')+str(i)+'.npy', Pt_zbyps)
        np.save(filepath('P_abysz_')+str(i)+'.npy', P_abysz)
        # Create Pt_fbys and Vft_s
        create_Pt_fbys(Pt_fbys, Pt_fbypa, Pt_p, Pt_sbyp, Pt_zbyps, P_abysz)
        np.save(filepath('Pt_fbys_')+str(i)+'.npy', Pt_fbys)
        create_Vft_s(Vft_s, Vft, Pt_fbys, Pt_f)
        np.save(filepath('Vft_s_')+str(i)+'.npy', Vft_s)
# Call
# Runs the whole pipeline at import time: every song, 2 EM iterations each.
update(len(mats_and_wavs),2)
|
'''
Created on 24 Jan 2016
@author: craig
'''
from iFixList import IFixList
from addBehavior import AddBehavior
class OrdersList(IFixList, AddBehavior):
    '''
    Order-list fix-up helper: holds the column identifiers used by the
    IFixList interface ("ProductId", "Quantity", "UnitCost", "UnitPrice")
    and an AddBehavior strategy instance for adding items.
    '''
    def __init__(self):
        # Column/field names of the order data this list operates on.
        self.idName = "ProductId"
        self.idQ = "Quantity"
        self.idCost = "UnitCost"
        self.idPrice = "UnitPrice"
        # Strategy object implementing how entries are added.
        self.addStyle = AddBehavior()
|
#------------------------ FUNCTION DEFINITIONS ------------------------------
def compute_confusion_matrix(y_pred, y_true):
    """
    Build a small confusion-matrix dict for the two-class polygons/cats task.

    'y_pred' is an ndarray of predicted category probabilities (columns:
    polygon, cat); 'y_true' is an ndarray of one-hot true labels.

    Each entry is the mean of the rounded predictions for one combination,
    i.e. a rate rather than a raw count:
      'polygons_as_polygons' — sensitivity / true positive rate for polygons.
      'polygons_as_cats'     — false negative rate for polygons.
      'cats_as_cats'         — sensitivity / true positive rate for cats.
      'cats_as_polygons'     — false negative rate for cats.
    """
    is_polygon = y_true[:, 0].astype(bool)
    is_cat = y_true[:, 1].astype(bool)
    predictions = pd.DataFrame(y_pred, columns=['polygon', 'cat'])
    return {
        'polygons_as_polygons': np.mean(np.rint(predictions[is_polygon].polygon)),
        'polygons_as_cats': np.mean(np.rint(predictions[is_polygon].cat)),
        'cats_as_cats': np.mean(np.rint(predictions[is_cat].cat)),
        'cats_as_polygons': np.mean(np.rint(predictions[is_cat].polygon)),
    }
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Tan Chao'
'''
logger wrapper.
use dictConfig.
'''
import logging
import logging.config
import yaml
def testLogging():
    """Configure logging from dictconfig.yaml and emit one record per logger."""
    with open('dictconfig.yaml') as f:
        # safe_load: plain yaml.load without an explicit Loader is deprecated
        # (PyYAML >= 5.1) and can construct arbitrary Python objects from the
        # file; the config dict needs only safe YAML types.
        dict_config = yaml.safe_load(f)
    logging.config.dictConfig(dict_config)
    logger1 = logging.getLogger('foo.bar.baz')
    logger2 = logging.getLogger('foo')
    logger3 = logging.getLogger('bar')
    print(logger1, logger2, logger3)
    # 'foo.bar.baz' propagates to 'foo'; 'bar' is an independent hierarchy.
    logger1.error('aaa')
    logger2.error('bbb')
    logger3.error('ccc')
if __name__ == '__main__':
    # main()
    # test()
    # Only the dictConfig logging demo is currently enabled.
    testLogging()
from typing import Callable, Union
from numbers import Number
from phi import math
from ._field import Field
from ..geom import Geometry
from phiml.math import Shape, spatial, instance, Tensor, wrap
class AngularVelocity(Field):
    """
    Model of a single vortex or set of vortices.
    The falloff of the velocity magnitude can be controlled.
    Without a specified falloff, the velocity increases linearly with the distance from the vortex center.
    This is the case with rotating rigid bodies, for example.
    """
    def __init__(self,
                 location: Union[Tensor, tuple, list, Number],
                 strength: Union[Tensor, Number] = 1.0,
                 falloff: Callable = None,
                 component: str = None):
        """
        Args:
            location: Vortex center(s); must have a channel dim 'vector' whose
                item names list the spatial dimensions, and no spatial or
                instance dims.
            strength: Rotation strength (scalar or batched tensor).
            falloff: Optional callable mapping distance vectors to a magnitude
                scale; None means linear growth with distance.
            component: Optional name of a single velocity component to return.
        """
        # Accept plain tuples/lists/numbers by converting them to tensors.
        location = wrap(location)
        strength = wrap(strength)
        assert location.shape.channel.names == ('vector',), "location must have a single channel dimension called 'vector'"
        assert location.shape.spatial.is_empty, "location tensor cannot have any spatial dimensions"
        assert not instance(location), "AngularVelocity does not support instance dimensions"
        self.location = location
        self.strength = strength
        self.falloff = falloff
        self.component = component
        # The field is spatially varying: declare one (size-1) spatial dim per
        # entry of location's 'vector' item names.
        spatial_names = location.vector.item_names
        assert spatial_names is not None, "location.vector must list spatial dimensions as item names"
        self._shape = location.shape & spatial(**{dim: 1 for dim in spatial_names})
    def _sample(self, geometry: Geometry, **kwargs) -> Tensor:
        """Evaluate the velocity at the center points of *geometry*."""
        points = geometry.center
        distances = points - self.location
        # Without a falloff the magnitude grows linearly with distance
        # (rigid-body rotation); otherwise scale strength by falloff(distances).
        strength = self.strength if self.falloff is None else self.strength * self.falloff(distances)
        velocity = math.cross_product(strength, distances)
        # Sum contributions from multiple vortices (location batch dims that
        # are not present in the sample points).
        velocity = math.sum(velocity, self.location.shape.batch.without(points.shape))
        if self.component:
            velocity = velocity.vector[self.component]
        return velocity
    @property
    def shape(self) -> Shape:
        """Shape of this field: location dims plus size-1 spatial dims."""
        return self._shape
    def __getitem__(self, item: dict):
        """Slice the field; only slicing along 'vector' is supported."""
        assert all(dim == 'vector' for dim in item), f"Cannot slice AngularVelocity with {item}"
        if 'vector' in item:
            assert item['vector'] == 0 or self.component is None
            # Resolve the integer index to the spatial dimension name.
            component = self.shape.spatial.names[item['vector']]
            return AngularVelocity(self.location, self.strength, self.falloff, component)
        else:
            return self
|
#!/usr/bin/python
# calcualtes supervised weighted knn baseline from sparse format
# This code does not handle tie cases
import sys
import gzip
import pprint
import collections as col;
def map_keyfile(fname, withtags=False):
    """Read a gzip'd PTB-formatted key file.

    Returns (dd, maps, keys):
      dd   -- list mapping running index -> word (first column);
      maps -- maps[word]["_A_"] is the word's total occurrence count and
              maps[word][index] is the occurrence number at that index;
      keys -- dict mapping index -> gold key (second column).
    NOTE(review): the `withtags` parameter is accepted but never used.
    """
    #returns: dd:index-> word array maps:?? keys: index->key from ptb formated file
    #<s> and <\s> tags and emptylines are omitted during indexing
    dd = []
    keys = {}
    maps = col.defaultdict(lambda: col.defaultdict(int))
    for line in gzip.open(fname):
        l = line.strip().split()
        # Skip empty lines.
        if len(l) == 0:
            continue
        dd.append(l[0])
        # "_A_" accumulates the running occurrence count for this word.
        maps[l[0]]["_A_"] += 1
        maps[l[0]][len(dd) - 1] = maps[l[0]]["_A_"]
        keys[len(dd) - 1] = l[1]
    return (dd,maps,keys)
def knn_dist_sparse_gz(words, keys, k, debug = False):
    """Score inverse-distance-weighted kNN classification from sparse rows.

    Reads one line per instance from stdin: a leading token followed by
    (neighbor_index, distance) pairs. Votes for each neighbor's key with
    weight 1/distance, tallies right/wrong against keys[r], and prints
    "correct \t wrong \t accuracy". Python 2 syntax (print >> ...).
    """
    if debug: print >> sys.stderr, "calculating"
    cr, wr = 0, 0
    for (r,line) in enumerate(sys.stdin, start = 0):
        ll = line.strip().split()
        # Drop the leading token; the rest are (index, distance) pairs.
        ll.pop(0)
        colum = len(ll)/2  # NOTE(review): unused (and integer division in py2)
        ans = col.defaultdict(lambda: 0)
        maxv,maxi = 0,-1
        # Sentence-boundary rows are not scored.
        if keys[r] == "</s>":
            continue
        for j in range(k):
            ref = int(ll[2*j])
            if ref == r:
                # NOTE(review): `j -= 1` does not affect a py2 for-loop
                # counter, so a self-match consumes one of the k votes.
                j -= 1
                continue
            # Zero distances get a huge weight instead of dividing by zero.
            if float(ll[2*j+1]) == 0:
                ans[keys[ref]] += 1.0/10e-15;
            else:
                ans[keys[ref]] += 1.0/float(ll[2*j+1])
            # Track the current argmax vote (ties keep the earlier winner).
            if ans[keys[ref]] > maxv:
                maxv = ans[keys[ref]]
                maxi = keys[ref]
        if maxi == keys[r]:
            cr += 1
        else:
            wr += 1
        if debug and r % 100000 == 0:
            print >> sys.stderr, r,
            print >> sys.stderr, cr,"\t",wr, "\t", 1.0*cr /(wr+cr)
    print cr,"\t",wr, "\t", 1.0*cr /(wr+cr)
# Build the index->word / index->key maps from the gzip'd key file (argv[1]),
# then score weighted kNN with k = argv[2] on the sparse rows piped to stdin.
keys = map_keyfile(sys.argv[1])
knn_dist_sparse_gz(keys[0],keys[2], int(sys.argv[2]), debug=False)
|
from fact import *
def fib(x):
    """Print the first x Fibonacci numbers, one per line (0, 1, 1, 2, ...).

    The first two values are always printed, even for x < 2.
    """
    previous, current = 0, 1
    print(previous)
    print(current)
    for _ in range(2, x):
        previous, current = current, previous + current
        print(current)
# fact() comes from the star import of the local `fact` module above.
fact(10)
|
import tkinter as tk
from src.library import Library
class GUI:
    """Tkinter front-end for the game Library: a form with title, console
    and status fields plus a submit button."""

    def __init__(self, master):
        self.master = master
        self.library = Library()
        self.create_window()
        self.create_mainpage()

    def create_window(self):
        """Configure the top-level window (title, size, background)."""
        self.master.title("Game Manager")
        self.master.geometry("800x600")
        self.master.configure(background="gray")

    def create_mainpage(self):
        """Build the main form.

        Fixes two defects in the original:
        * the console/status entries were wired to ``self.title`` (copy-paste
          bug), so all three entries edited the same StringVar;
        * ``tk.Widget(...).grid(...)`` returns None, so every widget
          attribute was None — widgets are now created and gridded separately.
        """
        self.master.grid_columnconfigure(2, minsize=80)
        self.master.grid_rowconfigure(0, minsize=300)
        # One StringVar per form field.
        self.title = tk.StringVar()
        self.console = tk.StringVar()
        self.status = tk.StringVar()
        self.title_label = tk.Label(self.master, background='gray', text='Title: ')
        self.title_label.grid(row=1, column=0)
        self.title_entry = tk.Entry(self.master, justify='center', textvariable=self.title)
        self.title_entry.grid(row=1, column=1)
        self.console_label = tk.Label(self.master, background='gray', text='Console: ')
        self.console_label.grid(row=2, column=0)
        self.console_entry = tk.Entry(self.master, justify='center', textvariable=self.console)
        self.console_entry.grid(row=2, column=1)
        self.status_label = tk.Label(self.master, background='gray', text='Status: ')
        self.status_label.grid(row=3, column=0)
        self.status_entry = tk.Entry(self.master, justify='center', textvariable=self.status)
        self.status_entry.grid(row=3, column=1)
        # The lambda defers the lookup of self.title until the button is pressed.
        self.submit = tk.Button(self.master, text="Submit",
                                command=lambda: self.library.method(self.title.get()))
        self.submit.grid(row=2, column=3)
|
# function: transformation
# version: v02
# input: before_transformation(), type
# 0 for row Exchange
# 1 for right circular Shift
# 2 for middle clockwise rotation
# output: after_transformation()
# description:
# this funtion will manipulate current_state
# initial state 1 2 3 4 5 6 7 8
# E 8 7 6 5 4 3 2 1
# S 4 1 2 3 6 7 8 5
# R 1 7 2 4 5 3 6 8
def transformation(before_transformation, type):
    """Apply one of three fixed permutations to an 8-slot puzzle state.

    type 0: row Exchange, 1: right circular Shift, 2: middle clockwise
    rotation. Any other type returns the state unchanged. The result is
    always a tuple; after[i] = before[perm[i]].
    """
    # Permutation tables transcribed from the original element-by-element
    # assignments (see the E/S/R table in the header comment).
    permutations = {
        0: (7, 6, 5, 4, 3, 2, 1, 0),  # row Exchange
        1: (3, 0, 1, 2, 5, 6, 7, 4),  # right circular Shift
        2: (0, 6, 1, 3, 4, 2, 5, 7),  # middle clockwise rotation
    }
    state = list(before_transformation)
    perm = permutations.get(type)
    if perm is None:
        return tuple(state)
    return tuple(state[source] for source in perm)
# test code
if __name__ == "__main__":
    # Expected output (per the E/S/R table in the header): the identity state
    # followed by each of the three transformations applied to 1..8.
    current_state = (1, 2, 3, 4, 5, 6, 7, 8)
    print(current_state)
    print(transformation(current_state, 0))
    print(transformation(current_state, 1))
    print(transformation(current_state, 2))
# Simple weekly-wage demo: rate * hours, printed with %-formatting.
hourlyRate = 10
hoursWorked = 34
weeklyWage = hourlyRate*hoursWorked
print('Wages for\nFred\nFlintstone:', end='\n')
print('Hourly Rate: $%d\nHours Worked: %d\nWeekly Wage: $%d' % (hourlyRate, hoursWorked, weeklyWage))
from django.contrib import admin
from .models import SocialNetwork, TeamMember, MemberSocialNetwork
# Register your models here.
class MemberSocialNetworkInLine(admin.TabularInline):
    """
    Tabular inline for editing a member's MemberSocialNetwork rows
    directly on the TeamMember admin page.
    """
    model = MemberSocialNetwork
class MemberAdmin(admin.ModelAdmin):
    """Admin for TeamMember: list shows name/position/order, sorted by the
    manual 'order' field, with inline social-network links."""
    list_display = ('name', 'position', 'order')
    ordering = ('order', )
    inlines = [
        MemberSocialNetworkInLine
    ]
# TeamMember uses the customised admin above; SocialNetwork uses the default.
admin.site.register(TeamMember, MemberAdmin)
admin.site.register(SocialNetwork)
|
import tkinter
from torch.nn.utils.rnn import *
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
#
# lst = []
#
# lst.append(torch.randn((1, 4)))
# lst.append(torch.randn((3, 4)))
# lst.append(torch.randn((5, 4)))
#
# sort_list = lst
# # sort_list = sorted(lst, key=len, reverse=True)
# list_len = list(map(len, sort_list))
# # print(sort_list)
# # print(list_len)
#
# lst = pad_sequence(sort_list, batch_first=True)
#
# print(lst.shape)
# lst_packed = pack_padded_sequence(lst, list_len[:3], batch_first=True)
#
# print(lst_packed[0].shape)
#
# lstm = nn.LSTM(4, 20, batch_first=True)
#
# out, _ = lstm(lst_packed)
# print(out[0].shape)
# pad_out, _ = pad_packed_sequence(out, batch_first=True)
#
# print(pad_out.shape)
# pred_text = torch.randn(4, 8)
# y = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0],
# [0, 1, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 0, 0, 0, 0, 0],
# [0, 0, 0, 1, 0, 0, 0, 0]])
#
# pred_text = (pred_text.numpy() == pred_text.numpy().max(axis=1, keepdims=1)).astype("float64")
# print(pred_text)
# pred_text = [np.argmax(item) for item in pred_text]
# print(pred_text)
# y = [np.argmax(item) for item in y]
# print(y)
# pred_text, y = np.array(pred_text), np.array(y)
# print(pred_text)
# print(y)
# per_text = pred_text == y
# print(per_text)
# text_acc = len(per_text[per_text == True]) / len(per_text) * 100
# print(text_acc)
import random
# x = np.random.randn(123, 214, 300)
# y = np.random.randn(123, 153, 300)
# x = np.pad(y, ((0, 0), (0, max(y.shape[1], x.shape[1]) - min(y.shape[1], x.shape[1])), (0, 0)),
# 'constant', constant_values=((0, 0), (0, 0), (0, 0)))
# print(x.shape)
#
# torch.nn.Embedding.from_pretrained()
|
import os
import time
import json
import pickle
import logging
from filelock import FileLock
import torch
import numpy as np
from transformers import PreTrainedTokenizerBase
logger = logging.getLogger(__name__)
class TextDataset(torch.utils.data.Dataset):
    """Seq2seq dataset over a JSON-lines file with 'source'/'target' fields,
    tokenized once and cached to disk next to the input file."""
    def __init__(self, tokenizer: PreTrainedTokenizerBase, file_path: str, block_size: int=512, overwrite_cache=False):
        """
        Args:
            tokenizer: tokenizer used to encode both source and target.
            file_path: path to a JSON-lines file; each line must contain
                'source' and 'target' keys.
            block_size: max token length for truncation.
            overwrite_cache: re-tokenize even if a cache file exists.
        """
        super(TextDataset, self).__init__()
        self.path = file_path
        assert os.path.isfile(file_path)
        directory, filename = os.path.split(file_path)
        # Cache name encodes tokenizer class and block size so different
        # configurations don't collide.
        cached_features_file = os.path.join(
            directory, "cached_lm_{}_{}_{}".format(tokenizer.__class__.__name__, str(block_size), filename,),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.data = pickle.load(handle)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {directory}")
                # List of (source_ids, target_ids) pairs.
                self.data = []
                with open(file_path, encoding="utf-8") as f:
                    for each_line in f:
                        obj = json.loads(each_line)
                        # NOTE(review): padding=True with a single sequence and
                        # no fixed strategy generally pads nothing here —
                        # confirm intended tokenizer behaviour.
                        tokenized_source = tokenizer.encode(obj['source'], truncation=True, max_length=block_size, padding=True)
                        tokenized_target = tokenizer.encode(obj['target'], truncation=True, max_length=block_size, padding=True)
                        self.data.append((tokenized_source, tokenized_target))
                # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
                # If your dataset is small, first you should loook for a bigger one :-) and second you
                # can change this behavior by adding (model specific) padding.
                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
                )
    def __getitem__(self, index:int) -> torch.Tensor:
        """Return the example at *index* as {'source': LongTensor, 'target': LongTensor}."""
        return {
            'source': torch.tensor(self.data[index][0], dtype=torch.long),
            'target': torch.tensor(self.data[index][1], dtype=torch.long)
        }
    def __len__(self):
        """Number of (source, target) pairs."""
        return len(self.data)
# I pledge my honor that I have abided by the Stevens Honor System. Andrew Ozsu
def function(x):
    """Square every element of list *x* in place and return the same list."""
    for index, value in enumerate(x):
        x[index] = value * value
    return x
|
import gevent
from arago.actors.monitor import Monitor
from arago.actors.actor import Task, ActorStoppedError
class Router(Monitor):
    """Actor that forwards each incoming task to a child chosen by _route()."""
    def _route(self, msg):
        """Override in your own Router subclass"""
        raise NotImplementedError
    def _forward(self, task):
        """Hand *task* to the target returned by _route(); returns the
        enqueued task, or None when no target was selected."""
        try:
            target = self._route(task)
            self._logger.trace("{me} is handing the task {task} to {target}".format(me=self, task=task, target=target))
            task = target._enqueue(task) if target else None
            return task
        except ActorStoppedError as e:
            gevent.idle()
            self._logger.trace("{me} has failed to route {task} to {target} because {target} is stopped".format(me=self, task=task, target=target))
            # NOTE(review): this branch records the failure on the task but
            # falls through, so _forward returns None instead of re-raising —
            # confirm intended (the generic branch below re-raises).
            task.set_exception(e)
        except Exception as e:
            gevent.idle()
            self._logger.trace("{me} has failed to route {task}: Determining target failed with {err}".format(me=self, task=task, err=e))
            task.set_exception(e)
            raise
    def _handle(self, task):
        """Monitor hook: routing *is* this actor's way of handling a task."""
        self._logger.trace("{me} received task {t} for routing".format(me=self, t=task))
        return self._forward(task)
    def join(self):
        """Block until every child actor has drained its queue."""
        self._logger.trace("{me} is waiting for all children to finish their work".format(me=self))
        [child.join() for child in self._children]
|
"""
Module that calculates the number of hunks made to a commit file.
"""
from statistics import median
from pydriller import ModificationType
from pydriller.metrics.process.process_metric import ProcessMetric
class HunksCount(ProcessMetric):
    """
    This class is responsible to implement the Number of Hunks metric for a
    file. As a hunk is a continuous block of changes in a diff, this number
    assesses how fragmented the commit file is (i.e. lots of changes all
    over the file versus one big change).
    If multiple commits are passed, it returns the median number of hunks in
    that range.
    """
    def count(self):
        """
        Return the number of hunks for each modified file.
        :return: dict mapping file path -> median hunk count over the
            traversed commits
        """
        # old_path -> current path, so renamed files aggregate under one key.
        renamed_files = {}
        # path -> list of per-commit hunk counts.
        files = {}
        for commit in self.repo_miner.traverse_commits():
            for modified_file in commit.modified_files:
                filepath = renamed_files.get(modified_file.new_path,
                                             modified_file.new_path)
                if modified_file.change_type == ModificationType.RENAME:
                    renamed_files[modified_file.old_path] = filepath
                diff = modified_file.diff
                # Count maximal runs of consecutive +/- lines as hunks.
                # NOTE(review): '+++'/'---' file-header lines would also match
                # startswith('+')/('-') — confirm pydriller's diff excludes them.
                is_hunk = False
                hunks = 0
                for line in diff.splitlines():
                    if line.startswith('+') or line.startswith('-'):
                        if not is_hunk:
                            is_hunk = True
                            hunks += 1
                    else:
                        is_hunk = False
                if filepath in files:
                    files[filepath].append(hunks)
                else:
                    files[filepath] = [hunks]
        # Collapse each file's per-commit counts to the median.
        for path, hunks in files.items():
            files[path] = median(hunks)
        return files
|
import requests
import json
import urllib
import warnings
import os
from ._exceptions import *
warnings.formatwarning = warning_format
class here_API:
    """Minimal client for the HERE Routing v8 REST API.

    Credentials come either from ``apiKey`` directly or from a JSON file
    holding an ``apiKey`` entry; exactly one source is required.  The key
    is validated with a small test request at construction time.
    """

    def __init__(self, apiKey=None, credentials_file=None):
        if (apiKey is None) and (credentials_file is None):
            raise CredentialsMissing()
        elif not (apiKey is None or credentials_file is None):
            warnings.warn(f"Two credentials options found. Extracting credentials from apiKey")
        elif credentials_file is not None:
            if not os.path.isfile(credentials_file):
                raise CredentialsFileError()
        self._credentials = {}
        if apiKey:
            self._credentials['apiKey'] = apiKey
        else:
            with open(credentials_file, 'r') as credentials_json:
                # BUG FIX: the bare key string used to be assigned to
                # self._credentials itself, which broke
                # _build_credentials_tags() (it indexes
                # self._credentials["apiKey"]).  Store it under the
                # 'apiKey' key like the direct-argument branch does.
                self._credentials['apiKey'] = json.load(credentials_json)['apiKey']
        self.check_credentials()
        self.free_flow_model = None  # flattened [speed, consumption, ...] table
        self.result = None           # raw JSON of the last routing response

    def set_energy_model(self, speeds=None, energy_consumption=None, model=None):
        """Store the EV free-flow consumption model (or build one from pairs).

        :raises EnergyModelError: when neither a prebuilt model nor both
            speed/consumption sequences are supplied.
        """
        if (speeds is None or energy_consumption is None) and model is None:
            # BUG FIX: the exception was instantiated but never raised.
            raise EnergyModelError(speeds, energy_consumption, model)
        if model:
            self.free_flow_model = model
        else:
            # Interleave speeds and consumptions: [s0, c0, s1, c1, ...].
            self.free_flow_model = []
            for idx, speed in enumerate(speeds):
                self.free_flow_model.append(speed)
                self.free_flow_model.append(energy_consumption[idx])
        return self.free_flow_model

    def make_request(self, origin, destination, time=None):
        """Request a car route between (lat, lon) origin and destination.

        :return: decoded JSON response (also cached in self.result)
        :raises ResponseError: when the API returns an empty response
        """
        api_tag = self._build_credentials_tags()
        self.origin = f'&origin={origin[0]},{origin[1]}'
        self.dest = f'&destination={destination[0]},{destination[1]}'
        if time: time = '&departureTime=' + time
        else: time = ''
        if self.free_flow_model: ev_field = self._transform_energy_model()
        else: ev_field = ''
        # build and make the request
        api_request = f'https://router.hereapi.com/v8/routes?transportMode=car{self.origin}{self.dest}{time}{ev_field}&return=summary,polyline,travelSummary,actions{api_tag}'
        response = requests.get(api_request)
        self.result = response.json()
        if not self.result:
            raise ResponseError()
        return self.result

    def get_energy_consumption(self):
        """Return the energy consumption of the last computed route."""
        if self.result:
            return self.result['routes'][0]['sections'][0]['summary']['consumption']
        else:
            raise ResponseError()

    def get_route_distance(self):
        """Return the length (meters) of the last computed route."""
        if self.result:
            return self.result['routes'][0]['sections'][0]['summary']['length']
        else:
            raise ResponseError()

    def get_route_duration(self):
        """Return the duration (seconds) of the last computed route."""
        if self.result:
            return self.result['routes'][0]['sections'][0]['summary']['duration']
        else:
            raise ResponseError()

    def check_credentials(self):
        """Fire a small fixed routing request to validate the API key."""
        api_tag = self._build_credentials_tags()
        testorg = (40.667864, -73.994026)
        testdest = (40.678123, -73.990967)
        api_origin = f'&origin={testorg[0]},{testorg[1]}'
        api_dest = f'&destination={testdest[0]},{testdest[1]}'
        here_api_request = f'https://router.hereapi.com/v8/routes?transportMode=car{api_origin}{api_dest}&return=summary,polyline{api_tag}'
        response = requests.get(here_api_request).json()
        if 'error' in response.keys():
            raise CredentialsError()
        else:
            print('Credentials OK')

    def _transform_energy_model(self):
        """Serialize the free-flow model into the ev[] query parameter."""
        api_ev_field = '&ev[freeFlowSpeedTable]='
        for element in self.free_flow_model:
            api_ev_field += f'{element},'
        return api_ev_field[:-1]  # drop the trailing comma

    def _build_credentials_tags(self):
        """Return the apiKey query-string fragment."""
        return f'&apiKey={self._credentials["apiKey"]}'
|
#!/usr/bin/python
"""Run ./calc.k over the bt7..bt15 log files, with an optional name prefix."""
import sys, os

# First CLI argument (if any) becomes a "<arg>_" filename prefix.
prefix = sys.argv[1] + "_" if len(sys.argv) > 1 else ""

for index in range(7, 16):
    os.system("./calc.k " + prefix + "bt" + str(index) + ".log")
|
import argparse
import pickle
from drive_mix_v2 import DrivingMix2
import numpy as np
import pandas as pd
import time
# Different ethical state for the negative, positive and mixed policies, but same general state for learning
# Command-line configuration for the ethical-driving Q-learning experiment.
parser = argparse.ArgumentParser(description='ethical agent')
parser.add_argument('--p_ethical', action='store_true',
                    help='indicate whether learn the Rescuing policy')
parser.add_argument('--n_ethical', action='store_true',
                    help='indicate whether learn the Avoiding policy')
parser.add_argument('--m_ethical', action='store_true',
                    help='indicate whether learn the Mixing policy')
# NOTE(review): actual default (0.9) disagrees with the help text (0.6) —
# confirm which one is intended.
parser.add_argument('--c', type=float, default=0.9,
                    help='a parameter to determine the human policy (default: 0.6)')
parser.add_argument('--cn', type=float, default=2,
                    help='scale of the additioal punishment (default: 2)')
parser.add_argument('--cp', type=float, default=2,
                    help='scale of the additional reward (default: 2)')
parser.add_argument('--taun', type=float, default=0.2,
                    help='threshold to determine negatively ethical behavior (default: 0.2)')
parser.add_argument('--taup', type=float, default=0.55,
                    help='threshold to determine positvely ethical behavior (default: 0.55)')
parser.add_argument('--temp', type=float, default=0.7,
                    help='the temperature parameter for Q learning policy (default: 0.7)')
parser.add_argument('--seed', type=int, default=1234,
                    help='random seed (default: 1234)')
parser.add_argument('--lr', type=float, default=0.1,
                    help='learning rate (default: 0.1)')
parser.add_argument('--gamma', type=float, default=0.99,
                    help='discount factor (default: 0.99)')
# NOTE(review): actual default (2000) disagrees with the help text (1000).
parser.add_argument('--num_episodes', type=int, default=2000,
                    help='number of episdoes (default: 1000)')
parser.add_argument('--verbose', action='store_true',
                    help='show log')
parser.add_argument('--record_beg', type=int, default=600,
                    help='begin to record trajectories')
parser.add_argument('--id', type=str, default= '0',
                    help='identify the experiment')
args = parser.parse_args()
actions = range(3)  # the three discrete driving actions
np.random.seed(args.seed)
Q = {}  # tabular Q-values keyed by (state, action)
# Pick the environment / human model matching the requested policy flag.
if args.n_ethical == True:
    dr = DrivingMix2(ishuman_n=True)
    policy_name = 'human_n_mix'
elif args.p_ethical == True:
    dr = DrivingMix2(ishuman_p=True)
    policy_name = 'human_p_mix'
else:
    dr = DrivingMix2(ishuman_m=True)
    policy_name = 'human_m_mix'
trajectory = {}  # (ethical_state, action) -> visit count, recorded late in training
episode_rewards = []
collisions = []
cat_hits = []
elders_saved = []
def kl_div(p1, p2):
    """Return the Kullback-Leibler divergence KL(p1 || p2).

    Both arguments are equal-length sequences of probabilities; entries of
    p2 must be non-zero (and p1 non-zero wherever it carries mass).
    """
    divergence = 0.
    for p, q in zip(p1, p2):
        # KL term p * log(p / q), written as -p * log(q / p).
        divergence += -p * np.log(q / p)
    return divergence
# Tabular Q-learning with a softmax (Boltzmann) behaviour policy.
for cnt in range(args.num_episodes):
    state = dr.reset()
    #state = state[:2]
    rewards = 0.
    prev_pair = None    # previous (state, action); the TD update runs one step late
    prev_reward = None
    frame = 0
    while True:
        frame += 1
        # Softmax action selection over Q-values; unseen (state, action)
        # pairs are lazily initialised with a random value.
        probs = []
        for action in actions:
            try:
                probs.append(np.e**(Q[(state, action)]/args.temp))
            except:
                # NOTE(review): bare except — only KeyError is expected here.
                Q[(state, action)] = np.random.randn()
                probs.append(np.e**(Q[(state, action)]/args.temp))
        total = sum(probs)
        probs = [p / total for p in probs]
        action = np.random.choice(3, 1, p=probs)[0]
        if args.verbose: print(probs, state, action)
        # Project the full state onto the ethically relevant features for
        # the policy being recorded.
        if args.n_ethical == True:
            ethical_state = (state[2], state[5], state[8], state[10])
        elif args.p_ethical == True:
            ethical_state = (state[3], state[6], state[9])
        else:
            ethical_state = (state[2], state[3], state[5], state[6], state[8], state[9], state[10])
        if cnt > args.record_beg:
            # Only count (ethical_state, action) visits once learning has settled.
            try:
                trajectory[(ethical_state, action)] += 1
            except:
                trajectory[(ethical_state, action)] = 1
        if prev_pair is not None:
            # One-step TD update, delayed by a step (SARSA-style target).
            Q[prev_pair] = Q[prev_pair] + args.lr * (prev_reward + args.gamma * Q[(state, action)] - Q[prev_pair])
        next_state, reward, done = dr.step(action)
        prev_pair = (state, action)
        prev_reward = reward
        rewards += reward
        if done:
            # Terminal update has no bootstrap term.
            Q[prev_pair] = Q[prev_pair] + args.lr * (prev_reward - Q[prev_pair])
            break
        state = next_state
    collision, cat_hit, elder_saved = dr.log()
    collisions.append(collision)
    cat_hits.append(cat_hit)
    elders_saved.append(elder_saved)
    episode_rewards.append(rewards)
    if cnt % 100 == 0:
        print('episode: {}, frame: {}, total reward: {}'.format(cnt, frame, rewards))
# Persist per-episode statistics and the recorded trajectory counts.
df = pd.DataFrame(np.array(episode_rewards))
df.to_csv('./record/{}_{:.2f}_{:.2f}_{}_steps.csv'.format(args.id, args.temp, args.gamma, policy_name), index=False)
dfp = pd.DataFrame(np.array(collisions))
dfp.to_csv('./record/{}_{:.2f}_{:.2f}_{}_collisions.csv'.format(args.id, args.cp, args.taup, policy_name), index=False)
dfn = pd.DataFrame(np.array(cat_hits))
dfn.to_csv('./record/{}_{:.2f}_{:.2f}_{}_cat_hits.csv'.format(args.id, args.cn, args.taun, policy_name), index=False)
dfpn = pd.DataFrame(np.array(elders_saved))
dfpn.to_csv('./record/{}_{:.2f}_{:.2f}_{}_elders_saved.csv'.format(args.id, args.cp, args.taup, policy_name), index=False)
with open('./policies/'+str(args.id)+'_hpolicy_drive_'+policy_name+'.pkl', 'wb') as f:
    pickle.dump(trajectory, f, pickle.HIGHEST_PROTOCOL)
|
import sys

# Swap the case of the first character read from stdin (ASCII letters only).
char_code = ord(sys.stdin.readline()[0])

upper_lo, upper_hi = ord("A"), ord("Z")
lower_lo, lower_hi = ord("a"), ord("z")

if upper_lo <= char_code <= upper_hi:
    # Uppercase letter: shift down into the lowercase range.
    print(chr(char_code + 32))
elif lower_lo <= char_code <= lower_hi:
    # Lowercase letter: shift up into the uppercase range.
    print(chr(char_code - 32))
else:
    print("문자만 입력하세요")
#!/usr/bin/python
"""Read a whitespace-separated list of numbers and print their average."""
from statistics import mean

num = input("Enter set of numbers to find average:")
# BUG FIX: the old code iterated over the *characters* of the input, so a
# multi-digit number like "12" was averaged as 1 and 2, and any separator
# (space, comma) crashed int().  Split on whitespace instead.
res = [int(x) for x in num.split()]
#res=[num]
a = mean(res)
print(a)
|
import time
import logging
from parser import DefaultParser
from fetcher import DefaultFetcher, HttpsFetcher
from logger import set_up_logging
logger = set_up_logging()  # module-wide logger configured by the project helper
class Crawler:
    '''Crawl a set of URLs.

    Starting from the given roots, fetches pages with the configured
    fetcher and extracts new links with the configured parser.

    NOTE(review): work() looks like leftover asyncio-era code —
    task_queue is a set (which has no .get()) and `asyncio` is not
    imported in this module view; crawl() is the method actually used.
    Confirm before relying on work().
    '''
    def __init__(self, roots,
                 exclude=None, strict=True,
                 max_redirect=10, max_tries=4,
                 max_tasks=10, loop=None):
        #self.loop = loop or asyncio.get_event_loop()
        self.roots = roots
        self.exclude = exclude
        self.strict = strict
        self.max_redirect = max_redirect
        self.max_tries = max_tries
        self.max_tasks = max_tasks
        self.task_queue = set() # TODO: add scheduler for queue management.
        self.seen_urls = set()
        self.done = []
        self.root_domains = set()
        self.fetcher = DefaultFetcher()
        self.parser = DefaultParser()
        self.t0 = time.time()   # crawl start time
        self.t1 = None          # crawl end time, set by crawl()
    def work(self):
        '''Process queue items forever (currently non-functional, see class note).'''
        try:
            while True:
                url, max_redirect = self.task_queue.get()
                assert url in self.seen_urls
                #self.fetch(url, max_redirect)
                #self.task_queue.task_done()
        except asyncio.CancelledError:
            pass
    def crawl(self):
        '''Run the crawler until all finished.'''
        self.t0 = time.time()
        # NOTE(review): currently fetches a single hard-coded URL once;
        # the loop over self.task_queue is still TODO.
        for task in range(1): #self.task_queue:
            resp = self.fetcher.fetch('https://stackoverflow.com', 443)
            # TODO: Implement self.validate_response(resp)
            new_urls = self.parser.parse_link(resp)
            print('new urls', list(new_urls))
        self.t1 = time.time()
def main():
    """Build a Crawler over the root URL set and run it to completion."""
    import sys  # BUG FIX: `sys` was not imported, so the KeyboardInterrupt
                # handler below raised NameError instead of flushing stderr.

    # roots = {fix_url(root) for root in args.roots}
    roots = set(['https://api.yeongmang.com'])
    crawler = Crawler(roots,
                      max_redirect=10,
                      max_tries=10,
                      max_tasks=20,
                      )
    try:
        crawler.crawl()
    except KeyboardInterrupt:
        sys.stderr.flush()
        print('Interrupted')
# Script entry point.
if __name__ == '__main__':
    main()
|
def is_coprime_phi(phi, coprime_to_check):
    """Return True iff coprime_to_check shares no common factor with phi.

    BUG FIX: the previous version looped on input() re-prompting the user,
    rebound a local instead of the caller's variable, and unconditionally
    returned True.  Additionally, a module-level check placed here used
    `phi` and `e` before they were defined, crashing at import time; the
    corrected check now runs after p, q and e are assigned below.
    """
    from math import gcd
    return gcd(phi, coprime_to_check) == 1
def egcd(x, y):
    """Extended Euclidean algorithm.

    Returns a triple (g, a, b) such that g = gcd(x, y) and a*x + b*y == g.
    """
    # Iterative variant of the classic recursive formulation.
    old_r, r = x, y
    old_a, a = 1, 0
    old_b, b = 0, 1
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_a, a = a, old_a - quotient * a
        old_b, b = b, old_b - quotient * b
    return (old_r, old_a, old_b)
def modinv(coprime, phi_n):
    """Return the multiplicative inverse of coprime modulo phi_n.

    Raises Exception when no inverse exists (i.e. gcd(coprime, phi_n) != 1).
    """
    g, a, b = egcd(coprime, phi_n)
    if g != 1:
        raise Exception('modular inverse does not exist')
    # Normalise the Bezout coefficient into the range [0, phi_n).
    return a % phi_n
# RSA parameters.  BUG FIX: the three assignments below were left empty
# (`p=`), which was a SyntaxError, and the menu option read from input()
# was compared against the integers 1/2 although input() returns a string.
p = int(input("Enter prime p: "))
q = int(input("Enter prime q: "))
e = int(input("Enter public exponent e: "))
n = p * q
phi = (p - 1) * (q - 1)
# The coprimality check originally ran before p, q and e existed; do it here.
if not is_coprime_phi(phi, e):
    raise ValueError("e is not coprime with phi_n")
x = int(input("Select the option from the below :\n 1. Encryption\n 2. Decryption\n"))
if x == 1:
    # NOTE(review): encrypt()/decrypt() are not defined in this file —
    # confirm they are provided elsewhere before running.
    encrypt()
elif x == 2:
    decrypt()
else:
    print("Enter a valid number!")
|
from tkinter import Button, Label, Frame, Tk, RAISED, NSEW, Entry
import os
from functions import (add_as_dict, check_file_exists, verify_not_empty,
add_in_freq, only_number)
from html_func import update_table
from datetime import datetime
import time
# Absolute directory of this script, used to resolve files relative to it.
__location__ = os.path.realpath(os.path.join(os.getcwd(),
                                os.path.dirname(__file__)))
class Window(Frame): # All the stuff for the GUI
    """Tkinter data-entry form for registering a new "personal" patient.

    The form uses right-aligned Arabic labels, collects the patient
    details, and on save appends the record to the ledger file and
    regenerates the HTML tables via the project helpers.
    """
    def __init__(self, master=None):
        """Attach the frame to its master window and build all widgets."""
        Frame.__init__(self, master)
        self.master = master
        # self.master.minsize(width=400, height=200)
        self.configure(relief=RAISED, borderwidth=10) #
        self.init_window()
        self.grid(sticky=NSEW) #
    def init_window(self):
        """Create the entry fields, labels and buttons, and wire callbacks."""
        self.master.title("إضافة جديد شخصي")
        # configure weights; note: that action need for each container!
        self.master.rowconfigure(0, weight=1)
        self.master.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        for i in range(5):
            self.columnconfigure(i, weight=1)
        def line_break(root, row):
            # Empty label used as a vertical spacer.
            Label(root, text="").grid(row=row, column=1)
        def col_break(root, row, col):
            # Single-space label used as a horizontal spacer.
            Label(root, text=' ').grid(row=row, column=col)
        def add_new():
            """Validate the form, append the record and close the window."""
            # getting the path
            x = '../الدفاتر/دفتر الجديد شخصي'
            path = os.path.join(x)
            # getting the time and formating it
            x = datetime.now()
            day = '{}-{}-{}'.format(x.year, x.month, x.day)
            timing = '{}:{}'.format(x.hour, x.minute)
            # define labels and values and adding them to dictionary
            labels = ['الإسم',
                      'بطاقة علاج رقم',
                      'الرقم داخل الهيئة',
                      'القسم',
                      'اليوم',
                      'الوقت',
                      'محول من',
                      'اسم المسجل']
            values = [name_e.get(), hc_e.get(), id_e.get(), sec_e.get(),
                      day, timing, doc_e.get(), entery_e.get()]
            line = dict(zip(labels, values))
            # verify that all fields aren't empty
            # NOTE(review): despite its name, verify_not_empty() is truthy
            # on the error path (some field IS empty) — confirm against
            # its definition in functions.py.
            if verify_not_empty(line):
                errlb = 'من فضلك تأكد من ادخال جميع البيانات'
                lbl = Label(entery_frame,
                            text=errlb,
                            font=("Purisa", 12),
                            bg='red', fg='white')
                lbl.grid(row=12, column=1)
                return 0
            # if the file exists print rows, if not print messag
            if check_file_exists(path):
                add_as_dict(path, line)
                add_in_freq(line, 'شخصي')
                update_table(line, "دفتر الجديد شخصي", new=True)
                line['دفتر'] = 'شخصي'
                update_table(line, "دفتر التردد")
                time.sleep(0.2)
                root.destroy()
            else:
                err_lbl = 'تأكد من وجود دفتر الجديد شخصي في ملف "الدفاتر"'
                lbl = Label(entery_frame,
                            text=err_lbl,
                            font=("Purisa", 12))
                lbl.grid(row=8, column=1)
        # making 2 frames
        entery_frame = Frame(self)
        button_frame = Frame(self)
        entery_frame.pack(side="top", fill="both", expand=True)
        button_frame.pack(side="bottom", fill="both", expand=True)
        # Frame 1
        # adding spacing in 1st row and column
        space = '\t\t\t'
        Label(entery_frame, text=space, fg='Grey').grid(row=0,
                                                        column=0, sticky=NSEW)
        # name entry
        name_e = Entry(entery_frame, text='ادخل الاسم', width=30,
                       font=("Purisa", 18), justify='right')
        name_e.grid(row=1, column=1, sticky=NSEW)
        lbl = Label(entery_frame, text='الإسم', font=("Purisa", 18))
        lbl.grid(row=1, column=3)
        # health card number
        # only_number restricts key strokes to digits for the numeric fields.
        validation = root.register(only_number)
        hc_e = Entry(entery_frame, text='رقم بطاقة العلاج', width=30,
                     validate='key', validatecommand=(validation, '%S'),
                     font=("Purisa", 18), justify='right')
        hc_e.grid(row=3, column=1, sticky=NSEW)
        lbl = Label(entery_frame, text='رقم بطاقة العلاج', font=("Purisa", 18))
        lbl.grid(row=3, column=3)
        # id in work entery
        id_e = Entry(entery_frame, text='الرقم داخل الهيئة', width=30,
                     validate='key', validatecommand=(validation, '%S'),
                     font=("Purisa", 18), justify='right')
        id_e.grid(row=5, column=1, sticky=NSEW)
        lbl = Label(entery_frame, text='الرقم داخل الهيئة',
                    font=("Purisa", 18))
        lbl.grid(row=5, column=3)
        # sec in work entery
        sec_e = Entry(entery_frame, text='القسم', width=30,
                      font=("Purisa", 18), justify='right')
        sec_e.grid(row=7, column=1, sticky=NSEW)
        lbl = Label(entery_frame, text='القسم', font=("Purisa", 18))
        lbl.grid(row=7, column=3)
        # From who?
        doc_e = Entry(entery_frame, text='محول من', width=30,
                      font=("Purisa", 18), justify='right')
        doc_e.grid(row=9, column=1, sticky=NSEW)
        lbl = Label(entery_frame, text='مُحوَل مِن', font=("Purisa", 18))
        lbl.grid(row=9, column=3)
        # who entered data?
        entery_e = Entry(entery_frame, text='اسم المسجل', width=30,
                         font=("Purisa", 18), justify='right')
        entery_e.grid(row=11, column=1, sticky=NSEW)
        lbl = Label(entery_frame, text='اسم المسجل', font=("Purisa", 18))
        lbl.grid(row=11, column=3)
        for i in range(6):
            line_break(entery_frame, (i+1)*2)
        col_break(entery_frame, row=7, col=2)
        col_break(entery_frame, row=7, col=4)
        # Frame 2
        # adding spacing in 1st row and column
        space = '\t\t\t\t'
        Label(button_frame, text=space, fg='Grey').grid(row=0,
                                                        column=0, sticky=NSEW)
        # Quit Button
        quitButton = Button(button_frame, command=root.destroy,
                            text="خروج", width=20, height=2)
        quitButton.grid(row=0, column=1, sticky=NSEW) #
        # input Button
        encryptModeButton = Button(button_frame, command=add_new,
                                   text="تسجيل", width=20, height=2)
        encryptModeButton.grid(row=0, column=3, sticky=NSEW) #
        line_break(button_frame, 1)
        col_break(button_frame, row=0, col=2)
# Build the Tk root window and start the event loop.
root = Tk()
root.geometry("725x500+200+100")
app = Window(root)
root.mainloop()
|
__author__ = 'luca'
from videos.video import Video
from images.image_comparator import ImageComparator
class VideoComparator(object):
def __init__(self, video, searcher):
self._video = video
self._searcher = searcher
def compared_frames_statuses(self, motion_threshold, MAD_threshold):
holden_frames = [0] #start with the first frame
discarded_frames = []
i = 0
j = 1
while i < self._video.frames_count()-2 and j < self._video.frames_count()-1:
#controllo il frame successivo per vedere se sono sotto la soglia
#si lo sono, posso aggiungere il frame alla lista di quelli da non considerare
#no non lo sono, il frame e necessario
if i is j:
print "CYCLE COMPARISON ERROR"
print "\nComparing frame #%d with frame #%d" %(i, j)
frame_1 = self._video.frames[i].grayscaled_image()
frame_2 = self._video.frames[j].grayscaled_image()
comp = ImageComparator(frame_1)
vectors = comp.get_motion_vectors(frame_2, self._searcher, MAD_threshold)
longest_vector, max_distance = ImageComparator.longest_motion_vector(vectors)
print "Max distance found: %f" %max_distance
print "Longest vector is: "+ str(longest_vector)
if max_distance < motion_threshold:
print "Frame #%d discared.. :-) " %j
discarded_frames.append(j) #the compared frame contains only short motion vectors, so I can discard that frame
j += 1 #the I frame is same, the frame to be compared is the j+1 so the search continue
else:
print "Frame #%d holden... :-(" %j
holden_frames.append(j) #the compared frame contains a very long motion vector, so the frame will be rendered as frame I
i = j
j = i+1
holden_frames.append(self._video.frames_count()-1) #keep the last frame
return holden_frames, discarded_frames
|
# Generated by Django 3.0.3 on 2020-07-30 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Shrink person.email to 20 chars and person.phonenumber to 10 chars.

    NOTE(review): max_length=20 is short for real-world e-mail addresses
    and EmailField would add validation — confirm this is intentional.
    """
    dependencies = [
        ('contactapp', '0003_auto_20200730_1511'),
    ]
    operations = [
        migrations.AlterField(
            model_name='person',
            name='email',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='person',
            name='phonenumber',
            field=models.CharField(max_length=10),
        ),
    ]
|
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
# Load the 10-class handwritten digits data set.
digits = load_digits()
# NOTE(review): `y` is never used below — the split uses the multiclass
# digits.target.  Looks like a leftover from a binary "is it an 8?"
# experiment; confirm before removing.
y = digits.target == 8
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, random_state=0)
lr = LogisticRegression().fit(X_train, y_train)
pred = lr.predict(X_test)
print("Accuracy: {:.3f}".format(accuracy_score(y_test, pred)))
print("confusion matrix:\n{}".format(confusion_matrix(y_test, pred)))
######
# Visualise the confusion matrix as a heat map.
import matplotlib.pylab as plt
import mglearn
scores_image = mglearn.tools.heatmap(
    confusion_matrix(y_test, pred), xlabel='prediction',
    ylabel='class', xticklabels=digits.target_names,
    yticklabels=digits.target_names, cmap=plt.cm.gray_r, fmt="%d")
plt.title("confusion matrix")
plt.gca().invert_yaxis()
plt.show()
######
# Per-class precision/recall/F1 plus micro- and macro-averaged F1 scores.
from sklearn.metrics import classification_report
print(classification_report(y_test, pred))
from sklearn.metrics import f1_score
print("micro mean_f1_score: {:.3f}".format(f1_score(y_test, pred, average="micro")))
print("macro mean_f1_score: {:.3f}".format(f1_score(y_test, pred, average="macro")))
"""
添加人员
"""
# from app.企业微信.page.contactAddPage import ContactAddPage
from appium.webdriver.common.mobileby import MobileBy
from app.企业微信po.page.basepage import BasePage
class AddMeberPage(BasePage):
    """Page object for the "add member" screen.

    (Class name typo "Meber" kept for backwards compatibility.)
    """
    # def __init__(self,driver):
    #     self.driver = driver
    # Locator for the "manual input add" menu entry.
    add_manual_element = (MobileBy.XPATH, "//*[@text='手动输入添加']")
    # Locator for the transient Android toast shown after saving a contact.
    toast_element = (MobileBy.XPATH, "//*[@class='android.widget.Toast']")
    def add_meual(self):
        """
        Tap the "manual input add" entry and go to the contact-add page.
        :return: ContactAddPage bound to the same driver
        """
        # self.driver.find_element(MobileBy.XPATH, "//*[@text='手动输入добавить']").click()
        self.find_and_click(self.add_manual_element) # after refactoring to the BasePage helper
        from app.企业微信po.page import ContactAddPage
        return ContactAddPage(self.driver)
    def get_toast(self):
        """Return the toast text, used to verify the add succeeded."""
        # Wait for the toast element, then read its text.
        # self.driver.find_element(MobileBy.XPATH, "//*[@class='android.widget.Toast']")
        # element = WebDriverWait(self.driver, 10).until(
        #     lambda x: x.find_element(MobileBy.XPATH, "//*[@class='android.widget.Toast']"))
        element = self.webderiver_wait(self.toast_element)
        result = element.text
        # expected text contains '成功' ("success")
        return result
|
#This file is part of AstroHOG
#
# Copyright (C) 2013-2017 Juan Diego Soler
import sys
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
sys.path.append('/Users/jsoler/Documents/astrohog/')
from astrohog import *
from astropy.wcs import WCS
from reproject import reproject_interp
def astroHOGexampleLOFAR(frame, vmin, vmax, ksz=1):
  """HOG comparison of a LOFAR RM cube against NH and polarisation maps.

  Reprojects each RM plane onto the column-density map grid, computes the
  HOG correlation per plane, plots the correlation curves, and overlays
  the best-correlating plane on the LIC map.

  frame, vmin, vmax : labels/limits used for file naming and the velocity range
  ksz : derivative kernel size passed to the HOG routines

  BUG FIX: removed a leftover "import pdb; pdb.set_trace()" debug
  breakpoint that froze the script after the final plot on every run.
  """
  fstr="%4.2f" % frame
  dir='3C196/'
  hdu1=fits.open(dir+'3C196_LOFAR_RMcube_10.fits')
  hdu2=fits.open(dir+'3C196fwhm5_logNHmap.fits')
  hdu3=fits.open(dir+'3C196fwhm30_Qmap.fits')
  hdu4=fits.open(dir+'3C196fwhm30_Umap.fits')
  hdu5=fits.open(dir+'3C196fwhm30_LIC.fits')
  Qmap=hdu3[0].data
  Umap=hdu4[0].data
  LICmap=hdu5[0].data
  # Polarisation angle and its unit vector components.
  psi=0.5*np.arctan2(-Umap, Qmap)
  ex=np.sin(psi)
  ey=np.cos(psi)
  RMcube=hdu1[0].data
  v1=vmin*1000.; v2=vmax*1000.
  v1str="%4.1f" % vmin
  v2str="%4.1f" % vmax
  limsv=np.array([v1, v2, v1, v2])
  sz1=np.shape(hdu1[0].data)
  CTYPE3=hdu1[0].header['CTYPE3']
  CDELT3=hdu1[0].header['CDELT3']
  CRVAL3=hdu1[0].header['CRVAL3']
  CRPIX3=hdu1[0].header['CRPIX3']
  zmin1=0 #int(CRPIX3+(v1-CRVAL3)/CDELT3)
  zmax1=sz1[0]-1 #int(CRPIX3+(v2-CRVAL3)/CDELT3)
  velvec1=hdu1[0].header['CRVAL3']+np.arange(sz1[0])*hdu1[0].header['CDELT3'] #np.arange(v1,v2,CDELT3)/1000.
  # Build a 2D header so each RM plane can be reprojected onto the NH grid.
  refhdr=hdu1[0].header.copy()
  NAXIS3=refhdr['NAXIS3']
  del refhdr['NAXIS3']
  del refhdr['CTYPE3']
  del refhdr['CRVAL3']
  del refhdr['CRPIX3']
  del refhdr['CDELT3']
  del refhdr['CUNIT3']
  refhdr['NAXIS']=2
  sz2=np.shape(hdu2[0].data)
  galRMcube=np.zeros([NAXIS3, sz2[0], sz2[1]])
  for i in range(0, NAXIS3):
    hduX=fits.PrimaryHDU(RMcube[i,:,:]); hduX.header=refhdr
    mapX, footprintX=reproject_interp(hduX, hdu2[0].header)
    galRMcube[i,:,:]=mapX
  # ==========================================================================================================
  sz1=np.shape(galRMcube)
  #x=np.sort(galRMcube.ravel())
  #minrm=x[int(0.2*np.size(x))]
  # Noise estimate from the first planes defines the RM significance mask.
  minrm=np.std(galRMcube[0:5,:,:])
  mask1=np.zeros(sz1)
  mask1[(galRMcube > minrm).nonzero()]=1
  mask2=np.zeros(sz2)+1.
  #mask2[:,ymin:ymax,:]=1
  #mask2[(hdu2[0].data < 0.0).nonzero()]=0
  # ==========================================================================================================
  corrplane, corrcube=HOGcorr_cube(galRMcube, np.array([hdu2[0].data]), zmin1, zmax1, 0, 0, ksz=ksz, mask1=mask1, mask2=mask2)
  plt.plot(velvec1, corrplane.ravel())
  plt.xlabel('RM')
  plt.ylabel('Correlation')
  plt.show()
  # ==========================================================================================================
  #corrvec0, corrcube0=HOGcorr_cubeandpol(galRMcube, ex, ey, zmin1, zmax1, ksz=ksz)
  corrvec1, corrcube1=HOGcorr_cubeandpol(galRMcube, ex, ey, zmin1, zmax1, ksz=ksz, mask1=mask1, mask2=mask2, rotatepol=True)
  #plt.plot(velvec1, corrvec0, 'r')
  plt.plot(velvec1, corrvec1, 'b')
  plt.xlabel(r'RM [rad m^-2]')
  plt.ylabel('PRS correlation')
  plt.show()
  gradQ=np.gradient(Qmap)
  gradU=np.gradient(Umap)
  P=np.sqrt(Qmap**2+Umap**2)
  gradP=np.sqrt(gradQ[0]**2+gradU[0]**2+gradQ[1]**2+gradU[1]**2)
  gradPoverP=gradP/P
  # Plane with the highest polarisation correlation, shown over the LIC map.
  imax=(corrvec1 == np.max(corrvec1)).nonzero()[0][0]
  ax1=plt.subplot(1,1,1, projection=WCS(hdu2[0].header))
  ax1.imshow(np.log10(galRMcube[imax,:,:]), origin='lower', cmap='rainbow') #, interpolation='none')
  ax1.imshow(LICmap, origin='lower', alpha=0.55, cmap='binary', clim=(0.04, 0.075))
  ax1.coords.grid(color='white')
  ax1.coords['glon'].set_axislabel('Galactic Longitude')
  ax1.coords['glat'].set_axislabel('Galactic Latitude')
  ax1.coords['glat'].set_axislabel_position('r')
  ax1.coords['glat'].set_ticklabel_position('r')
  ax1.set_title('LOFAR RM')
  plt.show()
  strksz="%i" % ksz
  #plt.figure()
  #plt.imshow(corrplane, origin='lower', extent=limsv/1e3)
  #plt.xlabel(r'$v_{CO}$ [km/s]')
  #plt.ylabel(r'$v_{HI}$ [km/s]')
  #plt.yticks(rotation='vertical')
  #plt.colorbar()
  #plt.savefig('HOGcorrelationPlanck353GRSL'+fstr+'_b'+blimstr+'_k'+strksz+'_v'+v1str+'to'+v2str+'.png', bbox_inches='tight')
  #plt.close()
  #import pdb; pdb.set_trace()
# Run the example with a 9-pixel derivative kernel over RM 100-135.
ksz=9
astroHOGexampleLOFAR(23.75, 100., 135., ksz=ksz)
|
from setuptools import setup, find_packages
from os.path import join, dirname
# The PyPI long description is the README, read relative to this file.
with open(join(dirname(__file__), 'README.rst')) as f:
    readme_text = f.read()
setup(
    name="everypolitician-popolo",
    version="0.0.11",
    packages=find_packages(),
    author="Mark Longair",
    author_email="mark@mysociety.org",
    description="Parse and model Popolo data from EveryPolitician",
    long_description=readme_text,
    license="AGPL",
    keywords="politics data civic-tech",
    install_requires=[
        'approx_dates',
        'requests',
        'six >= 1.9.0',
        # NOTE(review): hard pin to an old unidecode release — confirm the
        # pin is still required before upgrading.
        'unidecode==0.4.18'
    ]
)
|
#!/usr/bin/env python3
"""Script to find all alarms which do not conform to the current naming scheme and delete them."""
import os
import boto3
from botocore.exceptions import ClientError
import logging
# Logging format shared by all messages emitted from this script.
FORMAT = '%(asctime)-15s %(levelname)s %(module)s.%(funcName)s %(message)s'
DATEFMT = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO, format=FORMAT, datefmt=DATEFMT)
# Region can be overridden through the standard AWS_REGION variable.
REGION = os.getenv('AWS_REGION', 'us-west-2')
# Expected alarm-name pattern.  NOTE(review): the constant name contains a
# typo ("SHCEME") and it is unused in this file — confirm nothing imports
# it before renaming.
ALARM_SHCEME = '{InstanceId}-{InstanceName}-{MetricName}'
def convert_list_to_dict(obj, key='Key', value='Value'):
    """Collapse an AWS-style list of {key: ..., value: ...} pairs into a dict."""
    return dict((entry[key], entry[value]) for entry in obj)
def describe_instances(client, instanceids):
    """Return the flattened instance dicts for instanceids, Tags as a dict.

    Instance ids that cannot be described (e.g. already terminated) are
    logged and skipped instead of aborting the whole run.

    :param client: boto3 EC2 client
    :param instanceids: iterable of instance-id strings
    :return: list of instance dicts with 'Tags' converted to a plain dict
    """
    instances = []
    for instanceid in instanceids:
        try:
            response = client.describe_instances(InstanceIds=[instanceid])
        except ClientError as error:
            # BUG FIX: the caught error used to be silently discarded;
            # include it so throttling/permission failures are not mistaken
            # for missing instances.
            logging.info('%s does not exist (%s)', instanceid, error)
            continue
        for reservations in response['Reservations']:
            for instance in reservations['Instances']:
                # BUG FIX: instances may legitimately carry no tags at all;
                # instance['Tags'] raised KeyError for them.
                instance['Tags'] = convert_list_to_dict(instance.get('Tags', []))
                instances.append(instance)
    return instances
def get_instance_map(instances):
    """Map each instance id to its 'Name' tag (None when the tag is absent)."""
    return {inst['InstanceId']: inst['Tags'].get('Name', None)
            for inst in instances}
class GetAlarms():
    """Collects CloudWatch metric alarms grouped by instance, namespace
    and metric name."""

    def __init__(self, client):
        self.client = client
        self.namespaces = set()    # every Namespace seen
        self.metricnames = set()   # every MetricName seen
        self.alarms = {}           # instance id -> namespace -> metric -> [alarms]
        self.alarm_count = 0       # alarms carrying an InstanceId dimension

    def describe_alarms(self, **kwargs):
        """Yield every metric alarm, transparently following pagination."""
        paginator = self.client.get_paginator('describe_alarms')
        for page in paginator.paginate(**kwargs):
            for alarm in page['MetricAlarms']:
                yield alarm

    def get_alarms(self):
        """Populate self.alarms with all instance-scoped metric alarms."""
        for alarm in self.describe_alarms():
            dimensions = convert_list_to_dict(alarm['Dimensions'], 'Name', 'Value')
            self.namespaces.add(alarm['Namespace'])
            self.metricnames.add(alarm['MetricName'])
            if 'InstanceId' not in dimensions:
                continue
            self.alarm_count += 1
            instanceid = dimensions['InstanceId']
            metricname = alarm['MetricName']
            # Disk-space alarms exist once per volume; make the key unique
            # by appending the volume ('instance') dimension.
            if metricname == 'LogicalDisk % Free Space' and 'instance' in dimensions:
                metricname = metricname + dimensions['instance']
            per_namespace = self.alarms.setdefault(instanceid, {})
            per_metric = per_namespace.setdefault(alarm['Namespace'], {})
            per_metric.setdefault(metricname, []).append(alarm)
def main():
    """Find CloudWatch alarms whose names do not follow the naming scheme."""
    client = boto3.client('cloudwatch', REGION)
    get_alarms = GetAlarms(client)
    get_alarms.get_alarms()
    # Resolve instance ids to their Name tags, used for name validation.
    instance_map = get_instance_map(
        describe_instances(
            boto3.client('ec2', REGION),
            list(get_alarms.alarms.keys())))
    logging.info('Unique Namespaces: %s', ', '.join(get_alarms.namespaces))
    logging.info('Unique MetricNames: %s', ', '.join(get_alarms.metricnames))
    logging.info('Unique Instances: %s', len(get_alarms.alarms.keys()))
    logging.debug('Unique Instances: %s', ', '.join(get_alarms.alarms.keys()))
    logging.info('Found %s total alarms', get_alarms.alarm_count)
    multiple_alarm_count = 0
    bad_alarms = []
    # An alarm is malformed when its instance no longer exists or its name
    # does not start with "<instance-id>-<instance-name>".
    for instanceid in get_alarms.alarms:
        for namespace in get_alarms.alarms[instanceid]:
            for metricname in get_alarms.alarms[instanceid][namespace]:
                for alarm in get_alarms.alarms[instanceid][namespace][metricname]:
                    if instanceid not in instance_map or not alarm['AlarmName'].startswith('{}-{}'.format(instanceid, instance_map[instanceid])):
                        bad_alarms.append(alarm)
                    # if len(get_alarms.alarms[instanceid][namespace][metricname]) > 1:
                    #     multiple_alarm_count += 1
                    #     logging.warning('Found multiple entries for %s for %s: %s\n'
                    #                     'Alarms: %s', instanceid, namespace,
                    #                     metricname, ', '.join(
                    #                         [a['AlarmName']
                    #                          for a in get_alarms.alarms[instanceid][namespace][metricname]]))
    logging.info('Found %s alarms with duplicate entries', multiple_alarm_count)
    logging.info('Found %s total alarms which are malformed\nAlarms:\n%s',
                 len(bad_alarms),
                 ', '.join([
                     a['AlarmName']
                     for a in bad_alarms
                 ]))
# Script entry point.
if __name__ == '__main__':
    main()
# Where scraped comment/news JSON dumps are written (unused in this script).
SAVE_COMMENT_DIR = r'D:\MongoDB\savejson\comment'
SAVE_NEWS_DIR = SAVE_COMMENT_DIR
# Ad-hoc maintenance script: dedupe/aggregate scraped news comments stored
# in a local MongoDB and export a slice of them to CSV.
if __name__=='__main__':
    import pymongo
    client = pymongo.MongoClient('mongodb://localhost:27017/')
    db = client['cocoke']
    news = db['news']
    nt = db['news_table']
    cs = db['comments']
    comment = db['comment']
    # Aggregation pipeline: group identical news documents and count repeats.
    listParam = [
        {"$group": {"_id": {"docId":"$docId", "title":"$title", "createTime":"$createTime", "url":"$url","content":"$content","source":"$source"},
                    "repeat": { "$sum": 1}}},
        {"$sort": {"repeat": -1}}
    ]
    # Same idea for comments: group identical comment documents.
    list_comment = [
        # {"$skip": 0},
        # {"$limit": 200},
        {"$group": {
            "_id": {"docId": "$docId", "location": "$location",
                    "createTime": "$createTime", "nickname": "$nickname", "content": "$content",
                    "vote": "$vote","against": "$against"},
            "repeat": {"$sum": 1}}},
    ]
    # Crawler settings document (crawl interval in hours plus keywords).
    setting = db['setting']
    dictSetting = {
        'crawlInterval':12,
        '_id': "cocoke",
        "keyWord":['新疆']
    }
    # setting.save(dictSetting)
    dicSet = setting.find_one({'crawlInterval': 12})
    crawlInterval = dicSet['crawlInterval']
    print('crawlInterval: ', crawlInterval, ' hours')
    #
    # ct=0
    # for i in cs.distinct("content"):
    #     ct+=1
    #     item = cs.find_one({'content': i})
    #     new=dict()
    #     new['docId']= item['docId']
    #     new['location']= item['location']
    #     new['createTime'] = item['createTime']
    #     new['nickname'] = item['nickname']
    #     new['content'] = item['content']
    #     new['vote'] = item['vote']
    #     new['against'] = item['against']
    #     comment.save(new)
    # print(ct)
    list_C = [
        # {"$skip": 0},
        {"$limit": 2000},{"$sort": {"vote":-1,"against":-1}}
    ]
    # a = list(cs.aggregate(list_comment))
    ct = 0
    import codecs,csv
    # Export comments 4000..6000 (by votes desc) to a UTF-8-BOM CSV file.
    with codecs.open('text2.csv', 'w', 'utf_8_sig') as csvFile3:
        writer2 = csv.writer(csvFile3,dialect='excel')
        for doc in comment.find().skip(4000).limit(2000).sort([("vote",pymongo.DESCENDING),("against",pymongo.DESCENDING)]): # use distinct to get each unique element (stale comment)
            content = doc['content']
            writer2.writerow([1,content])
            # item = doc['_id']
            # new=dict()
            # new['_id']= item['docId']
            # new['location']= item['location']
            # new['createTime'] = item['createTime']
            # new['nickname'] = item['nickname']
            # new['content'] = item['content']
            # new['vote'] = item['vote']
            # new['against'] = item['against']
            # comment.save(new)
    client.close()
# tag = getattr(self, 'tag', None)
# url = url + 'tag/' + tag
# def findTophref(self,response):
# tabContents = response.css('.tabContents')
# reFindAllHref(tabContents)
#
# EndText = response.css('#endText').extract()
# extractLabelP(EndText)
#
# # 找docId
#
#
# def ShowAll(self,response,startUrls):
# for url in startUrls:
# fetch(url)
#
# tabContents = response.css('.tabContents')
# for x,y in reFindAllHref(tabContents):
# whichType = howToExtractContent(x)
# fetch(x)
#
# NewsUrl = response.url
# scrip_content = self.getScriptText(response)
# docId = findKeyWordJson(html_content=scrip_content
# , keyword='docId')
# productKey = findKeyWordJson(html_content=scrip_content
# , keyword='productKey')
#
# if whichType == typeNews['news']:
# # 抽取内容
# EndText = response.css('#endText').extract()
# content = extractLabelP(EndText)
# # 抽取其他
#
# title = response.css('h1::text').extract_first()
# timee = response.css('.post_time_source::text').extract_first()
# source = response.css('.post_time_source>a::text').extract_first()
# creatTime = extractTime(timee)
# dicN = makeNewsDict(url=NewsUrl,
# docId=docId,
# title=title,
# createTime=creatTime,
# content=content,
# source=source)
# # 保存
# saveNews(dicN)
#
# # 网易号 ,id = content
# elif whichType== typeNews['dy']:
# EndText = response.css('#content').extract()
# content = extractLabelP(EndText)
# # 抽取其他
# title = response.css('h2::text').extract_first().strip()
# source = '网易号'
# creatTime = response.css('.time>span::text').extract_first()
# dicN = makeNewsDict(url=NewsUrl,
# docId=docId,
# title=title,
# createTime=creatTime,
# content=content,
# source=source)
# # 保存
# saveNews(dicN)
#
# # 图集,是读取textArea内的内容
# # others可以不管正文,一般是直播之类的
# elif whichType== typeNews['photoview'] or whichType==typeNews['others'] :
# # 尝试读取textArea
# textAreaLabel = response.css('textarea::text').extract()
# if textAreaLabel:
# # 提取textArea内文本
# nexturls,textAreaDic = extractTextArea(textAreaLabel)
# # 读取下一个链接
# for url in nexturls:
# yield Request(url=url, callback=self.parse_new)
# # 保存读取的信息
# textAreaDic['docId']=docId
# textAreaDic['url']=NewsUrl
# saveNews(textAreaDic)
#
# # 分析评论Api的第一页
# # 需要在script中查找docId
# # 提取 "docId"
#
# new_url = generateCommentApi(scrip_content=scrip_content,
# offset=0,
# docId=docId,
# productKey=productKey,
# ListType='newList')
# yield Request(url=new_url, callback=self.parse_comment)
|
import os
import logging
import threading
import pygame
# Import this when building for mac
#import pygame._view
import random
from socket import *
import base64
try:
import android
except ImportError:
android = None
if not android:
import yaml
from sound import Sound
from graphics import widgets, menus, game
netlog = logging.getLogger('netlog')
inputlog = logging.getLogger('inputlog')
soundlog = logging.getLogger('soundlog')
logging.root.setLevel(logging.DEBUG)
netlog.setLevel(logging.DEBUG)
inputlog.setLevel(logging.INFO)
soundlog.setLevel(logging.INFO)
#
# Multiplayer Pong!
#
# Made by Johan Bjareholt
#
class Main():
    """Top-level application object for the Pong client.

    Owns the frame loop, the client settings, and the menu/game state
    transitions.  Relies on the module-level singletons created in the
    ``__main__`` block: ``gfx``, ``input``, ``sound`` and ``net``.
    """
    def __init__(self):
        self.running = True          # cleared to leave the main loop
        self.inMenu = True
        self.inGame = False
        self.loggedin = False
        self.mode = "singleplayer"   # or "multiplayer" (set by UdpHandler)
        self.cwd = os.getcwd()
        logging.info(self.cwd)
        self.loadSettings()
    def run(self):
        """Initialise pygame, then pump input and draw until quit."""
        pygame.init()
        self.load()
        # Dispatch table: per-mode in-game input handler.
        self.gameinput = {"singleplayer": input.playerSplitScreen, "multiplayer": input.playerOnline}
        self.loadMenu()
        gfx.menu.draw()
        while self.running:
            if android:
                if android.check_pause():
                    android.wait_for_resume()
            input.general()
            if not self.inGame:
                input.inMenu(gfx.menu.location)
                gfx.menu.draw()
            else:
                input.inGame()
                self.gameinput[self.mode]()
                if self.settings['game']['hippie']:
                    gfx.game.ball.hippie()
                if gfx.game.playing:
                    if self.mode == "singleplayer":
                        # Ball physics is client-side only in singleplayer;
                        # the server drives it online.
                        gfx.game.ball.ballEvent()
                gfx.game.draw()
            gfx.newFrame()
        self.quit()
    def quit(self):
        """Log out of the lobby (if logged in) and shut pygame down."""
        if self.loggedin:
            net.lobby.sendData("lobby.logout")
        pygame.quit()
    def loadSettings(self):
        """Populate ``self.settings`` (hard-coded on Android, YAML elsewhere)."""
        if android:
            self.settings = {'screen': {'fullscreen': True, 'resolution': [1200, 700]},
                             'host': {'ip': 'ngenia.net', 'port': 10000},
                             'audio': {'volume': 7, 'enabled': True},
                             'game': {'sensitivity': 12, 'hippie': False},
                             'user': {'password': '', 'name': ''}}
        else:
            # FIX: the original used the Python-2 ``file()`` builtin and never
            # closed the handle.  ``open()`` as a context manager closes it,
            # and ``safe_load`` avoids executing arbitrary YAML tags in what
            # is plain configuration data.
            with open(self.cwd + "/settings.yaml", 'r') as settings_file:
                self.settings = yaml.safe_load(settings_file)['client']
            logging.info(self.settings)
            if self.settings['game']['hippie']:
                logging.info("HIPPIE!")
    def load(self):
        """One-time pygame/Android setup: clock, key mapping and fonts."""
        if android:
            android.init()
            android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
        self.clock = pygame.time.Clock()
        # NOTE(review): these use the global ``main`` rather than ``self``;
        # kept as-is since Main is a singleton.
        gfx.fontSmall = pygame.font.Font(main.cwd+"/font/CALIBRI.TTF", 30)
        gfx.font = pygame.font.Font(main.cwd+"/font/CALIBRI.TTF", 50)
        gfx.fontBig = pygame.font.Font(main.cwd+"/font/CALIBRI.TTF", 75)
    def loadMenu(self):
        """Create and pre-render the nested Graphics.Menu."""
        gfx.menu = gfx.Menu(gfx.screen)
        gfx.menu.surfaceLoad()
    def loadGame(self):
        """Build the game screen from current settings and login state."""
        gfx.game = game.Game(gfx.screen,
                             sound.effects,
                             sensitivity=int(main.settings['game']['sensitivity']),
                             screensize=(gfx.X, gfx.Y),
                             loggedin=main.loggedin)
        gfx.game.surfaceLoad()
class Graphics():
    """Display layer: owns the pygame screen, fonts and the nested Menu.

    Instantiated once as the module-level singleton ``gfx``; other code
    attaches attributes to it (``gfx.font``, ``gfx.game``, ``gfx.menu``).
    """
    def __init__(self):
        # Resolution and fullscreen flag come from the client settings.
        self.X, self.Y = main.settings['screen']['resolution']
        if main.settings['screen']['fullscreen']:
            self.screen = pygame.display.set_mode((self.X, self.Y), pygame.FULLSCREEN)
        else:
            self.screen = pygame.display.set_mode((self.X, self.Y))
    class Menu:
        """Menu state machine; ``location`` selects which screen is live.

        Sub-menus are built lazily on first visit and cached via the
        per-menu ``loaded`` flag.
        """
        def __init__(self, parent):
            gfx.screen = parent
            self.menus = {}
            for menu in ["main", "multiplayer", "login", "loginConnect"]:
                self.menus[menu] = {'loaded': False}
            self.location = "main"
        def surfaceLoad(self):
            # Pre-render the title text once; reused by every draw().
            self.pongText = gfx.font.render("Pong!", True,
                                            pygame.color.Color(255, 255, 255))
        def draw(self):
            # Clear, draw the title, then update/create the active sub-menu.
            gfx.screen.fill((0, 0, 0))
            gfx.screen.blit(self.pongText, ((gfx.X/2)-75, 50))
            # Main menu
            if self.location == "main":
                if self.menus[self.location]['loaded']:
                    self.mainmenu.update()
                else:
                    # ``user`` is a module-level global set by Networking.
                    self.mainmenu = menus.MainMenu(gfx.screen, username=user)
                    self.menus[self.location]['loaded'] = True
            # Multiplayer logged in screen for joining games
            elif self.location == "multiplayer":
                if self.menus[self.location]['loaded']:
                    self.multiplayermenu.update()
                else:
                    self.multiplayermenu = menus.MultiplayerMenu(gfx.screen, "main", username=user)
                    self.menus[self.location]['loaded'] = True
            # Login prompt
            elif self.location == "login":
                if self.menus[self.location]['loaded']:
                    self.loginmenu.update()
                else:
                    self.loginmenu = menus.LoginMenu(gfx.screen, "main", username=user)
                    self.menus[self.location]['loaded'] = True
            # Logging in and connecting to server load menu
            elif self.location == "loginConnect":
                if self.menus[self.location]['loaded']:
                    self.loginconnectmenu.update()
                else:
                    self.loginconnectmenu = menus.LoadingMenu(gfx.screen, "main", username=user)
                    self.menus[self.location]['loaded'] = True
    def newFrame(self):
        # Cap the loop at 60 FPS and present the back buffer.
        main.clock.tick(60)
        pygame.display.flip()
class Input():
    """Event pump and input routing for menus and gameplay.

    ``general()`` must run once per frame before the per-screen handlers,
    since it refreshes ``self.events`` and ``self.pressed``.
    """
    def __init__(self):
        self.newly_pressed = {}  # currently unused; kept for compatibility
    def general(self):
        """Drain the pygame event queue and handle window-level events."""
        pygame.event.pump()
        self.events = pygame.event.get()
        inputlog.debug(self.events)
        self.pressed = pygame.key.get_pressed()
        #logging.info(events)
        for event in self.events:
            if event.type == pygame.QUIT:
                main.running = False
    def inMenu(self, location):
        """Dispatch mouse/keyboard events for the menu screen at *location*."""
        if location == "main":
            menu = gfx.menu.mainmenu
            for event in self.events:
                if event.type == pygame.MOUSEBUTTONDOWN:
                    if menu.startButton.rect().collidepoint(pygame.mouse.get_pos()):
                        logging.info("You started singleplayer! ")
                        main.loadGame()
                        main.inGame = True
                        main.inMenu = False
                        sound.music.fadeout(3000)
                    # Start multiplayer (requires login first)
                    if menu.multiplayerButton.rect().collidepoint(pygame.mouse.get_pos()):
                        if main.loggedin:
                            gfx.menu.location = "multiplayer"
                            gfx.menu.draw()
                        else:
                            gfx.menu.location = "login"
                            gfx.menu.draw()
                    # Quitbutton
                    if menu.backButton.rect().collidepoint(pygame.mouse.get_pos()):
                        main.running = False
        elif location == "multiplayer":
            menu = gfx.menu.multiplayermenu
            for event in self.events:
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # Start matchmaking: bring up the UDP channel then queue.
                    if menu.quickFindButton.rect().collidepoint(pygame.mouse.get_pos()):
                        logging.info("You started multiplayer! ")
                        net.game.start()
                        net.game.sendData("game.initUdp", "")
                        net.lobby.sendData("lobby.quickFind", "")
                        main.loadGame()
                        main.inGame = True
                        game.playing = True
                        main.inMenu = False
                        sound.music.fadeout(3000)
                    # Backbutton
                    if menu.backButton.rect().collidepoint(pygame.mouse.get_pos()):
                        gfx.menu.location = menu.parent
                        gfx.menu.draw()
        elif location == "login":
            menu = gfx.menu.loginmenu
            for event in self.events:
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # ``user``/``password`` are module-level globals also read
                    # by the networking threads.
                    global user, password
                    if menu.loginButton.rect().collidepoint(pygame.mouse.get_pos()):
                        if not menu.usernameBox.inputText or not menu.passwordBox.inputText:
                            pass  # ignore clicks while either field is empty
                        else:
                            gfx.menu.location = "loginConnect"
                            gfx.menu.draw()
                            # base64 is obfuscation only, not encryption.
                            password = menu.passwordBox.inputText
                            b64ed_passwd = base64.b64encode(password)
                            # Final variables
                            user = menu.usernameBox.inputText
                            password = b64ed_passwd
                            net.lobby.register = False
                            net.lobby.start()
                    elif menu.registerButton.rect().collidepoint(pygame.mouse.get_pos()):
                        # NOTE(review): unlike login, registration does not
                        # check for empty username/password fields.
                        gfx.menu.location = "loginConnect"
                        gfx.menu.draw()
                        password = menu.passwordBox.inputText
                        b64ed_passwd = base64.b64encode(password)
                        # Final variables
                        user = menu.usernameBox.inputText
                        password = b64ed_passwd
                        net.lobby.register = True
                        net.lobby.start()
                    # Focus username textbox (focus is mutually exclusive)
                    if menu.usernameBox.rect().collidepoint(pygame.mouse.get_pos()):
                        menu.usernameBox.focus = True
                        menu.passwordBox.focus = False
                    if menu.passwordBox.rect().collidepoint(pygame.mouse.get_pos()):
                        menu.passwordBox.focus = True
                        menu.usernameBox.focus = False
                    # Backbutton
                    if menu.backButton.rect().collidepoint(pygame.mouse.get_pos()):
                        gfx.menu.location = menu.parent
                        gfx.menu.draw()
            # Forward this frame's events to whichever textbox has focus.
            if menu.usernameBox.focus:
                menu.usernameBox.getKey(input.events, self.pressed)
                menu.usernameBox.draw()
                gfx.menu.draw()
            elif menu.passwordBox.focus:
                menu.passwordBox.getKey(input.events, self.pressed)
                menu.passwordBox.draw()
                gfx.menu.draw()
        elif location == "loginConnect":
            menu = gfx.menu.loginconnectmenu
            for event in self.events:
                if event.type == pygame.MOUSEBUTTONDOWN:
                    if menu.backButton.rect().collidepoint(pygame.mouse.get_pos()):
                        # Killing thread: drop the old handler and replace it
                        # with a fresh, unstarted one so login can be retried.
                        net.lobby.running = False
                        del net.lobby
                        net.lobby = net.TcpHandler()
                        # Going back to main menu
                        gfx.menu.location = menu.parent
                        gfx.menu.draw()
    def inGame(self):
        """Pause toggling and pause-menu clicks while a game is active."""
        for event in self.events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    gfx.game.playing = not gfx.game.playing
            if event.type == pygame.MOUSEBUTTONDOWN:
                if not gfx.game.playing:
                    # Mouse position is translated into pause-menu coordinates.
                    if gfx.game.pauseMenu.leaveButton.rect().collidepoint((pygame.mouse.get_pos()[0]-gfx.game.pauseMenu.X,
                                                                           pygame.mouse.get_pos()[1]-gfx.game.pauseMenu.Y)):
                        gfx.menu.draw()
                        main.inGame = False
    def player1(self):
        # Left paddle: W/S, clamped to the screen.
        key = pygame.key.get_pressed()
        if key[pygame.K_w] and gfx.game.leftBrick.Y > 0:
            gfx.game.leftBrick.Y -= gfx.game.sensitivity
        if key[pygame.K_s] and gfx.game.leftBrick.Y < gfx.Y-gfx.game.leftBrick.H:
            gfx.game.leftBrick.Y += gfx.game.sensitivity
    def player2(self):
        # Right paddle: O/L, clamped to the screen.
        key = pygame.key.get_pressed()
        if key[pygame.K_o] and gfx.game.rightBrick.Y > 0:
            gfx.game.rightBrick.Y -= gfx.game.sensitivity
        if key[pygame.K_l] and gfx.game.rightBrick.Y < gfx.Y-gfx.game.rightBrick.H:
            gfx.game.rightBrick.Y += gfx.game.sensitivity
    def playerSplitScreen(self):
        # Local two-player: both paddles read from the same keyboard.
        if gfx.game.playing:
            self.player1()
            self.player2()
    def playerOnline(self):
        """Move own paddle (W/S) and report its Y to the server.

        NOTE(review): the lower bound here is a hard-coded 600 instead of
        the screen-relative clamp used in player1/player2 — confirm intent.
        """
        key = pygame.key.get_pressed()
        if net.game.playerslot == 1:
            if key[pygame.K_w] and gfx.game.leftBrick.Y > 0:
                gfx.game.leftBrick.Y -= gfx.game.sensitivity
                net.game.sendData("game.padY", gfx.game.leftBrick.Y)
            elif key[pygame.K_s] and gfx.game.leftBrick.Y < 600:
                gfx.game.leftBrick.Y += gfx.game.sensitivity
                net.game.sendData("game.padY", gfx.game.leftBrick.Y)
        else:
            if key[pygame.K_w] and gfx.game.rightBrick.Y > 0:
                gfx.game.rightBrick.Y -= gfx.game.sensitivity
                net.game.sendData("game.padY", gfx.game.rightBrick.Y)
            elif key[pygame.K_s] and gfx.game.rightBrick.Y < 600:
                gfx.game.rightBrick.Y += gfx.game.sensitivity
                net.game.sendData("game.padY", gfx.game.rightBrick.Y)
class Networking():
    """Network layer: a TCP lobby thread, a UDP game thread and a parser.

    Wire format (both protocols): ``userinfo|action|data`` with ``;`` as
    the message separator; ``action`` is ``protocol.category.variable``
    (e.g. ``tcp.lobby.login``).  ``user`` and ``address`` are published
    as module-level globals for the rest of the client.
    """
    def __init__(self):
        self.msghandler = self.MessageHandler()
        self.game = self.UdpHandler()
        self.lobby = self.TcpHandler()
        global user, address
        user = None
        address = (main.settings['host']['ip'],
                   main.settings['host']['port'])
    class TcpHandler(threading.Thread):
        """Lobby connection thread: authenticates, then relays lobby/game
        messages until the client logs out or quits."""
        def __init__(self):
            threading.Thread.__init__(self)
            self.name = "TcpConnection"
            self.daemon = True
            self.running = True
            self.register = False  # True -> first message registers instead of logging in
            self.socket = socket(AF_INET, SOCK_STREAM)
            #self.user = "Johan" + str(random.randint(0, 100))
        def run(self):
            netlog.info("Starting tcp socket!")
            self.socket.connect(address)
            # Login section: ``password`` is the base64-encoded global set
            # by the login/register menu before start() is called.
            if not self.register:
                net.lobby.sendData("lobby.login", password)
            elif self.register:
                net.lobby.sendData("lobby.register", password)
                self.register = False
            # Authentication loop: runs until the server confirms login.
            while not main.loggedin and main.running and self.running:
                recv_data = self.socket.recv(2048)
                data = recv_data.split('|')
                netlog.debug("TCP:Got: " + recv_data)
                response = net.msghandler.handleAuthentication(data)
                if response:
                    self.sendData(response)
            # Lobby and game section: messages may arrive batched, split on ';'.
            while main.loggedin and main.running and self.running:
                recv_data = self.socket.recv(2048)
                if recv_data:
                    netlog.debug("TCP:Got: " + recv_data)
                    for message in recv_data.split(';'):
                        data = message.split('|')
                        response = net.msghandler.handle(data)
                        if response:
                            self.sendData(response)
            self.socket.close()
        def sendData(self, action, data=None):
            """Frame *action*/*data* and send it over the lobby socket."""
            data = "{user}|{action}|{data};".format(user=user, action="tcp."+action, data=data)
            self.socket.send(data)
            netlog.debug("TCP:Sent: " + data)
    class UdpHandler(threading.Thread):
        """Game channel thread: receives ball/paddle/score updates."""
        def __init__(self):
            threading.Thread.__init__(self)
            self.name = "Connection"
            self.daemon = True
            self.running = True
            self.socket = socket(AF_INET, SOCK_DGRAM)
            #user = "Johan" + str(random.randint(0, 100))
            self.playerslot = None  # 1 = left paddle, 2 = right; set by server
        def run(self):
            netlog.info("Starting udp socket!")
            # Starting the UDP thread is what switches the client to
            # multiplayer input handling.
            main.mode = "multiplayer"
            while main.running:
                recv_data, addr = self.socket.recvfrom(2048)
                if recv_data:
                    netlog.debug("UDP:Got: " + recv_data)
                    for message in recv_data.split(';'):
                        data = message.split('|')
                        response = net.msghandler.handle(data)
                        if response:
                            self.sendData(response)
        def sendData(self, action, data=" "):
            """Frame *action*/*data* and send it to the game server."""
            data = "{user}|{action}|{data};".format(user=user, action="udp."+action, data=data)
            self.socket.sendto(data, address)
            netlog.debug("UDP:Sent: " + data)
    class MessageHandler():
        """Parses ``protocol.category.variable|value`` actions and applies
        them to the game/menu state.  Parse errors are logged, not raised."""
        def handle(self, data):
            try:
                action = data[0]
                protocol, category, variable = action.split('.')
                value = data[1]
                if category == "game":
                    self.game(protocol, variable, value)
                elif category == "lobby":
                    self.lobby(protocol, variable, value)
            except Exception as e:
                netlog.error("Could not parse: {} \nError: {}".format(data, e), exc_info=True)
        def game(self, protocol, variable, value):
            # UDP: fast-path state updates (ball position, opponent paddle).
            if protocol == "udp":
                if variable == "ball":
                    value = str(value).split(',')
                    gfx.game.ball.X = int(value[0])
                    gfx.game.ball.Y = int(value[1])
                elif variable == "pad2":
                    # "pad2" always means the *other* player's paddle.
                    if net.game.playerslot == 1:
                        gfx.game.rightBrick.Y = int(value)
                    elif net.game.playerslot == 2:
                        gfx.game.leftBrick.Y = int(value)
            # TCP: reliable updates (status text, score, slot assignment).
            elif protocol == "tcp":
                if variable == "msg":
                    gfx.game.statusMessage = str(value)
                elif variable == "score":
                    value = value.split(',')
                    gfx.game.player1 = value[0]
                    gfx.game.player2 = value[1]
                elif variable == "playerslot":
                    net.game.playerslot = int(value)
                if variable == "collision":
                    sound.playSound('boing')
        def lobby(self, protocol, variable, value):
            if variable == "userinfo":
                # BUGFIX: this branch originally referenced the undefined
                # names ``register`` and ``b64ed_passwd``, so it always
                # raised NameError (silently logged by handle()).  Use the
                # lobby thread's register flag and the global base64-encoded
                # ``password``, mirroring TcpHandler.run().
                if not net.lobby.register:
                    net.lobby.sendData("lobby.login", password)
                else:
                    net.lobby.sendData("lobby.register", password)
            if variable == "login":
                # value is "True,..." or "False,<error message>".
                netlog.debug(value.split(',')[0])
                if value.split(',')[0] == "True":
                    gfx.menu.location = "multiplayer"
                    gfx.menu.draw()
                    main.loggedin = True
                else:
                    gfx.menu.loginconnectmenu.setMessage(value.split(',')[1])
                    gfx.menu.draw()
        def handleAuthentication(self, data):
            """Parse one pre-login message.  Always returns None; errors
            are logged (same contract as handle())."""
            try:
                action = data[0]
                protocol, category, variable = action.split('.')
                value = data[1]
                self.authentication(protocol, variable, value)
            except Exception as e:
                netlog.error("Could not parse: {} \nError: {}".format(data, e), exc_info=True)
        def authentication(self, protocol, variable, value):
            # Same login-result handling as lobby(); used before login only.
            if variable == "login":
                netlog.debug(value.split(',')[0])
                if value.split(',')[0] == "True":
                    gfx.menu.location = "multiplayer"
                    gfx.menu.draw()
                    main.loggedin = True
                else:
                    gfx.menu.loginconnectmenu.setMessage(value.split(',')[1])
                    gfx.menu.draw()
# Entry point.  Also fires when imported under the module name 'client'
# (e.g. by a packager that imports rather than executes the file).
# Order matters: gfx/sound/net all read main.settings.
if __name__ == '__main__' or __name__ == 'client':
    main = Main()
    gfx = Graphics()
    input = Input()  # NOTE: shadows the builtin input() at module scope
    sound = Sound(soundlog, main.settings)
    net = Networking()
    main.run()
|
from django.contrib import admin
from .models import Product, ProductGallery, ProductComment
from django.contrib import messages
from django.utils.translation import ngettext
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin options for Product.

    Shows a thumbnail preview in the change list and lets staff toggle
    ``active`` inline.
    """
    list_display = ['__str__', 'thumbnail_pic', 'price', 'slug', 'active', ]
    list_filter = ('timestamp', 'active')
    search_fields = ('title', 'description')
    list_editable = ['active']
    # FIX: removed the original ``class Meta: Model = Product`` — ModelAdmin
    # does not read an inner Meta class (that is a Model/ModelForm concept),
    # so it was silently-ignored dead code.  The model is bound at
    # admin.site.register() time instead.
class CommentAdmin(admin.ModelAdmin):
    """Admin options for ProductComment with an inline read/unread toggle."""
    list_display = ['__str__', 'email', 'product', 'is_read']
    list_editable = ['is_read']
    # FIX: removed the original ``class Meta: Model = ProductComment`` —
    # ModelAdmin ignores an inner Meta class, so it was dead code; the model
    # is bound at admin.site.register() time.
# Wire the models into the default admin site; ProductGallery uses the
# stock ModelAdmin.
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductGallery)
admin.site.register(ProductComment, CommentAdmin)
|
# Day 12: Inheritance
# Delving into Inheritance
# Given two classes with templates, Person and Student,
# complete the Student class.
# Grading scale: T >= 0, D >= 40, P >= 55, A >= 70, E >= 80, and 90 <= O <= 100
# constructor with first name, last name, id, and array of test scores.
# Write a method that calculates a Student's test score average and
# return the grade as a character from the grading scale.
class Person(object):
    """Base person: name plus numeric id (Python 2 exercise template)."""
    def __init__(self, firstName, lastName, idNumber):
        self.firstName = firstName
        self.lastName = lastName
        # Coerce to int; raises ValueError for a non-numeric id.
        self.idNumber = int(idNumber)
    def printPerson(self):
        # Python 2 print statements; exact format required by the grader.
        print 'Name:%s, %s' % (self.lastName, self.firstName)
        print 'ID:%d' % self.idNumber
class Student(Person):
    """Person subclass carrying test scores (HackerRank Day 12)."""
    # constructor with first name, last name, id, and array of test scores.
    def __init__(self, firstName, lastName, idNumber, testScores):
        # super(Student, self).__init__(firstName, lastName, idNumber)
        Person.__init__(self, firstName, lastName, idNumber)
        self.testScores = testScores
    # Calculates a Student's test score average and
    # returns the grade as a character from the grading scale.
    # Note: takes the scores as a parameter; self.testScores is unused here.
    def calculate(self, testScores):
        # print '%r' % testScores
        # sum = 0
        # for score in testScores:
        # sum += score
        # return sum / len(testScores)
        total = sum(testScores)
        # NOTE(review): Python 2 integer division truncates the average
        # (e.g. [89, 90] -> 89 -> 'E'); confirm the grader expects that.
        # Raises ZeroDivisionError for an empty score list.
        average = total / len(testScores)
        if average >= 90:
            return 'O'
        elif average >= 80:
            return 'E'
        elif average >= 70:
            return 'A'
        elif average >= 55:
            return 'P'
        elif average >= 40:
            return 'D'
        else:
            return 'T'
# main — read "firstName lastName id", a score count, then the scores,
# and print the person plus the letter grade (Python 2 I/O).
if __name__ == '__main__':
    line = raw_input().split()
    firstName = line[0]
    lastName = line[1]
    idNum = line[2]
    # Score count is consumed but not used; scores come from the next line.
    numScores = int(raw_input())
    scores = map(int, raw_input().split())
    student = Student(firstName, lastName, idNum, scores)
    student.printPerson()
    letter_grade = student.calculate(scores)
    print "Grade:", letter_grade
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.