text stringlengths 8 6.05M |
|---|
import numpy as np

# Load a real matrix whose 8 columns encode 4 complex columns as
# interleaved (real, imag) pairs.
X = np.loadtxt('data')
#X = np.random.normal(size=[20,8])
#np.savetxt('data', X)

# Pack column pairs (2a, 2a+1) into complex columns: re + 1j*im.
Y = np.zeros((np.size(X[:, 0]), 4), 'complex')
for a in range(0, 4):
    Y[:, a] = X[:, 2*a] + 1j*X[:, 2*a + 1]
# BUG FIX: the file mixed Python-2 print statements (`print Y`) with the
# function form used below; normalised to print() so it runs on Python 3.
print(Y)

# Thin (economy) SVD; full_matrices=False keeps P at shape (m, 4).
#P, D, Q = np.linalg.svd(Y)
P, D, Q = np.linalg.svd(Y, False)
# Reconstruct and confirm the decomposition round-trips (last std ~ 0).
Y_a = np.dot(np.dot(P, np.diag(D)), Q)
print(np.std(Y), np.std(Y_a), np.std(Y - Y_a))
print(P)
print(D)
|
from heart_server_helpers import validate_patient
import pytest
@pytest.mark.parametrize("pat_id, expected", [
    (-1, True),
    (-2, True),
    (-3, False),
])
def test_existing_beats(pat_id, expected):
    """validate_patient must report whether each patient id is known."""
    assert validate_patient(pat_id) == expected
|
# -*- coding: utf-8 -*-
"""Unittests for Janitoo.
"""
__license__ = """
This file is part of Janitoo.
Janitoo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Janitoo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Janitoo. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = 'bibi21000@gmail.com'
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
# Update this value when running on raspberry
# 1.5 is a good choice
SLEEP = 0.50
import sys, os, errno
import time
import unittest
import threading
import json as mjson
import shutil
from pkg_resources import iter_entry_points
from nose.plugins.skip import SkipTest
from janitoo.mqtt import MQTTClient
from janitoo.dhcp import JNTNetwork, HeartbeatMessage
from janitoo.utils import json_dumps, json_loads
from janitoo.utils import HADD_SEP, HADD
from janitoo.utils import TOPIC_HEARTBEAT
##############################################################
#Check that we are in sync with the official command classes
#Must be implemented for non-regression
from janitoo.classes import COMMAND_DESC
COMMAND_DISCOVERY = 0x5000
# Import-time sanity check: fails fast when the local constant drifts from
# the official janitoo command-class table (note: stripped under `python -O`).
assert(COMMAND_DESC[COMMAND_DISCOVERY] == 'COMMAND_DISCOVERY')
##############################################################
class JNTCommon(object):
    """Common tests for JNTServer
    """
    def test_000_server_start_wait_stop(self):
        # wipTest/startServer/stopServer are provided by the concrete
        # test-case class this mixin is combined with.
        self.wipTest("Pass but freeze nosetests")
        self.startServer()
        time.sleep(1.5)
        self.stopServer()
class JNTControllerServer(JNTCommon):
    """Tests for JNTServer acting as a controller
    """
    # Last message delivered to the generic MQTT callback.
    message = None
    def mqtt_on_message(self, client, userdata, message):
        """On generic message
        """
        self.message = message
    def mqtt_on_heartbeat_message(self, client, userdata, message):
        """On heartbeat message: stash it for the assert helpers below.
        """
        self.heartbeat_message = message
    def startHeartbeat(self):
        """Start an MQTT client and subscribe to the dhcp heartbeat topic."""
        # NOTE(review): `mqtthearbeat` (sic) must be created by startClient
        # in the concrete test class — confirm the attribute spelling there.
        self.startClient(options=self.server.options.data)
        self.mqtthearbeat.subscribe(topic='/dhcp/heartbeat', callback=self.mqtt_on_heartbeat_message)
    def assertHeartbeat(self, timeout=90):
        """Fail unless any heartbeat arrives within `timeout` seconds."""
        self.heartbeat_message = None
        # Poll in 1 ms steps (timeout*1000 iterations of sleep(0.001)).
        for i in range(0,timeout*1000):
            if self.heartbeat_message is not None:
                break
            else:
                time.sleep(0.001)
        self.assertTrue(self.heartbeat_message is not None)
        self.assertTrue(self.heartbeat_message.payload is not None)
    def assertHeartbeatNode(self, hadd, timeout=90, status=None):
        """Fail unless a heartbeat for node `hadd` (optionally with the
        given `status`) arrives within `timeout` seconds."""
        self.heartbeat_message = None
        checked = False
        state = None
        add_ctrl, add_node = hadd.split(HADD_SEP)
        for i in range(0,timeout*1000):
            if self.heartbeat_message is not None:
                #~ print self.heartbeat_message
                hb = HeartbeatMessage(self.heartbeat_message)
                hbadd_ctrl, hbadd_node, state = hb.get_heartbeat()
                #~ print hbadd_ctrl, hbadd_node, state
                #msg = json_loads(self.heartbeat_message.payload)
                if int(hbadd_ctrl) == int(add_ctrl) and int(hbadd_node) == int(add_node):
                    if status is not None:
                        # Only accept the heartbeat when it carries the
                        # requested state.
                        if state == status:
                            checked = True
                            break
                    else:
                        checked = True
                        break
                # Not the node (or state) we want: discard and keep waiting.
                self.heartbeat_message = None
            else:
                time.sleep(0.001)
        print("HADD : ", add_ctrl, add_node, state)
        self.assertTrue(checked)
    def assertNodeRequest(self, cmd_class=0, uuid='request_info_nodes', node_hadd=None, client_hadd=None):
        """Publish a request to a node and assert a reply arrives."""
        # NOTE(review): unlike the heartbeat helpers, self.message is not
        # reset to None before waiting — a stale reply from a previous call
        # could satisfy the loop below. Confirm whether that is intended.
        self.mqttc.subscribe(topic='/nodes/%s/reply'%client_hadd, callback=self.mqtt_on_message)
        time.sleep(0.5)
        msg={ 'cmd_class': cmd_class, 'genre':0x04, 'uuid':uuid, 'reply_hadd':client_hadd}
        self.mqttc.publish('/nodes/%s/request' % (node_hadd), json_dumps(msg))
        # Wait up to 300 * 0.05 s = 15 s for the reply.
        for i in range(0,300):
            if self.message is not None:
                break
            else:
                time.sleep(0.05)
        self.assertTrue(self.message is not None)
        self.assertTrue(self.message.payload is not None)
        self.mqttc.unsubscribe(topic='/nodes/%s/reply'%client_hadd)
    def assertBroadcastRequest(self, cmd_class=0, uuid='request_info_nodes', client_hadd=None):
        """Publish a broadcast request and assert a reply arrives."""
        self.mqttc.subscribe(topic='/broadcast/reply/%s'%client_hadd, callback=self.mqtt_on_message)
        time.sleep(0.5)
        msg={ 'cmd_class': cmd_class, 'genre':0x04, 'uuid':uuid, 'data':client_hadd}
        self.mqttc.publish('/broadcast/request', json_dumps(msg))
        for i in range(0,300):
            if self.message is not None:
                break
            else:
                time.sleep(0.05)
        self.assertTrue(self.message is not None)
        self.assertTrue(self.message.payload is not None)
        self.mqttc.unsubscribe(topic='/broadcast/reply/%s'%client_hadd)
    def test_050_start_heartbeat_stop(self):
        self.startServer()
        time.sleep(0.5)
        self.startHeartbeat()
        time.sleep(0.5)
        self.assertHeartbeat()
        self.stopServer()
    def test_052_start_reload_stop(self):
        # Heartbeats must keep flowing across a full server reload.
        self.startServer()
        time.sleep(0.5)
        self.startHeartbeat()
        time.sleep(0.5)
        self.assertHeartbeat()
        time.sleep(2.5)
        self.assertHeartbeat()
        self.server.reload()
        time.sleep(2.5)
        self.assertHeartbeat()
        self.stopServer()
    def test_055_start_reload_threads_stop(self):
        # Heartbeats must keep flowing across a thread-only reload.
        self.startServer()
        time.sleep(0.5)
        self.startHeartbeat()
        time.sleep(0.5)
        self.assertHeartbeat()
        time.sleep(0.5)
        self.assertHeartbeat()
        self.server.reload_threads()
        time.sleep(2.5)
        self.assertHeartbeat()
        self.stopServer()
class JNTDBCommon(object):
    """Common tests for JNTDBServer
    """
    def test_040_server_check_db_auto_migrate(self):
        # Explicitly request a migration pass.
        self.startServer()
        self.server.check_db(migrate=True)
        self.stopServer()
    def test_041_server_check_db_auto_migrate_from_conf(self):
        # migrate=None lets the server take the setting from its config.
        self.startServer()
        self.server.check_db(migrate=None)
        self.stopServer()
|
from sqlalchemy import Column, String, create_engine, event, DDL, Integer, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timezone
# NOTE(review): SQLAlchemy >= 1.4 removed the "postgres://" alias — the
# dialect name must be "postgresql://". Confirm the pinned version.
conn_string = "postgres://user:pwd@localhost:5432/code_examples"
db = create_engine(conn_string, echo=True)
Base = declarative_base()
# Make sure the target schema exists before any table DDL is emitted.
event.listen(
    Base.metadata, "before_create", DDL("CREATE SCHEMA IF NOT EXISTS orm_inheritance")
)
class BaseModel(Base):
    """Abstract base model: shared surrogate key, audit timestamps and a
    convenience save() helper. Concrete models inherit the columns."""
    __abstract__ = True  # declared to not be created by create_all
    __table_args__ = {"schema": "orm_inheritance"}
    # Surrogate primary key shared by every concrete model.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Timezone-aware creation timestamp, defaulted at insert time.
    created_at = Column(
        DateTime(timezone=True), default=lambda: datetime.now(timezone.utc)
    )
    updated_at = Column(DateTime(timezone=True), nullable=True)

    def save(self, session, commit=True):
        """Add this instance to *session* and optionally commit.

        On commit failure the transaction is rolled back and the error is
        re-raised; the session is closed either way.
        """
        session.add(self)
        if commit:
            try:
                session.commit()
            except Exception:
                session.rollback()
                # BUG FIX: the original swallowed the exception silently,
                # so callers never learned the row was not persisted.
                raise
            finally:
                session.close()
class Class(BaseModel):
    """A school class (name plus free-text description)."""
    __tablename__ = "class"
    name = Column(String)
    description = Column(String)
class Teacher(BaseModel):
    """A teacher with a hire date."""
    __tablename__ = "teacher"
    name = Column(String)
    surname = Column(String)
    hired_at = Column(DateTime)
class Student(BaseModel):
    """A student with a birth date."""
    __tablename__ = "student"
    name = Column(String)
    surname = Column(String)
    birth_date = Column(DateTime)
if __name__ == "__main__":
    # Build a session factory bound to the engine and emit the DDL
    # (schema + all concrete tables) on the target database.
    Session = sessionmaker(db)
    session = Session()
    Base.metadata.create_all(db)
|
from apps.items.models.weapons import *
from apps.items.models.ships import *
|
from netCDF4 import Dataset
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
# Per-variable NetCDF file name stems, folder names and variable keys
# (kept in lockstep by index; module-level `var` selects the active one).
file_extensions = ['air.2m.gauss','uwnd.10m.gauss','pr_wtr.eatm','slp','vwnd.10m.gauss']
folders = ['Air Temperature', 'Horizontal Wind','Precipitation Water', 'Sea Level Pressure','Vertical Wind']
Variables = ['air','uwnd','pr_wtr','slp','vwnd']
# Ranges within dataset that correspond to 21N-27N, 72E-85E, and May 20-Sep. 30
lat_max = 35
lat_min = 32
long_min = 38
long_max = 45
time_min = 141
time_max = 273
years = 67
days = 132  # time_max - time_min
lat_dpoints = 3  # lat_max - lat_min
long_dpoints = 7  # long_max - long_min
def GetData (year_index, VariableDataSet):
    """Read one year of the active variable (module-level ``var``) from its
    NetCDF file, crop it to the configured time/lat/lon window and stack it
    onto VariableDataSet along axis 0. Returns the grown array (the input
    array itself is not modified in place)."""
    year = 1948 + year_index
    # NOTE(review): absolute, machine-specific Windows path.
    dataset = Dataset('C:/Users/user/Documents/Personal/Research/Climate Variable Data/' + folders[var] + '/' + file_extensions[var] + '.' + str(year) + '.nc', 'r')
    dataset = dataset.variables[Variables[var]][time_min:time_max,lat_min:lat_max,long_min:long_max]
    dataset = np.asarray(dataset).astype(float)
    # Concatenating
    VariableDataSet = np.vstack((VariableDataSet, dataset))
    return VariableDataSet
def SaveMeanStd (Array):
    """Reduce a (time, lat, lon) stack over axis 0 and persist the
    per-grid-cell mean and standard deviation as .npy files tagged with
    the active variable name (module-level ``Variables[var]``)."""
    mean_array = np.mean(Array, axis=0)
    std_array = np.std(Array, axis=0)
    print("Shape of Mean Array: ", mean_array.shape)
    print("Shape of Std Array: ", std_array.shape)
    tag = Variables[var]
    np.save('Mean_Array_' + tag, mean_array)
    np.save('Std_Array_' + tag, std_array)
# ===============Main================
for var in range (0,len(Variables)):
    print('Starting ' + Variables[var] + ' variable...\n')
    # Accumulator: (time, lat, lon) slabs stacked along axis 0.
    VariableDataSet = np.zeros((0,3,7), dtype = float)
    output_file_name = Variables[var] + '_raw_3d_dataset'
    # NOTE(review): only 30 of the declared 67 years are read — confirm intent.
    for year_index in range(0,30):
        print(year_index + 1948)
        VariableDataSet = GetData(year_index, VariableDataSet)
    np.save(output_file_name, VariableDataSet)
    SaveMeanStd(VariableDataSet)
def cal_height(plates):
    """Total stack height: the first plate contributes 10; each following
    plate adds 5 when it equals its predecessor, 10 otherwise."""
    height = 10
    for prev, cur in zip(plates, plates[1:]):
        height += 5 if cur == prev else 10
    return height
plates = input()  # sequence of plate ids, e.g. "aabbc"
print(cal_height(plates))
|
"""Unit test package for pygpt."""
|
# Total purchase
# Calculate and show the total purchase for items
# Anatoli Penev
# 27.10.2017
mouse = float(input('Enter price'))     # price for item "mouse"
keyboard = float(input('Enter price'))  # price for item "keyboard"
desk = float(input('Enter price'))      # price for item "desk"
chair = float(input('Enter price'))     # price for item "chair"
monitor = float(input('Enter price'))   # price for item "monitor"
tax = 0.06  # sales-tax rate (6%)
# Compute the subtotal once instead of repeating the five-term sum.
subtotal = mouse + keyboard + desk + chair + monitor  # total before tax
tax_price = subtotal * tax            # tax owed on the subtotal
tax_price_added = subtotal + tax_price  # grand total
print('Tax rate is', tax)
print('Total price before tax', subtotal)
# BUG FIX: this line printed the tax amount but labelled it the total of
# the items purchased; label it for what it is.
print('Tax on items purchased is', tax_price)
print('Subtotal + Tax', tax_price_added)
|
import random
class LinkedList:
    """Singly linked list used as a LIFO stack (push/pop at the head)."""

    def __init__(self):
        # BUG FIX: head was initialised to 0; use None so the traversal
        # loops (`while current is not None`) terminate on an empty list.
        self.head = None
        self.size = 0

    class Node:
        """One cell: the stored element plus a link to the next cell."""
        def __init__(self, element, next=None):
            self.element = element
            self.next = next

    def push(self, e):
        """Prepend e as the new head (stack push)."""
        self.head = self.Node(e, self.head)
        self.size += 1

    def isempty(self):
        """True when the list holds no elements."""
        return self.size == 0

    def top(self):
        """Return (without removing) the head element; raise on empty."""
        if self.isempty():
            raise Exception('Empty')
        return self.head.element

    def length(self):
        """Number of stored elements."""
        return self.size

    def pop(self):
        """Remove and return the head element (stack pop); raise on empty."""
        if self.isempty():
            raise Exception('Empty')
        pop = self.head.element
        self.head = self.head.next
        # BUG FIX: was `self.size =-1` (assigning -1), which corrupted the
        # size after the first pop; decrement instead.
        self.size -= 1
        return pop

    def printList(self):
        """Print every element from head to tail."""
        current = self.head
        count = 0
        while current is not None:
            print(current.element)
            current = current.next
            count += 1
            if count == self.size:
                print("DOne")
                break

    def COnvertToArray(self):
        """Drain the list into self.arry, print it, then print it indexed."""
        self.popToArray()
        print(self.arry)
        self.PrintArray()

    def popToArray(self):
        """Pop every element into a new list stored on self.arry."""
        self.arry = []
        # BUG FIX: iterated over the global `l` (a length snapshot taken by
        # the calling script); use the list's own current size instead.
        for ran in range(self.size):
            temp = self.pop()
            print("temp", temp)
            self.arry.append(temp)
        return self.arry

    def PrintArray(self):
        """Print self.arry as 1-based (index, value) pairs."""
        for r, v in enumerate(self.arry, 1):
            print(r, v)

    def FindData(self, Data):
        """Linear search: print a match message or a not-found message."""
        current = self.head
        print("Search For : ", Data)
        count = 0
        while current is not None:
            print(current.element)
            if current.element == Data:
                print("Data :", Data, " = element :", current.element)
                return
            count += 1
            current = current.next
            if count == self.size:
                print("SEarch item ", Data, "Not Found")
                break
# Build a small stack and exercise search, printing and draining.
stack = LinkedList()
# for r in range(10):
#     rand = random.randint(0,1)
#     stack.push(rand)
stack.push(1)
stack.push(2)
stack.push(3)
stack.push(8)
stack.push(90)
stack.push(4)
#stack.pop()
# Length snapshot; popToArray historically read this global.
l = stack.length()
print("length",l)
stack.FindData(900)
print("--------------")
stack.printList()
print("--------------")
stack.COnvertToArray()
# stack.PrintArray()
#stack.printList()
|
from pyModbusTCP import utils
def rewrite_modbus_read(list):  # parameter name kept for compatibility (shadows builtins.list)
    """Decode consecutive Modbus register pairs into IEEE-754 floats.

    list[2k] holds the low 16 bits and list[2k+1] the high 16 bits of one
    32-bit float. Returns {1: f1, 2: f2, ...} keyed by 1-based pair number.

    BUG FIX: the original concatenated hex() strings without zero-padding
    the low word, so any low word < 0x1000 produced a malformed 32-bit
    value (e.g. pair (1573, 16001) in the sample data); it also produced
    float dict keys under Python 3 because of true division. Combine the
    words arithmetically and use integer keys instead.
    """
    new_list = dict()
    for i in range(1, len(list), 2):
        raw = (list[i] << 16) | list[i - 1]
        new_list[(i + 1) // 2] = utils.decode_ieee(raw)
    return new_list
# Raw 16-bit register dump captured from the device.
temp = [0, 0, 7679, 17826, 18432, 17749, 20889, 17768, 21478, 50869, 35737, 50877, 42829, 50845, 47841, 17254, 35389, 17255, 42598, 17254, 31719, 17103, 62521, 17110, 6292, 17083, 10224, 16968, 4981, 16968, 10224, 16968, 20024, 51750, 0, 0, 0, 0, 39321, 48921, 39321, 49081, 65535, 16511, 52428, 16946, 13107, 16985, 52429, 49614, 58327, 17254, 47841, 17255, 51773, 17254, 14156, 16009, 1573, 16001, 44564, 16007, 62915, 16967, 55051, 16967, 65274, 16967, 32768, 17442, 0, 0, 0, 0, 29568, 18162, 26265, 18151, 40524, 18147, 63283, 17714, 7168, 17704, 54476, 17607, 55050, 17254, 43909, 17255, 49152, 17254, 48628, 17160, 62423, 17153, 61866, 17152, 10224, 16968]
print(list(rewrite_modbus_read(temp).values()))
fields=list(rewrite_modbus_read(temp).values())
import csv
# Append the decoded floats as one row of log.csv.
with open(r'log.csv', 'a') as f:
    writer = csv.writer(f)
    writer.writerow(fields)
#!/usr/bin/env python
# encoding: utf-8
"""
Transcribe DNA to RNA: read the DNA string from the file named by
argv[1] and write it to argv[2] with every 'T' replaced by 'U'.

Created by John DiBaggio on 2016-11-30
"""
__author__ = 'johndibaggio'
import sys
import os

argv = list(sys.argv)
# BUG FIX: the shebang was missing the interpreter name ("env" alone).
# Use context managers so both files are closed even if writing fails,
# and let str.replace do the per-character loop in one call.
with open(argv[1]) as input_file:
    dna = input_file.read()
rna = dna.replace("T", "U")
with open(argv[2], 'w+') as output_file:
    output_file.write(rna)
|
import numpy as np
import matplotlib.pyplot as plt
import sys
# >>> UNCOMMENT FOR SUBMISSION <<<
# Dataset path and iteration count come from the command line.
sys_argv = sys.argv
file_argv = sys_argv[1]
iterations_argv = int(sys_argv[2])
# >>> COMMENT OUT FOR SUBMISSION <<<
# file_argv = 'house_prices_train.csv'
# iterations_argv = 300
def print_graph(data, is_theta=False, is_cost=False):
    """Plot one of three views of the fit.

    is_theta: trajectory of each theta per iteration; is_cost: cost per
    iteration; otherwise the data against the model predictions. Relies on
    the module-level globals `iterations`, `cost`, `theta0..theta2` and
    `theta*_progress` produced by the main script below.
    """
    living_area, sales_price, overall_quality = get_properties(data)
    plt.figure(figsize=(10, 6))
    if is_theta:
        plt.scatter(range(iterations), theta0_progress)
        plt.scatter(range(iterations), theta1_progress)
        plt.scatter(range(iterations), theta2_progress)
        # NOTE(review): the axis labels look swapped (x is the iteration axis).
        plt.xlabel('Thetas')
        plt.ylabel('Iterações')
    elif is_cost:
        plt.scatter(range(iterations), cost)
        plt.xlabel('Custo')
        plt.ylabel('Iterações')
    else:
        plt.scatter(overall_quality + living_area, sales_price)
        pred = theta0 + (living_area * theta1) + (overall_quality * theta2)
        plt.scatter(living_area + overall_quality, pred)
        plt.xlabel('Parametros')
        plt.ylabel('Preço')
    plt.title('Data')
    plt.show()
def get_properties(data):
    """Extract and scale the three model columns from the raw CSV matrix.

    Column 46 -> living area (min-max scaled to [0, 1]),
    column 80 -> sale price (unscaled target),
    column 17 -> overall quality (min-max scaled to [0, 1]).
    Row 0 of each column (the CSV header row) is dropped.

    Improvement: the scaling is vectorized with numpy broadcasting instead
    of the original per-element Python list comprehension.
    """
    x = np.delete(np.asarray(data[:, 46], dtype=float), 0)
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    y = np.delete(np.asarray(data[:, 80], dtype=float), 0)
    z = np.delete(np.asarray(data[:, 17], dtype=float), 0)
    z = (z - np.min(z)) / (np.max(z) - np.min(z))
    return x, y, z
def h0(theta_0, theta_1, theta_2, xi, zi):
    """Hypothesis: linear model theta_0 + theta_1*xi + theta_2*zi."""
    prediction = theta_0
    prediction += xi * theta_1
    prediction += zi * theta_2
    return prediction
def compute_u(theta_0, theta_1, theta_2, xi, yi, zi):
    """Residual: model prediction minus observed target yi."""
    # Inlined hypothesis: theta_0 + theta_1*xi + theta_2*zi.
    prediction = theta_0 + (xi * theta_1) + (zi * theta_2)
    return prediction - yi
def compute_cost(theta_0, theta_1, theta_2, data):
    """Mean squared error of the current thetas over the dataset."""
    x, y, z = get_properties(data)
    summation = 0
    for xi, yi, zi in zip(x, y, z):
        residual = compute_u(theta_0, theta_1, theta_2, xi, yi, zi)
        summation += residual * residual
    return summation / x.size
def step_gradient(theta_0_current, theta_1_current, theta_2_current, data, alpha):
    """One batch gradient-descent update of (theta_0, theta_1, theta_2)."""
    x, y, z = get_properties(data)
    grad0 = 0
    grad1 = 0
    grad2 = 0
    # Accumulate the partial derivatives over every sample.
    for xi, yi, zi in zip(x, y, z):
        u = compute_u(theta_0_current, theta_1_current, theta_2_current, xi, yi, zi)
        grad0 += u
        grad1 += u * xi
        grad2 += u * zi
    n = x.size
    step = 2 * alpha
    theta_0_updated = theta_0_current - step * (grad0 / n)
    theta_1_updated = theta_1_current - step * (grad1 / n)
    theta_2_updated = theta_2_current - step * (grad2 / n)
    return theta_0_updated, theta_1_updated, theta_2_updated
def gradient_descent(data, starting_theta_0, starting_theta_1, starting_theta_2, learning_rate, num_iterations):
    """Run num_iterations of batch gradient descent.

    Returns [theta_0, theta_1, theta_2, cost_history,
    theta_0_history, theta_1_history, theta_2_history].
    """
    thetas = (starting_theta_0, starting_theta_1, starting_theta_2)
    cost_graph = []          # cost (MSE) before each update
    histories = ([], [], [])  # per-iteration trace of each theta
    for _ in range(num_iterations):
        cost_graph.append(compute_cost(thetas[0], thetas[1], thetas[2], data))
        thetas = step_gradient(thetas[0], thetas[1], thetas[2], data, learning_rate)
        for trace, value in zip(histories, thetas):
            trace.append(value)
    return [thetas[0], thetas[1], thetas[2], cost_graph,
            histories[0], histories[1], histories[2]]
def minmax_scaling(x, xmin, xmax):
    """Scale x into [0, 1] relative to the range [xmin, xmax]."""
    span = xmax - xmin
    return (x - xmin) / span
# Load the CSV (header row becomes NaN and is dropped in get_properties).
house_prices_data = np.genfromtxt(file_argv, delimiter=',')
iterations = iterations_argv
theta0, theta1, theta2, cost, theta0_progress, theta1_progress, theta2_progress = gradient_descent(
    house_prices_data,
    starting_theta_0=0,
    starting_theta_1=0,
    starting_theta_2=0,
    learning_rate=0.01,
    num_iterations=iterations)
# Print the optimized parameters
print('theta_0: ', theta0)
print('theta_1: ', theta1)
print('theta_2: ', theta2)
print('Erro quadratico medio: ', compute_cost(theta0, theta1, theta2, house_prices_data))
# print_graph(house_prices_data, False, True)
|
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 397: minimum operations to reduce n to 1 (halve when even;
    add or subtract 1 when odd, choosing the branch that exposes more
    factors of two)."""

    def integerReplacement(self, n):
        # Base cases small enough to answer directly.
        if n == 1:
            return 0
        if n == 2:
            return 1
        if n == 3:
            return 2
        remainder = n % 4
        if remainder == 1:
            # n-1 then two halvings: 3 steps down to (n-1)//4.
            return 3 + self.integerReplacement((n - 1) // 4)
        if remainder == 3:
            # n+1 then two halvings: 3 steps down to (n+1)//4.
            return 3 + self.integerReplacement((n + 1) // 4)
        # Even n (n % 4 in {0, 2}): halve once.
        return 1 + self.integerReplacement(n // 2)
if __name__ == "__main__":
    # Smoke tests: 8 -> 4 -> 2 -> 1 (3 ops); 7 -> 8 -> 4 -> 2 -> 1 (4 ops).
    solution = Solution()
    assert 3 == solution.integerReplacement(8)
    assert 4 == solution.integerReplacement(7)
|
# -*- coding: utf-8 -*-
#
# Author: Amanul Haque
import pandas as pd
import numpy as np
from os import listdir
from os.path import isfile, join
from gensim.models import KeyedVectors
import os
import nltk
import re
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from string import punctuation
from autocorrect import spell
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import math
import statistics
from nltk.tokenize import sent_tokenize
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Point the Stanford CoreNLP client at the local installation directory.
os.environ['CORENLP_HOME'] = '/home/ahaque2/project/virtual_environment_1/stanfordNLP'
class get_word_embeddings:
    """Feature extraction for stance detection: builds word-embedding,
    headline/body-similarity and sentiment features from article bodies and
    their associated headlines.

    NOTE(review): relies on gensim's `model.vocab` (gensim < 4.0) and
    pandas `DataFrame.append` (removed in pandas 2.0) — confirm the pinned
    library versions before running.
    """
    def __init__(self):
        # Names of the feature groups this class can produce.
        self.features = ['similarity', 'top5_senti', 'article_senti', 'head_senti', 'hedge_words', 'word_embeddings']
    #Code for Text preprocessing
    def autospell(self, text):
        """
        correct the spelling of the word.
        """
        spells = [spell(w) for w in (nltk.word_tokenize(text))]
        return " ".join(spells)
    def to_lower(self, text):
        """
        :param text:
        :return:
            Converted text to lower case as in, converting "Hello" to "hello" or "HELLO" to "hello".
        """
        return text.lower()
    def remove_numbers(self, text):
        """
        take string input and return a clean text without numbers.
        Use regex to discard the numbers.
        """
        output = ''.join(c for c in text if not c.isdigit())
        return output
    def remove_punct(self, text):
        """
        take string input and clean string without punctuations.
        use regex to remove the punctuations.
        """
        return ''.join(c for c in text if c not in punctuation)
    def remove_Tags(self, text):
        """
        take string input and clean string without tags.
        use regex to remove the html tags.
        """
        cleaned_text = re.sub('<[^<]+?>', '', text)
        return cleaned_text
    def sentence_tokenize(self, text):
        """
        take string input and return list of sentences.
        use nltk.sent_tokenize() to split the sentences.
        """
        sent_list = []
        for w in nltk.sent_tokenize(text):
            sent_list.append(w)
        return sent_list
    def word_tokenize(self, text):
        """
        :param text:
        :return: list of words
        """
        return [w for sent in nltk.sent_tokenize(text) for w in nltk.word_tokenize(sent)]
    def remove_stopwords(self, sentence):
        """
        removes all the stop words like "is,the,a, etc."
        """
        stop_words = stopwords.words('english')
        return ' '.join([w for w in nltk.word_tokenize(sentence) if not w in stop_words])
    def stem(self, text):
        """
        :param word_tokens:
        :return: list of words
        """
        snowball_stemmer = SnowballStemmer('english')
        stemmed_word = [snowball_stemmer.stem(word) for sent in nltk.sent_tokenize(text)for word in nltk.word_tokenize(sent)]
        return " ".join(stemmed_word)
    def lemmatize(self, text):
        """Lemmatize every word of `text` and re-join with spaces."""
        wordnet_lemmatizer = WordNetLemmatizer()
        lemmatized_word = [wordnet_lemmatizer.lemmatize(word)for sent in nltk.sent_tokenize(text)for word in nltk.word_tokenize(sent)]
        return " ".join(lemmatized_word)
    def preprocess(self, text):
        """Full cleaning pipeline: lowercase, lemmatize, strip digits,
        punctuation, HTML tags and stopwords; return the word tokens."""
        lower_text = self.to_lower(text)
        sentence_tokens = self.sentence_tokenize(lower_text)
        word_list = []
        for each_sent in sentence_tokens:
            lemmatizzed_sent = self.lemmatize(each_sent)
            clean_text = self.remove_numbers(lemmatizzed_sent)
            clean_text = self.remove_punct(clean_text)
            clean_text = self.remove_Tags(clean_text)
            clean_text = self.remove_stopwords(clean_text)
            word_tokens = self.word_tokenize(clean_text)
            for i in word_tokens:
                word_list.append(i)
        return word_list
    def mean_vectorizor(self, model, tokenized_sent, dim):
        """Average the word vectors of each token list; a list with no
        in-vocabulary words maps to the zero vector of length `dim`."""
        return np.array([
            np.mean([model[w] for w in words if w in model.vocab]
                    or [np.zeros(dim)], axis=0)
            for words in tokenized_sent
        ])
    def get_tokenized_body_para1(self, text, dim=5):
        """Split `text` into sentence fragments (NLTK sentences further
        split on '.'); `dim` is currently unused (whole body kept)."""
        tokenized_sent = sent_tokenize(text)
        #body_text = tokenized_sent[0:dim]
        body_text = tokenized_sent
        bt = []
        [bt.extend(b.split('.')[:-1]) for b in body_text]
        return bt
    def get_hedge_word_counts(self, sent_list):
        """Count, per sentence, how many hedge words (from
        hedge_words.txt) occur in it. Substring matching, not token match."""
        f = open('hedge_words.txt')
        hedge_words = f.readlines()
        hedge_words = [h.strip('\n') for h in hedge_words]
        hedge_word_counts = []
        for sent in sent_list:
            count = 0
            for word in hedge_words:
                if word in sent:
                    count+=1
            hedge_word_counts.append(count)
        return hedge_word_counts
    def get_senti_score(self, comment_list):
        """VADER polarity scores (dicts) for each text in comment_list."""
        analyzer = SentimentIntensityAnalyzer()
        senti_score = [analyzer.polarity_scores(text) for text in comment_list]
        return senti_score
    def get_features(self, article_id, article_body, stance_id, labels, headlines):
        """Per (headline, article) pair, build a 305-dim feature vector:
        300-dim mean embedding of the 5 body sentences most similar to the
        headline, plus mean similarity, headline sentiment, article
        sentiment and top-5-sentence sentiment. Returns a DataFrame with
        columns [stance_id, label, word_features]."""
        model = KeyedVectors.load_word2vec_format('../../word_embedings/GoogleNews-vectors-negative300.bin', binary=True)
        df = pd.DataFrame(columns = ['stance_id', 'label', 'word_features'])
        for bid, txt in zip(article_id, article_body):
            # All stance rows that refer to this article body.
            index = np.where(stance_id == bid)[0]
            article = np.array(self.get_tokenized_body_para1(txt))
            #hedge_word_counts = self.get_hedge_word_counts(article)
            lab = labels.iloc[index]
            heads = headlines.iloc[index]
            heads_tokenized = [self.preprocess(h) for h in heads]
            head_vec = self.mean_vectorizor(model, heads_tokenized, 300)
            feature_vec = []
            sent_embeddings = []
            # Embed every body sentence once, reused for every headline.
            for sent in article:
                v = self.mean_vectorizor(model, [self.preprocess(sent)], 300)
                sent_embeddings.append(v)
            from scipy import spatial
            for h, headl in zip(head_vec, heads_tokenized):
                sim = []
                for emb in sent_embeddings:
                    sim.append(1 - spatial.distance.cosine(h, emb))
                # NaN similarity (zero vectors) treated as no similarity.
                sim = [0 if math.isnan(x) else x for x in sim]
                top5 = np.argsort(sim)[-5:]
                if(len(top5) > 0):
                    sent_body = []
                    [sent_body.extend(self.preprocess(article[i])) for i in top5]
                    vec = self.mean_vectorizor(model, [sent_body], 300).tolist()[0]
                    #print("Shape of article vector ", type(vec), len(vec))
                    s = statistics.mean([sim[i] for i in top5])
                    vec.append(s)
                    senti = self.get_senti_score(headl)
                    comp_senti = statistics.mean([a['compound'] for a in senti])
                    vec.append(comp_senti)
                    senti = self.get_senti_score(article)
                    comp_senti = statistics.mean([a['compound'] for a in senti])
                    vec.append(comp_senti)
                    top5_sent = article[top5]
                    senti = self.get_senti_score(top5_sent)
                    top5_comp_senti = statistics.mean([a['compound'] for a in senti])
                    vec.append(top5_comp_senti)
                    #print("New Vec shape ", len(vec))
                else:
                    vec = np.zeros(305)
                feature_vec.append(vec)
            df2 = pd.DataFrame(columns = ['stance_id', 'label', 'word_features'])
            df2['stance_id'] = index
            df2['label'] = np.array(lab)
            df2['word_features'] = feature_vec
            df = df.append(df2, ignore_index = True)
            #sys.exit()
        return df
    def get_features2(self, article_id, article_body, stance_id, labels, headlines):
        """Simpler feature set: concatenation of the 300-dim headline
        embedding, the headline/body cosine similarity, and the 300-dim
        embedding of the first 1000 characters of the body."""
        df = pd.DataFrame(columns = ['stance_id', 'label', 'word_features'])
        model = KeyedVectors.load_word2vec_format('../../word_embedings/GoogleNews-vectors-negative300.bin', binary=True)
        for bid, txt in zip(article_id, article_body):
            index = np.where(stance_id == bid)[0]
            if(len(index) > 0):
                #print("Article ", txt, type(txt))
                # Only the leading 1000 characters of the body are embedded.
                article = txt[0:1000]
                article = self.preprocess(article)
                lab = labels.iloc[index]
                heads = headlines.iloc[index]
                heads_tokenized = [self.preprocess(h) for h in heads]
                #print("Headlines length ", len(heads_tokenized))
                head_vec = self.mean_vectorizor(model, heads_tokenized, 300)
                article_vec = self.mean_vectorizor(model, [article], 300)
                feature_vec = []
                #print(article_vec.shape)
                #print(head_vec.shape)
                from scipy import spatial
                for h in head_vec:
                    s = 1 - spatial.distance.cosine(h, article_vec)
                    c = np.concatenate((h, s, article_vec), axis = None)
                    feature_vec.append(c)
                #print("Feature Vector length ", len(feature_vec))
                df2 = pd.DataFrame(columns = ['stance_id', 'label', 'word_features'])
                df2['stance_id'] = index
                df2['label'] = np.array(lab)
                df2['word_features'] = feature_vec
                df = df.append(df2, ignore_index = True)
        return df
    def get_Xy(self, article_body, article_stance):
        """Read the bodies and stances CSVs, build features with
        get_features2, drop NaN rows, and return (X, y) columns."""
        body = pd.read_csv(article_body)
        stance = pd.read_csv(article_stance)
        #print(body.shape)
        #print(stance.shape)
        article_id = body['Body ID']
        stance_id = stance['Body ID']
        article_body = body['articleBody']
        labels = stance['Stance']
        headlines = stance['Headline']
        df = self.get_features2(article_id, article_body, stance_id, labels, headlines)
        #print("Shape of Final dataframe ", df.shape)
        df = df.drop(np.where(df.isna() == True)[0])
        #df.to_csv("combined_features/preprocessed.csv")
        y = df['label']
        X = df['word_features']
        return X, y
|
#!/usr/bin/env python
'''
astarcmd -- resolves finding paths in road maps with A-Star algorithm
astarcmd is a description
It defines classes_and_methods
@author: Pedro Vicente
@copyright: 2013 Biicode test. All rights reserved.
@license: license
@contact: pedrovfer@gmail.com
@deffield updated: Updated
'''
import sys
import os
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
__all__ = []
__version__ = 0.1
__date__ = '2013-03-05'
__updated__ = '2013-03-05'
# Build-mode switches consumed by tooling / the __main__ guard.
DEBUG = 1
TESTRUN = 0
PROFILE = 0
class CLIError(Exception):
    '''Generic exception to raise and log different fatal errors.'''
    def __init__(self, msg):
        # BUG FIX: the original called `super(CLIError).__init__(type(self))`,
        # which builds an unbound super object and never initialises the
        # Exception base with the message. Pass the message through instead.
        super(CLIError, self).__init__(msg)
        self.msg = "E: %s" % msg
    def __str__(self):
        return self.msg
    def __unicode__(self):
        return self.msg
def main(argv=None):  # IGNORE:C0111
    '''Command line options.

    Parses the roadmap file plus --origin/--end cities, runs the A-Star
    solver and prints the route. Returns 0 on success, 2 on bad usage.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)
    program_name = os.path.basename(sys.argv[0])
    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s
Created by Pedro Vicente on %s.
Copyright 2013. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('-V', '--version', action='version', version=program_version_message)
        parser.add_argument(dest="file", help="path to file with roadmap ")
        parser.add_argument("-o", "--origin", help="origin of route.From city")
        parser.add_argument("-e", "--end", help="end of route. Goal city")
        # Process arguments
        args = parser.parse_args()
        # BUG FIX: the original used Python-2 print statements, which are
        # syntax errors on Python 3; the function form below also works on
        # Python 2 for single-argument calls.
        if args.origin is None:
            print('too few arguments. origin argument mandatory. See help')
            return 2
        if args.end is None:
            print('too few arguments. end argument mandatory. See help')
            return 2
        from src.data import CityMap
        citymap = CityMap.loadFromFile(args.file)
        from src.astarsolver import Cities_AStarSolver
        solver = Cities_AStarSolver(citymap)
        route, cost = solver.route(args.origin, args.end)
        if route is None:
            print('There is any route to go from %s to %s in map defined in %s'%(args.origin, args.end, args.file))
        else:
            print('Route: %s with cost %s has been detected wiht A-Star Algorithm in map defined in %s'%(route, cost, args.file))
        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0
    except Exception:
        raise
if __name__ == "__main__":
    # Exit with main()'s return code (0 on success, 2 on bad usage).
    sys.exit(main())
"""
Flask Documentation: http://flask.pocoo.org/docs/
Jinja2 Documentation: http://jinja.pocoo.org/2/documentation/
Werkzeug Documentation: http://werkzeug.pocoo.org/documentation/
This file creates your application.
"""
import os, jwt, base64
from datetime import datetime
from app import app, db, login_manager
from flask import render_template, request, flash, url_for, redirect, jsonify, _request_ctx_stack, g, session
from flask_login import login_user, logout_user, current_user, login_required
from app.models import Posts, Users, Likes, Follows
from .forms import RegisterForm, LoginForm, NewPostForm
from werkzeug.utils import secure_filename
from werkzeug.security import check_password_hash
from functools import wraps
from wtforms.validators import InputRequired
from wtforms import HiddenField
from flask_wtf import FlaskForm
#Create a JWT @requires_auth decorator
# This decorator can be used to denote that a specific route should check
# for a valid JWT token before displaying the contents of that route.
def requires_auth(f):
    """Route decorator: reject the request with 401 unless it carries a
    valid `Authorization: Bearer <jwt>` header; on success the decoded
    claims are exposed as g.current_user."""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.headers.get('Authorization', None)
        if not auth:
            return jsonify({'code': 'authorization_header_missing', 'description': 'Authorization header is expected'}), 401
        parts = auth.split()
        if parts[0].lower() != 'bearer':
            return jsonify({'code': 'invalid_header', 'description': 'Authorization header must start with Bearer'}), 401
        elif len(parts) == 1:
            return jsonify({'code': 'invalid_header', 'description': 'Token not found'}), 401
        elif len(parts) > 2:
            return jsonify({'code': 'invalid_header', 'description': 'Authorization header must be Bearer + \s + token'}), 401
        token = parts[1]
        try:
            # NOTE(review): PyJWT >= 2.x requires an `algorithms=` argument
            # here and renamed ExpiredSignature to ExpiredSignatureError —
            # confirm the pinned PyJWT version.
            payload = jwt.decode(token, app.config['TOKEN_SECRET'])
        except jwt.ExpiredSignature:
            return jsonify({'code': 'token_expired', 'description': 'token is expired'}), 401
        except jwt.DecodeError:
            return jsonify({'code': 'token_invalid_signature', 'description': 'Token signature is invalid'}), 401
        # Make the decoded claims available to the wrapped view.
        g.current_user = user = payload
        return f(*args, **kwargs)
    return decorated
@app.route('/api/users/register', methods=["POST"])
# Accepts user information and saves it to the database.
def register():
    """Create a new user from the posted RegisterForm; the profile photo
    is saved under UPLOAD_FOLDER and only its filename stored."""
    print ("enterd")
    registerform = RegisterForm()
    if request.method == "POST":
        if registerform.validate_on_submit():
            username = registerform.username.data
            password = registerform.password.data
            firstname = registerform.firstname.data
            lastname = registerform.lastname.data
            email = registerform.email.data
            location = registerform.location.data
            biography = registerform.biography.data
            profile_photo = registerform.profile_photo.data
            photofilename = secure_filename(profile_photo.filename)
            profile_photo.save(os.path.join(app.config['UPLOAD_FOLDER'], photofilename))
            joined_on = datetime.utcnow()
            registration = {"message": "User successfully registered"}
            user = Users(username, password, firstname, lastname, email, location, biography, photofilename, joined_on)
            db.session.add(user)
            db.session.commit()
            return jsonify(registration=registration)
        else:
            # Validation failed: report the per-field form errors.
            #return jsonify(errors=form_errors(registerform))
            return jsonify(errors=[{"errors": form_errors(registerform) }])
    return jsonify(errors=[{"errors": form_errors(registerform) }])
    #return render_template('index.html', registerform=registerform)
#return render_template('index.html', registerform=registerform)
@app.route('/api/auth/login', methods=["POST"])
def login():
    """Validate username/password credentials and issue a signed JWT.

    On success the user is logged into the session and the response
    carries the token, a success message and the user's id.
    """
    loginform = LoginForm()
    if request.method == "POST":
        if loginform.validate_on_submit():
            username = loginform.username.data
            password = loginform.password.data
            user = Users.query.filter_by(username=username).first()
            if user is not None and check_password_hash(user.password, password):
                # Load the user id into the session.
                login_user(user)
                # BUG FIX: the payload key must be the literal string
                # 'username'; the original used the variable's value as the
                # key, producing e.g. {"bob": "bob"}.
                payload = {'username': username}
                # NOTE(review): .decode('utf-8') assumes PyJWT 1.x, where
                # encode() returns bytes; PyJWT 2.x returns str — confirm
                # the installed version.
                token = jwt.encode(payload, app.config['TOKEN_SECRET'], algorithm='HS256').decode('utf-8')
                successlogin = {
                    "token": token,
                    "message": "User successfully logged in.",
                    "id": user.id
                }
                return jsonify(error=None, successlogin=successlogin)
            flash('Username or Password is incorrect.')
            return jsonify(postvalidation={"postvalidation": "Username or Password is incorrect."})
        return jsonify(errors=[{"errors": form_errors(loginform)}])
@app.route('/api/auth/logout', methods=["GET"])
@requires_auth
def logout():
    """Log the current user out of the session and confirm via JSON."""
    logout_user()
    farewell = {"message": "User successfully logged out."}
    return jsonify(logout=farewell)
@app.route('/api/users/<user_id>', methods=["GET"])
@login_required
@requires_auth
def getUserDetails(user_id):
    """Return a user's profile details together with all of their posts."""
    user = Users.query.filter_by(id=user_id).first()
    # ROBUSTNESS FIX: an unknown id previously raised AttributeError on
    # None; answer with a 404 instead.
    if user is None:
        return jsonify(errors=[{"errors": "User not found"}]), 404
    post_list = Posts.query.filter_by(user_id=user_id).all()
    posts = [{"id": post.id, "user_id": post.user_id, "photo": post.photo,
              "description": post.caption, "created_on": post.created_on}
             for post in post_list]
    user_details = [{"id": user.id, "username": user.username,
                     "firstname": user.firstname, "lastname": user.lastname,
                     "email": user.email, "location": user.location,
                     "biography": user.biography,
                     "profile_photo": user.profile_photo,
                     "joined_on": user.joined_on, "posts": posts}]
    return jsonify(getUserDetails=user_details)
@app.route('/api/users/<user_id>/posts', methods=["POST"])
@login_required
@requires_auth
def newpost(user_id):
    """Add a new post (photo plus caption) to the given user's feed."""
    newpostform = NewPostForm()
    if request.method == "POST" and newpostform.validate_on_submit():
        uploaded = newpostform.post_photo.data
        stored_name = secure_filename(uploaded.filename)
        uploaded.save(os.path.join(app.config['UPLOAD_FOLDER'], stored_name))
        post = Posts(user_id, stored_name, newpostform.caption.data, datetime.utcnow())
        db.session.add(post)
        db.session.commit()
        return jsonify(newpostmessage={"message": "Successfully created a new post"})
    return jsonify(errors={"errors": form_errors(newpostform)})
@app.route('/api/users/<user_id>/posts', methods=["GET"])
@login_required
@requires_auth
def viewposts(user_id):
    """Return every post belonging to the given user as a JSON list."""
    serialized = [
        {
            "id": entry.id,
            "user_id": entry.user_id,
            "photo": entry.photo,
            "description": entry.caption,
            "created_on": entry.created_on,
        }
        for entry in Posts.query.filter_by(user_id=user_id).all()
    ]
    return jsonify(viewposts=serialized)
@app.route('/api/users/<user_id>/follow', methods=["GET", "POST"])
@requires_auth
def follow(user_id):
    """POST: record that the posted follower now follows *user_id*.
    GET: report the user's follower count and follower id list."""
    if request.method == "POST":
        class Form(FlaskForm):
            follower_id = HiddenField(None, validators=[InputRequired()])
        form = Form()
        follower = request.form["followerid"]
        db.session.add(Follows(user_id=user_id, follower_id=follower))
        db.session.commit()
        return jsonify(follow=[{"message": "You are now following that user."}])
    follower_ids = [rel.follower_id
                    for rel in Follows.query.filter_by(user_id=user_id).all()]
    summary = [{"followers": len(follower_ids)},
               {"followerList": follower_ids}]
    return jsonify(follow=summary)
@app.route('/api/posts', methods=["GET"])
@requires_auth
def allposts():
    """Return every post from every user, annotated with its like count
    and whether the logged-in user has liked it."""
    current_id = current_user.id
    tables = []
    for post in Posts.query.all():
        # BUG FIX: likes are keyed by the post's id — the original filtered
        # on post.user_id, counting likes for the wrong record.  Also
        # renamed locals that shadowed the builtins `id` and `list`.
        my_likes = Likes.query.filter_by(post_id=post.id).filter_by(user_id=current_id).all()
        # Look the author up once instead of twice per post.
        author = Users.query.filter_by(id=post.user_id).first()
        tables.append({
            "id": post.id,
            "user_id": post.user_id,
            "photo": '/static/uploads/' + post.photo,
            "caption": post.caption,
            "created_on": post.created_on.strftime("%d %B %Y"),
            "username": author.username,
            "profile_photo": '/static/uploads/' + author.profile_photo,
            "likes": len(Likes.query.filter_by(post_id=post.id).all()),
            "mylike": len(my_likes) == 1,
        })
    return jsonify(posts={"posts": tables})
@app.route('/api/posts/<post_id>/like', methods=["POST"])
@requires_auth
def like(post_id):
    """Register a like on the given post by the logged-in user."""
    # BUG FIX: the route used '{post_id}' (a literal path segment) instead
    # of the Flask placeholder '<post_id>', so post_id was never bound, and
    # request.method is a string — calling it raised TypeError.
    if request.method == "POST":
        db.session.add(Likes(current_user.id, post_id))
        db.session.commit()
        # BUG FIX: count the Likes rows for this post; the original called
        # .count() on a scalar user_id attribute.
        currentlikes = len(Likes.query.filter_by(post_id=post_id).all())
        return jsonify(likesmessage={"message": "Post liked!",
                                     "likes": currentlikes})
###
# Routing for your application.
###
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def index(path):
    """Serve the single-page-app shell for every front-end route.

    vue-router runs in HTML5 history mode, so every path must resolve to
    index.html; the client-side router takes over from there.
    """
    return render_template('index.html')
def get_uploaded_images():
    """Return the filenames of every file under ./app/static/uploads.

    The walk is rooted at the current working directory, so the result
    depends on where the process was started from.
    """
    # BUG FIX: removed leftover debug prints and built the path with
    # os.path.join instead of string concatenation.
    imageslist = []
    uploads_root = os.path.join(os.getcwd(), 'app', 'static', 'uploads')
    for _subdir, _dirs, files in os.walk(uploads_root):
        imageslist.extend(files)
    return imageslist
def form_errors(form):
    """Collect human-readable validation error messages from a
    Flask-WTF/WTForms form instance."""
    return [
        u"Error in the %s field - %s" % (getattr(form, field).label.text, error)
        for field, errors in form.errors.items()
        for error in errors
    ]
###
# The functions below should be applicable to all Flask apps.
###
# user_loader callback: Flask-Login calls this to rebuild the user object
# from the user id stored in the session cookie.
@login_manager.user_loader
def load_user(id):
    """Return the Users row for the session-stored id."""
    numeric_id = int(id)
    return Users.query.get(numeric_id)
###
# The functions below should be applicable to all Flask apps.
###
@app.route('/<file_name>.txt')
def send_text_file(file_name):
    """Serve a static .txt file by name."""
    return app.send_static_file(file_name + '.txt')
@app.after_request
def add_header(response):
    """Force the latest IE rendering engine / Chrome Frame and disable
    response caching.

    max-age=0 keeps browsers from caching rendered pages; raise it (e.g.
    to 600 for ten minutes) if caching ever becomes desirable.
    """
    extra_headers = {
        'X-UA-Compatible': 'IE=Edge,chrome=1',
        'Cache-Control': 'public, max-age=0',
    }
    for name, value in extra_headers.items():
        response.headers[name] = value
    return response
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page with the matching status code."""
    body = render_template('404.html')
    return body, 404
def format_date_joined(date_joined):
    """Format a date as 'Joined on <Weekday> <Month> <day>, <year>'."""
    return "Joined on {}".format(date_joined.strftime("%A %B %d, %Y"))
# Run the development server when executed directly (not under a WSGI host).
# NOTE(review): port is passed as a string; some Werkzeug versions expect an
# int — confirm against the deployed version.
if __name__ == '__main__':
    app.run(debug=True, host="0.0.0.0", port="8080")
|
import json
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sys
tf.compat.v1.enable_eager_execution() # Remove when switching to tf2
from boxes import get_boxes, get_final_box
from constants import feature_size, real_image_height, real_image_width, max_boxes
from img import get_img
from prediction import get_prediction, get_prediction_mask
# Hex colours used to draw each detected class's bounding box; indexed by
# the class label (parallel to labels_name below).
labels_colors = [
    "#000000",
    "#0000FF",
    "#000088",
    "#8888FF",
    "#FF0000",
    "#880000",
    "#FF8888",
]
# Human-readable class names, index-aligned with labels_colors.
labels_name = [
    "Nothing",
    "Ally",
    "Ally with hat",
    "Ally ghost",
    "Ennemy",
    "Ennemy with hat",
    "Ennemy ghost",
]
def show(id, img_path, boxes, mask):
    """Display detection results side by side: labelled bounding boxes on
    the left, the instance mask overlaid on the right.

    id       -- image identifier used only in the window title
    boxes    -- dicts with pixel coords x1/y1/x2/y2 and a 'label' index
    mask     -- per-pixel array where value i+1 marks pixels of instance i
    """
    fig = plt.figure(figsize=(20, 10))
    fig.canvas.set_window_title("Faster-RCNN and Mask-RCNN: review image {}".format(id))
    plt.subplot(1, 2, 1)
    image = plt.imread(img_path)
    ax = plt.gca()
    for b in boxes:
        edgecolor = labels_colors[b["label"]]
        # Place the class name just below the box's bottom edge.
        ax.annotate(labels_name[b["label"]], xy=(b["x1"], b["y2"] + 20))
        rect = patches.Rectangle((b["x1"], b["y1"]), b["x2"] - b["x1"], b["y2"] - b["y1"], edgecolor=edgecolor, facecolor='none')
        ax.add_patch(rect)
    plt.imshow(image)
    plt.subplot(1, 2, 2)
    image = plt.imread(img_path)
    for i in range(max_boxes):
        # BUG FIX: the deprecated alias np.int was removed in NumPy 1.24;
        # the builtin int is the documented replacement.
        specific_mask = np.array(mask != i + 1, dtype=int)
        # Darken two colour channels everywhere except instance i's pixels,
        # tinting each instance with a distinct channel combination.
        image[:, :, i % 3] = image[:, :, i % 3] * specific_mask
        image[:, :, (i + 1) % 3] = image[:, :, (i + 1) % 3] * specific_mask
    plt.imshow(image)
    plt.show()
def show_mask(data_index):
    """Run box + mask prediction on evaluation image *data_index* and
    display both results via show()."""
    img_path = "../pictures/pictures_detect_local_evaluate_100/{}.png".format(data_index)
    img = get_img(img_path)
    boxes, probs, classification_logits, regression_values = get_prediction(img)
    bounding_boxes = []
    for i in range(len(boxes)):
        # Keep only confident detections.
        if not (probs[i] > .9):
            continue
        x1, y1, x2, y2 = get_final_box(boxes[i], regression_values[i], limit_border=False)
        # Scale feature-map coordinates up to real image pixels.
        bounding_boxes.append({
            "x1": x1 * real_image_width // feature_size,
            "y1": y1 * real_image_height // feature_size,
            "x2": x2 * real_image_width // feature_size,
            "y2": y2 * real_image_height // feature_size,
            "label": np.argmax(classification_logits[i]),
        })
    mask = get_prediction_mask(img)
    show(data_index, img_path, bounding_boxes, mask)
# Parse the image index from the command line; only pure digits accepted.
index = None
if len(sys.argv) > 1:
    if sys.argv[1].isdigit():
        # SECURITY FIX: int() instead of eval() — never eval argv input.
        index = int(sys.argv[1])
if index is None:
    print("Usage: python show_mask.py [0-99]")
else:
    show_mask(str(index))
|
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
import time
import requests
def geturl(url):
    """Resolve one HTTP redirect: HEAD *url* and return its Location
    header (None when the response carries no redirect)."""
    response = requests.head(url)
    return response.headers.get('location')
# Download every Chrome extension listed in id.txt (one id per line) and
# save the packages as 1.crx, 2.crx, ...
a = 1
with open('id.txt', 'r') as x:
    for line in x:
        ext_id = line.replace('\n', '')
        url = 'http://clients2.google.com/service/update2/crx?response=redirect&prodversion=49.0&acceptformat=crx3&x=' + ext_id + '%26installsource%3Dondemand%26uc'
        resolved = geturl(url)
        print(resolved)
        package = requests.get(resolved)
        with open(str(a) + '.crx', "wb") as code:
            code.write(package.content)
        a = a + 1
|
class Caja:
    """A box with height (alto), width (ancho) and depth (largo)."""

    def __init__(self, alto, ancho, largo):
        self.alto = alto
        self.ancho = ancho
        self.largo = largo

    def base_caja(self):
        # NOTE(review): despite the name, this multiplies all three
        # dimensions — it returns the volume, not the base area.
        return self.alto * self.ancho * self.largo
# Prompt for the three dimensions, build the box and print its
# base_caja() result.
dimensiones = [
    int(input("Ingrese el alto de su caja: ")),
    int(input("Ingrese el ancho de su caja: ")),
    int(input("Ingrese el largo de su caja: ")),
]
cajas = Caja(*dimensiones)
print(cajas.base_caja())
############################################
# Thomas Lehman-Borer & Rachel Chamberlain #
# Dungeons and Dragons Facilitator #
# CS 1 Final Project #
############################################
from random import shuffle
# Global registries mapping each being's name to its instance; populated by
# the Character/Monster constructors and pruned when beings die in combat.
chars = {}
monst = {}
class SentientBeing:
    '''Parent class of Character and Monster.  Holds the attributes and
    methods common to both: simple getters for health and experience,
    setters such as changeHealth and setArmor, and the attack logic
    (attack plus the private helper __minForHit).'''

    ### CONSTRUCTOR ###
    def __init__(self, name, experience, health, species, attacks, armor):
        '''Initialize common attributes and create a dictionary of bound
        methods usable by the combat function (near the end of the file).'''
        self.__name = name              # string
        self.__experience = experience  # float
        self.__health = health          # list with two items -> [current, max]
        self.__species = species        # string
        self.attacks = attacks          # dict: attack name -> roll string, e.g. '3d6 + 1'
        self.__armor = armor            # integer; smaller means better armor
        # Command name -> bound method, typed by the user during combat.
        self.combatDict = {'attack': self.attack,
                           'changeHealth': self.changeHealth,
                           'getHealth': self.getHealth,
                           'setMaxHealth': self.setMaxHealth,
                           'getArmor': self.getArmor,
                           'setArmor': self.setArmor,
                           'getExp': self.getExp,
                           'addExp': self.addExp,
                           'getName': self.getName,
                           'getSpecies': self.getSpecies}

    ### GETTERS ###
    def getHealth(self):
        '''Return the [current, max] health list.'''
        return self.__health

    def getArmor(self):
        '''Return the armor class (lower is better).'''
        return self.__armor

    def getExp(self):
        '''Return the accumulated experience (float).'''
        return self.__experience

    def getSpecies(self):
        '''Return the species string.'''
        return self.__species

    def getName(self):
        '''Return the being's name.'''
        return self.__name

    ### SETTERS ###
    def changeHealth(self, change):
        '''Add *change* to current health, clamped to [0, max]; pass a
        negative value to deal damage.'''
        current = self.__health[0]
        maximum = self.__health[1]
        if current + int(change) < 0:
            self.__health[0] = 0
        elif current + int(change) > maximum:
            self.__health[0] = maximum
        else:
            self.__health[0] += int(change)

    def setMaxHealth(self, val):
        '''Set the maximum health, lowering current health if it would
        exceed the new maximum.'''
        self.__health[1] = int(val)
        if self.__health[0] > self.__health[1]:
            self.__health[0] = self.__health[1]

    def addExp(self, change):
        '''Add *change* (coerced to float) to the experience total.'''
        self.__experience += float(change)

    def setArmor(self, newArm):
        '''Set the armor class; only 1-9 accepted (smaller = better).'''
        if newArm > 0 and newArm < 10:
            self.__armor = newArm
        else:
            print("You must enter an integer from 1 to 9")

    ### OTHERS ###
    def __str__(self):
        '''Printing the being shows just its name.'''
        return self.__name

    def __bool__(self):
        '''A being is falsy exactly when it is dead (current health == 0);
        combat uses this to know when to drop it from the lists.'''
        return self.__health[0] != 0

    def __minForHit(self, being, attRoll):
        '''Return the minimum 1d20 roll needed for self to hit *being*.

        Characters use a flat armor table (good for levels 1-3); monsters
        choose a table from the number of dice (and modifier) found in
        their damage roll string *attRoll*, e.g. '3d6 + 1'.'''
        if isinstance(self, Character):
            dArmor = being.getArmor()
            # armor : minimum roll
            table = {9: 10, 8: 11, 7: 12, 6: 13, 5: 14, 4: 15, 3: 16, 2: 17}
            return table[dArmor]
        elif isinstance(self, Monster):
            dArmor = being.getArmor()
            # Parse '3d6 + 1' style rolls; works with no modifier and with
            # a negative one.
            if '+' not in attRoll and '-' not in attRoll:
                attRoll += '+0'
            if '+' in attRoll:
                dicePart, modPart = attRoll.split('+')
                modifier = int(modPart)
            else:
                dicePart, modPart = attRoll.split('-')
                # BUG FIX: a '-' modifier is negative; the original parsed
                # '3d6-1' to a modifier of +1.
                modifier = -int(modPart)
            numDice = int(dicePart.split('d')[0])
            if numDice >= 11:
                table = {9: 0, 8: 1, 7: 2, 6: 3, 5: 4, 4: 5, 3: 6, 2: 7}
            elif numDice in [9, 10]:
                table = {9: 2, 8: 3, 7: 4, 6: 5, 5: 6, 4: 7, 3: 8, 2: 9}
            elif numDice in [7, 8]:
                table = {9: 4, 8: 5, 7: 6, 6: 7, 5: 8, 4: 9, 3: 10, 2: 11}
            elif (numDice in [5, 6]) or (numDice == 4 and modifier > 0):
                table = {9: 5, 8: 6, 7: 7, 6: 8, 5: 9, 4: 10, 3: 11, 2: 12}
            elif numDice == 4 or (numDice == 3 and modifier > 0):
                table = {9: 6, 8: 7, 7: 8, 6: 9, 5: 10, 4: 11, 3: 12, 2: 13}
            elif numDice == 3 or (numDice == 2 and modifier > 0):
                table = {9: 8, 8: 9, 7: 10, 6: 11, 5: 12, 4: 13, 3: 14, 2: 15}
            elif numDice == 2 or modifier > 1:
                table = {9: 9, 8: 10, 7: 11, 6: 12, 5: 13, 4: 14, 3: 15, 2: 16}
            else:
                table = {9: 10, 8: 11, 7: 12, 6: 13, 5: 14, 4: 15, 3: 16, 2: 17}
            return table[dArmor]

    def attack(self, being):
        '''Interactively resolve one attack from self against *being*.

        Prompts the user for the attack name and the die results, reports
        whether the attack lands, and applies the damage automatically.'''
        # combat passes names, not instances: resolve strings through the
        # global registries.
        if not isinstance(being, SentientBeing):
            if being in chars:
                being = chars[being]
            elif being in monst:
                being = monst[being]
            else:
                print("This is not a valid being to attack")
                # ROBUSTNESS FIX: bail out instead of falling through and
                # crashing later on the unresolved string.
                return
        # Ask which attack to use; a unique partial name is completed
        # automatically, an ambiguous or unknown one re-prompts.
        print('What is the attack of choice?')
        print(self.attacks)
        attack = input(' >> ')
        while attack not in self.attacks:
            # BUG FIX: the match counter must reset every prompt round; the
            # original accumulated it across iterations.
            possibilities = 0
            fullName = ''
            for a in self.attacks:
                if attack in a:
                    possibilities += 1
                    fullName = a
            if possibilities == 1:
                attack = fullName
            elif possibilities > 1:
                print('Which attack did you mean?')
                attack = input(' >> ')
            else:
                print('That is not an available attack.')
                attack = input(' >> ')
        # The user rolls to hit and reports the result.
        hitDie = int(input('What is the result of a 1d20 roll? '))
        # On a hit, ask for the damage roll and deduct it from the target.
        if self.__minForHit(being, self.attacks[attack]) <= hitDie:
            attDie = int(input('What is the result of a ' +
                               self.attacks[attack] + ' roll? '))
            being.changeHealth(-attDie)
            if being.getHealth()[0] != 0:
                print('The health of', being.getName(), 'is now', being.getHealth())
            else:
                print('You have slain', being.getName() + '.')
        # Low rolls are dodged, high-but-insufficient rolls are blocked.
        elif hitDie < 10:
            print(being.getName(), 'evades the attack.')
        else:
            print("The attack is blocked by the defender's armor.")
########################################################################################################################
class Character(SentientBeing):
    '''Subclass of SentientBeing for player characters.  Adds a player
    name, a level and money on top of the parent's attributes, exposes the
    extra operations through combatDict, and registers each new character
    in the global chars dictionary.'''

    ### CONSTRUCTOR ###
    def __init__(self, name, player, level, experience, health, species, armor, money, attacks):
        self.__player = player
        self.__level = level
        self.__money = money
        super().__init__(name, experience, health, species, attacks, armor)
        # Expose the character-only operations to the combat loop.
        self.combatDict['playerName'] = self.playerName
        self.combatDict['getLevel'] = self.getLevel
        self.combatDict['lvlUp'] = self.lvlUp
        if name not in chars:
            chars[name] = self

    ### GETTERS ###
    def getMoney(self):
        '''Return the list of monetary denomination amounts.'''
        return self.__money

    def playerName(self):
        '''Return the name of the player controlling this character.'''
        return self.__player

    def getLevel(self):
        '''Return the character's current level.'''
        return self.__level

    ### SETTERS ###
    def lvlUp(self):
        '''Raise the character's level by one.'''
        print('Any level-dependent attacks must be changed manually.')
        self.__level += 1
########################################################################################################################
class Monster(SentientBeing):
    '''Subclass of SentientBeing for monsters.  Adds no new attributes —
    the distinct type keeps being kinds easy to tell apart — and registers
    each new monster in the global monst dictionary.'''

    ### CONSTRUCTOR ###
    def __init__(self, name, experience, health, species, attacks, armor):
        super().__init__(name, experience, health, species, attacks, armor)
        if name not in monst:
            monst[name] = self
########################################################################################################################
def newChar():
    '''Interactively build a Character, asking for each attribute in turn,
    and return the constructed object.  Use e.g. ``bernie = newChar()``;
    attacks are added afterwards directly to the .attacks dictionary.'''
    print("Begin new character construction.\n")
    name = input('What is the character name? ').replace(" ", "")
    player = input("What is the player name? ")
    level = int(input("What level is the character? "))
    experience = float(input("How much experience does the character have? "))
    max_hp = int(input("What is the character's max health? "))
    cur_hp = int(input("What is the character's current health? "))
    species = input("What species is the character? ")
    armor = int(input("What is the character's armor class? "))
    print('How much of each of these monetary denominations does the character have?')
    money = [int(input(prompt)) for prompt in
             ('Platinum: ', 'Gold: ', 'Silver: ', 'Copper: ', 'Electrum: ')]
    print("\nYou will need to set your attacks separately.")
    return Character(name, player, level, experience, [cur_hp, max_hp],
                     species, armor, money, {})
def newMonster():
    '''Interactively build a Monster (current health starts at max) and
    return the constructed object; attacks are set separately afterwards.'''
    print("Begin new monster construction.\n")
    name = input('What shall we call the monster? ').replace(" ", "")
    species = input("What is the species of this monster? ")
    experience = float(input("What is the experience of this monster? "))
    max_hp = int(input("What is the max health of this monster? "))
    armor = int(input("What armor class does the monster have? "))
    print("\nYou will need to set the monsters attacks separately.")
    return Monster(name, experience, [max_hp, max_hp], species, {}, armor)
def save(chars):
    '''Persist Character data to a text file: one character per line,
    fields separated by ':' — the format read back by load().'''
    filename = input('Filename: ')
    # IDIOM FIX: 'with' guarantees the file is flushed and closed even when
    # serialization raises part-way through.
    with open(filename, 'w') as fh:
        fh.write('CHARS\n')
        for char in chars.values():
            # attacks serialize as 'name1,name2;roll1,roll2' (dict keys and
            # values iterate in matching order).
            attack_names = ','.join(char.attacks.keys())
            attack_rolls = ','.join(char.attacks.values())
            attacks = attack_names + ';' + attack_rolls
            attributes = [str(char), char.playerName(), str(char.getLevel()),
                          str(char.getExp()), str(char.getHealth())[1:-1],
                          char.getSpecies(), str(char.getArmor()),
                          str(char.getMoney())[1:-1], attacks]
            fh.write(':'.join(attributes) + '\n')
        fh.write('ENDCHARS\n')
    print('Character data saved.')
def load():
    """Read a save file written by save() and return a dict mapping
    character names to Character objects.

    Use ``chars = load()``; individual characters can then be bound to
    variables, e.g. ``bernie = chars['Bernie']``.
    """
    characters = {}
    filename = input('Filename: ')
    # IDIOM FIX: 'with' closes the file even if a malformed line raises.
    with open(filename, 'r') as fh:
        fh.readline()  # skip the 'CHARS' header line
        line = fh.readline().strip("\n")
        while 'ENDCHARS' not in line:  # one character per line
            args = line.split(':')
            args[2] = int(args[2])                            # level
            args[3] = float(args[3])                          # experience
            args[4] = [int(v) for v in args[4].split(',')]    # health [current, max]
            args[6] = int(args[6])                            # armor class
            args[7] = [int(v) for v in args[7].split(',')]    # money
            # attacks are stored as 'name1,name2;roll1,roll2'.
            attack_names, attack_rolls = args[8].split(';')
            args[8] = dict(zip(attack_names.split(','), attack_rolls.split(',')))
            # Drop the occasional empty key left by an empty attacks dict.
            args[8].pop('', None)
            # The Character constructor also registers itself in chars.
            characters[args[0]] = Character(*args)
            line = fh.readline().strip("\n")
    return characters
def combat(Chars, Monst):
    '''Run a combat encounter between the characters in *Chars* and the
    monsters in *Monst* (both name -> instance dicts).

    Turn order is random.  On each turn the user types commands from the
    combatant's combatDict ('next' ends the turn); commands with arguments
    are written as 'command arg1 arg2'.  Dead beings are removed from the
    local lists and from the global chars/monst registries.  The loop ends
    when one side is empty and the winner is announced.'''
    charList = list(Chars.values())
    monsList = list(Monst.values())
    combatants = charList + monsList
    shuffle(combatants)  # random turn order
    print("\nThe order is:")
    for com in combatants:
        print('\t' + str(com))
    # Keep fighting until one side has no combatants left.
    while monsList != [] and charList != []:
        for com in combatants:
            print('\n' + str(com).upper())
            continuing = True
            while continuing:  # the turn lasts until the user types 'next'
                action = input("What does the combatant do? When done with turn, type 'next' to continue.\n >> ")
                functs = action.split()
                # ROBUSTNESS FIX: an empty line used to raise IndexError.
                if not functs:
                    print("This is not a valid action.")
                elif functs[0] == 'next':
                    continuing = False
                elif functs[0] not in com.combatDict:
                    print("This is not a valid action.")
                elif len(functs) > 1:
                    com.combatDict[functs[0]](*functs[1:])  # command with args
                else:
                    if 'get' == functs[0][:3]:
                        # Getters take no args; print what they return.
                        print(com.combatDict[functs[0]]())
                    else:
                        com.combatDict[functs[0]]()
            # When something dies in combat, it's off all lists.
            # BUG FIX: iterate over a copy — removing from the list being
            # iterated makes the loop skip the element after each removal.
            for char in list(combatants):
                if not bool(char):
                    combatants.remove(char)
                    if char in charList:
                        charList.remove(char)
                        del chars[char.getName()]
                    elif char in monsList:
                        monsList.remove(char)
                        del monst[char.getName()]
    # One side is empty: declare the winner.
    if charList == []:
        print("Battle is over. The winner is the monsters.")
    elif monsList == []:
        print("Battle is over. The winner is the characters.")
|
# Simple follower: run SSD-MobileNet-v2 object detection on the camera feed
# and steer a JetBot toward the first detection of class 1.
#For Robot
from jetbot import Robot
robot = Robot()
# Motor speed used for both turn directions (0.0-1.0 scale).
robotSpeed = 0.4
robot.stop()
import jetson.inference
import jetson.utils
net = jetson.inference.detectNet("ssd-mobilenet-v2", threshold=0.5)
#camera = jetson.utils.videoSource("csi://0") # '/dev/video0' for V4L2
camera = jetson.utils.videoSource("/dev/video1") # '/dev/video0' for V4L2
display = jetson.utils.videoOutput("display://0") # 'my_video.mp4' for file
#display = jetson.utils.videoOutput("rtp://192.168.1.169:1234") # 'my_video.mp4' for file
# Per-frame detection state (class id, box width, horizontal centre).
index = 0
width = 0
location = 0
while display.IsStreaming():
    img = camera.Capture()
    detections = net.Detect(img)
    display.Render(img)
    display.SetStatus("Object Detection | Network {:.0f} FPS".format(net.GetNetworkFPS()))
    # NOTE(review): the loop body always reads detections[0], so only the
    # first detection is used no matter how many were found — confirm this
    # is intended.
    for detection in detections:
        index = detections[0].ClassID
        width = (detections[0].Width)
        location = (detections[0].Center[0])
        #print("detection:")
        #print(index)
        #print(width)
        #print(location)
    # Presumably ClassID 1 is 'person' in this model's label map — verify.
    if(index == 1):
        # Steer based on the horizontal position of the detection centre:
        # right of ~800 px turns right, left of ~400 px turns left,
        # otherwise the target is centred and the robot stops.
        if(location > 800):
            print("Right")
            robot.right(robotSpeed)
        elif(location < 400):
            print("Left")
            robot.left(robotSpeed)
        else:
            print("Stop")
            robot.stop()
    else:
        print("Stop")
        robot.stop()
    # reset the per-frame state so a vanished detection stops the robot
    index = 0
    width = 0
    location = 0
|
from ctypes import POINTER, c_longlong, c_double, c_int, c_void_p, c_char_p, cdll, Structure, CFUNCTYPE, POINTER, sizeof, CDLL
import logging
# Module-level logger named after this module.
log = logging.getLogger(__name__)
# Load the shared library from the working directory and call its exported
# entry point.  NOTE(review): assumes ./awesome.so exists and exports
# Exmain — confirm the build step that produces it.
lib = cdll.LoadLibrary("./awesome.so")
lib.Exmain()
|
# -*- coding: utf-8 -*-
"""
Author : wangyuqiu
Mail : yuqiuwang929@gmail.com
Website : https://www.yuqiulearn.cn
Created : 2018/7/12 14:31
"""
from selenium import webdriver
import time
import random
# Scrape second-hand housing price listings for Chengdu's Gaoxin district
# from Lianjia using selenium.
# Before scraping each page, wait a random 1-3 seconds (politeness delay).
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')  # run Chrome without a window
def get_data(my_url, savefile):
    """Scrape one listings page and append its entries to *savefile*.

    Sleeps a random 1-3 s first, loads *my_url* in headless Chrome, then
    writes each element of class 'clear' as one tab-separated line.
    Returns *savefile* so calls can be chained.
    """
    sleep_time = random.uniform(1, 3)
    time.sleep(sleep_time)
    driver = webdriver.Chrome("D:/chromedriver_win32/chromedriver.exe", chrome_options=chrome_options)
    #driver = webdriver.Chrome("D:/chromedriver_win32/chromedriver.exe")
    try:
        driver.get(my_url)
        contents = driver.find_elements_by_class_name("clear")
        for cont in contents:
            savefile.write(cont.text.replace("\n", "\t") + "\n")
    finally:
        # BUG FIX: quit the browser even when the page load or parsing
        # raises — otherwise every failure leaks a Chrome process.
        driver.quit()
    return savefile
# Crawl listing pages 1-100, appending each page's rows to the output file.
# IDIOM FIX: 'with' closes the file even if a page scrape raises.
with open("cd_liangjia.xls", 'w+') as f:
    for x in range(1, 101):
        url = "https://cd.lianjia.com/ershoufang/gaoxin7/pg%d" % x
        f = get_data(url, f)
        print("第%d页,搞定了!" % x)
|
#!/usr/bin/env python3
import random
# Global translation table: uppercase char -> list of replacement options
# (filled in by parse_table below).
tr = {}
def parse_table():
    """Populate the global tr dict from table.txt.

    Each line has the form 'X = a, b' (at most two options); the uppercase
    char maps to those options plus its own lowercase form.
    """
    # IDIOM FIX: 'with' closes the file handle (the original left it to
    # the garbage collector).
    with open('table.txt', 'r') as fh:
        for l in fh.readlines():
            (char, options) = l.split(' = ')
            char = char.strip()
            options = [x.strip() for x in options.split(',')]
            assert len(options) <= 2
            options.append(char.lower())
            tr[char.upper()] = options
def print_tr():
    """Dump the translation table, one 'KEY -> options' line per key."""
    for key, options in sorted(tr.items()):
        print("{} -> {}".format(key, options))
def translate_source(filename):
    """Print *filename* with every mapped character randomly replaced by
    one of its options from tr; unmapped characters pass through as-is."""
    # IDIOM FIX: close the file deterministically via 'with'.
    with open(filename, 'r') as fh:
        contents = fh.read()
    # PERF: collect pieces and join once instead of quadratic '+=' build.
    pieces = []
    for c in contents:
        if c.upper() in tr:
            pieces.append(random.choice(tr[c.upper()]))
        else:
            pieces.append(c)
    print(''.join(pieces), end='')
# Build the translation table, then emit an obfuscated copy of the source.
parse_table()
translate_source('print_flag/pf.scala')
|
# -*- coding:utf-8 -*-
"""
This is an usage Usage example of the "network" module. In this example, we train a network instance to recognize 28x28 pixels images of handwritten digits
The data used for the training is provided by the mnist database, and loaded by the "mnist_loader" module
"""
#get access to the root of the project
import os
import sys
sys.path.insert(1, str(os.getcwd()))
#The data is loaded with the mnist loader
import mnist_loader
#training, validation, and test data are lists of respectively 50000, 10000, and 10000 tuples.
#In each tuple, there is the input value x, a column matrix of 28x28 = 784 pixel greyscale values, and the expected output value y, representing the handwritten digit
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
validation_data = list(validation_data)
test_data = list(test_data)
#We create a neural network with 28x28 = 784 input neurons, 16 hidden neurons, and 10 output neurons:
# - The activation of the 784 input neurons represent the greyscale value of the 28x28 pixels of a handwritten digit image
# - The hidden neurons add abstraction to the network and hence -> performance
# - The index of the most activated output neuron is the guessed digit
import network
#We name the model after the other created models
dirs = next(os.walk("models"))[1]
model_count = len(dirs) - 1
#You can tune :
# - the activation function (sigmoid by default)
# - the regulation of the outputs (none by default)
# NOTE(review): the hidden layer size here is 16; an earlier comment said 30.
net = network.Network("hdr_" + str(model_count + 1), [784, 16, 10])
net_description = str(net)
print("\n" + net_description)
#The network is trained with this single line. It calls the SGD training method for the network instance.
#Method call : SGD(training_data, epochs, mini_batch_size, eta, test_data=None, dropout_value = 0.2)
# - training_data is the list of (input,expected_output) tuples (where inputs are 784 column matrixes)
# - epochs is the number of complete training cycles over the training data
# - mini_batch_size is the size of each batch (group of randomly chosen training examples) during the epoch
# - eta (by default 3), is the learning rate, it will be adjusted over epochs
# - min_eta (by default 0.5) is the minimum value the learning will attain while decreasing
# - test_data (None by default) is the test_data over which the network is evaluated after each epoch (for performance tracking, optionnal)
# - verbose (True by default) is wether or not you want to see the progress after each accuracy save (each flag)
# - flags per epoch (5 by default) is how many accuracy flags you want per epoch : at each flag, the learning rate is updated
# - display_weights (True by default) is you want to see the first layer's weights evolving in real time during the training, and save the graphical representation
# - dropout value (0 to 1, None by default), is the proportion of desactivated neurons during each gradient computation
# - optimize_accuracy (False by default, many bugs), is wether or not the model is keeping the best state which occured during training
net.SGD(training_data, 5, 10, display_weights=True)
#We serialize the trained model as a network object in a file named like itself ("hdr_x")
# NOTE(review): model_count is derived from the 'models' directory but the
# pickle is written under models/hd_recognition — confirm the numbering
# stays in sync between the two locations.
import pickle
with open("models/hd_recognition/hdr_{}.pickle".format(str(model_count + 1)), "wb") as saving:
    saver = pickle.Pickler(saving)
    saver.dump(net)
#Performance testing of the network on the validation data
# NOTE(review): the divisor 10000 assumes len(validation_data) == 10000;
# len(validation_data) would be safer.
accuracy = str(100 * net.evaluate(validation_data) / 10000)
print("\nTest on the validation data -> Accuracy : {0}%\n".format(accuracy))
#We save the train record
with open("models/hd_recognition/accuracy_ladder.md", "a") as ladder:
adding = net_description + ", accuracy = " + accuracy + "\n"
ladder.write(adding)
#And update the accuracy ladder (sorting best accuracies)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
content = [net.split("= ") for net in ladder.read().split('\n')]
content.pop()
content_updated = sorted([(acc,net) for net,acc in content], reverse = True)
tostring = "\n".join(["= ".join((net,acc)) for acc,net in content_updated]) + "\n"
with open("models/hd_recognition/accuracy_ladder.md", "w") as ladder:
ladder.write(tostring)
#Prediction tests
re = False
#The asks variable permits to draw in the same figure each prediction
asks = 0
while re:
#The user choses a number to predict
re1 = True
while re1:
try:
chosen_nb = int(input("\nThere is an example for each digit in the custom_test_images folder. Enter the number you want the model to recognize based on theese custom test images : "))
assert chosen_nb >= 0 and chosen_nb <=9
re1 = False
except AssertionError:
print("\nError, the chosen number isn't a single digit.")
except ValueError:
print("\nError, you didn't enter a valid digit.")
#The image filename is retrieved
img_filename = "hd_recognition/custom_test_images/test_image_"+str(chosen_nb)+".bmp"
#Predicting the image
from PIL import Image
import numpy as np
test_image = Image.open(img_filename)
arr = 1 - np.array(test_image).reshape(784,1) / 255. #Conversion from image to array : 256-RGB to greyscale inverted (1 is black, 0 is white)
model_activations = net.feedforward(arr)
print("\nAccording to the AI, the plotted number is {0} !\n".format(np.argmax(model_activations)))
#Plotting the test_image, and the activations, in subplots (one plots the image, the other plots the model's activation)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
test_image = mpimg.imread(img_filename)
if asks == 0:
fig = plt.figure(figsize = (11, 5))
plt.show()
asks = 1
else:
plt.clf()
fig.canvas.draw()
fig.canvas.flush_events()
plt.subplot(121)
plt.title("custom image")
plt.imshow(test_image)
plt.subplot(122)
plt.title("corresponding model activations")
plt.xlabel("digit")
plt.ylabel("activation")
axes = plt.gca()
axes.set_ylim([0, 1])
plt.xticks(range(10))
plt.yticks(np.array(range(11))/10)
plt.plot(range(10), model_activations)
#Annotation function to pinpoint the activation on the second subplot
def annot_max(x, y, ax):
xmax = x[np.argmax(y)]
ymax = y.max()
text = "digit = {}, activation = {:.3f}".format(xmax,ymax)
if not ax:
ax=plt.gca()
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
arrowprops=dict(arrowstyle="->",connectionstyle="angle,angleA=0,angleB=60")
kw = dict(xycoords='data',textcoords="axes fraction",
arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
ax.annotate(text, xy=(xmax, ymax), xytext=(xmax/10 - 0.1, ymax - 0.1), **kw)
annot_max(range(10), model_activations, axes)
#Ask for a new prediction
re = str(input("predict another custom digit ? (Y/N) : ")).lower() == "y"
|
class Player:
    """A game participant identified by a display name and a board token."""

    def __init__(self, the_name: str, the_token: str):
        self.name = the_name       # display name
        self.token = the_token     # marker this player places on the board
        self.victory = False       # flipped to True once the player wins

    def __repr__(self):
        # Added for debuggability; does not change any existing behavior.
        return f"Player(name={self.name!r}, token={self.token!r}, victory={self.victory})"
|
import unittest
from subprocess import call
import removeoldbackups as rob
from os import path, utime
from freezegun import freeze_time
from datetime import timedelta, datetime
def touch(fname, times=None):
    """Create *fname* if missing and set its access/modification times.

    Args:
        fname: path of the file to create/update.
        times: datetime used for both atime and mtime.  Previously the
            default of None crashed (``None.timestamp()``); now None means
            "use the current time", matching ``os.utime`` semantics.
    """
    stamps = None if times is None else (times.timestamp(), times.timestamp())
    with open(fname, 'a'):
        utime(fname, stamps)
@freeze_time("1990-12-31")  # pin "now" so fixture mtimes are reproducible
class TestFileRemoval(unittest.TestCase):
    """Tests for removeoldbackups.run() filter expressions.

    The test methods deliberately lack the ``test_`` prefix: they are
    collected explicitly by the suite() factory below rather than by
    unittest discovery.
    """
    # Scratch directory holding the fixture files.
    test_folder = 'test_folder'
    def setUp(self):
        """Create fixture files with known modification times (frozen clock)."""
        call(["mkdir", self.test_folder])
        self.month_start = path.join(self.test_folder, 'month_start.txt')
        self.month_end = path.join(self.test_folder, 'month_end.txt')
        self.older_10 = path.join(self.test_folder, 'older_10.txt')
        self.older_2 = path.join(self.test_folder, 'older_2.txt')
        self.february_28 = path.join(self.test_folder, 'february_28.txt')
        self.march_31 = path.join(self.test_folder, 'march_31.txt')
        now = datetime.now()  # 1990-12-31 under freeze_time
        touch(self.older_10, now - timedelta(days=10))
        touch(self.older_2, now - timedelta(days=2))
        touch(self.month_start, datetime(now.year, now.month, 1))
        # December has 31 days, so this is the last day of the frozen month.
        touch(self.month_end, datetime(now.year, now.month, 31))
        touch(self.february_28, datetime(now.year, 2, 28))
        touch(self.march_31, datetime(now.year, 3, 31))
    def tearDown(self):
        """Remove the scratch directory and every fixture file in it."""
        call(["rm", "-rf", self.test_folder])
    def delete_files_older_than_week(self):
        # Files modified more than 7 days ago are removed.
        rob.run("(now - mtime) > timedelta(days=7)", self.test_folder, False)
        self.assertEqual(path.isfile(self.month_start), False)
        self.assertEqual(path.isfile(self.month_end), True)
        self.assertEqual(path.isfile(self.older_10), False)
        self.assertEqual(path.isfile(self.older_2), True)
        self.assertEqual(path.isfile(self.february_28), False)
        self.assertEqual(path.isfile(self.march_31), False)
    def delete_files_not_start_end_of_month(self):
        # Only first-of-month and last-of-month files survive.
        rob.run("mtime.day != 1 and mtime.day != monthend", self.test_folder, False)
        self.assertEqual(path.isfile(self.month_start), True)
        self.assertEqual(path.isfile(self.month_end), True)
        self.assertEqual(path.isfile(self.older_10), False)
        self.assertEqual(path.isfile(self.older_2), False)
        self.assertEqual(path.isfile(self.february_28), True)
        self.assertEqual(path.isfile(self.march_31), True)
    def delete_files_not_start_end_of_month_or_older_than_week(self):
        # Combination of the two filters above.
        rob.run("mtime.day != 1 and mtime.day != monthend and (now - mtime) > timedelta(days=7)", self.test_folder, False)
        self.assertEqual(path.isfile(self.month_start), True)
        self.assertEqual(path.isfile(self.month_end), True)
        self.assertEqual(path.isfile(self.older_10), False)
        self.assertEqual(path.isfile(self.older_2), True)
        self.assertEqual(path.isfile(self.february_28), True)
        self.assertEqual(path.isfile(self.march_31), True)
def suite():
    """Assemble the custom-named test methods (no ``test_`` prefix) into a suite."""
    bundle = unittest.TestSuite()
    for case_name in (
        'delete_files_older_than_week',
        'delete_files_not_start_end_of_month',
        'delete_files_not_start_end_of_month_or_older_than_week',
    ):
        bundle.addTest(TestFileRemoval(case_name))
    return bundle
if __name__ == "__main__":
    # Run the explicitly assembled suite (discovery would find nothing).
    unittest.TextTestRunner().run(suite())
|
# -*- coding: utf-8 -*-
import scrapy
class GongzhonghaoauthSpider(scrapy.Spider):
    """Spider skeleton for the WeChat official-account auth API.

    Only the endpoint table and shared secret are defined so far; the request
    logic (see the commented-out get_json calls) is not implemented yet.
    """
    name = 'GongZhongHaoAuth'
    # Endpoints: list internal authorizations / refresh an access token.
    apiDict = {
        "wechat_auth_list_api": "https://tianshucloud.cn/api/platform/weixin/internalAuthList?",
        "wechat_refresh_taken_api": "https://tianshucloud.cn/api/platform/weixin/refreshToken?",
    }
    # NOTE(review): API secret hard-coded in source — consider moving it to
    # settings or the environment.
    secret = 'be9eb337beb1cfefed645084f605838d'
    #auth_list_json = get_json(wechat_auth_list_api, data={'secret': 'be9eb337beb1cfefed645084f605838d'})
    #auth_list_json = get_json(wechat_refresh_taken_api, data={"id": id, 'secret': 'be9eb337beb1cfefed645084f605838d'})
|
#import the pygame library
import pygame
import time
import random
from snake import Snake
pygame.init()
#define the colours (RGB tuples)
black = (0,0,0)
white = (255,255,255)
brown = (134,72,22)
green = (0,255,0)
red = (255,0,0)
#open a window
#NOTE(review): `size` appears unused (the display uses d_width/d_height), and
#`time`/`Snake` are imported but not referenced in this file — confirm.
size = (700,500)
d_width = 800
d_height = 600
screen = pygame.display.set_mode((d_width, d_height))
pygame.display.set_caption("Snake Game")
# Fonts: large one for menu messages, smaller one for the live score.
font_style = pygame.font.SysFont("verdana", 50)
score_font = pygame.font.SysFont("gadugi", 45)
def Your_score(score):
    """Render the current score in the top-left corner of the screen."""
    label = score_font.render(f"Your Score: {score}", True, black)
    screen.blit(label, [0, 0])
def our_snake(snake_block, snake_list):
    """Draw one green square per [x, y] body segment of the snake."""
    for seg_x, seg_y in snake_list:
        pygame.draw.rect(screen, green, [seg_x, seg_y, snake_block, snake_block])
def message(msg, colour, a, b):
    """Blit *msg* at screen position (a, b) using the large menu font."""
    rendered = font_style.render(msg, True, colour)
    screen.blit(rendered, [a, b])
def game_Loop():
    """Run one snake game session.

    Handles movement input, food placement, wall/self collision and the
    game-over menu.  Pressing P on the game-over screen restarts by calling
    game_Loop() recursively; Q (or closing the window) leaves the loop and
    shuts pygame down.
    """
    game_over = False   # True -> leave the main loop and quit
    game_close = False  # True -> show the game-over menu
    clock = pygame.time.Clock()
    snake_List = []     # one [x, y] per body segment, head appended last
    snake_length = 1
    x1_change = 0
    y1_change = 0
    # Start in the middle of the display.
    x1 = d_width / 2
    y1 = d_height / 2
    snake_block = 10
    snake_speed = 20
    # Food is aligned to the 10-pixel grid the snake moves on.
    foodx = round(random.randrange(0, d_width - snake_block) / 10.0) * 10.0
    foody = round(random.randrange(0, d_height - snake_block) / 10.0) * 10.0
    while not game_over:
        # Game-over menu loop.
        while game_close == True:
            screen.fill(white)
            message('GAME OVER! :(', red, 220, 100)
            message('You scored: ' + str(snake_length -1), red, 220, 170)
            message('Press P to play again', red, 160, 270)
            message('Press Q to quit', red, 160, 340)
            pygame.display.update()
            #Main event loop
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        game_over = True
                        game_close = False
                    if event.key == pygame.K_p:
                        # NOTE: restart recurses; stack grows with each replay.
                        game_Loop()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_over = True
                game_close = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x1_change = -snake_block
                    y1_change = 0
                elif event.key == pygame.K_RIGHT:
                    x1_change = snake_block
                    y1_change = 0
                elif event.key == pygame.K_UP:
                    y1_change = -snake_block
                    x1_change = 0
                elif event.key == pygame.K_DOWN:
                    y1_change = snake_block
                    x1_change = 0
        # Wall collision — use the configured display size instead of the
        # previously hard-coded 800/600 literals (same values today, but
        # this keeps the bounds in sync if the window size changes).
        if x1 >= d_width or x1 < 0 or y1 >= d_height or y1 < 0:
            game_close = True
        screen.fill(white)
        x1 += x1_change
        y1 += y1_change
        pygame.draw.rect(screen, brown, [foodx, foody, snake_block, snake_block])
        snake_Head = []
        snake_Head.append(x1)
        snake_Head.append(y1)
        snake_List.append(snake_Head)
        if len(snake_List) > snake_length:
            del snake_List[0]
        # Self collision: the head may not touch any earlier segment.
        for x in snake_List[:-1]:
            if x == snake_Head:
                game_close = True
        our_snake(snake_block, snake_List)
        Your_score(snake_length - 1)
        pygame.display.update()
        # Eating food grows the snake and speeds the game up.
        if x1 == foodx and y1 == foody:
            foodx = round(random.randrange(0, d_width - snake_block) / 10.0) * 10.0
            foody = round(random.randrange(0, d_height - snake_block) / 10.0) * 10.0
            snake_length += 1
            snake_speed += 3
        clock.tick(snake_speed)
    pygame.quit()
    quit()
game_Loop() |
import speech_recognition as sr

AUDIO_FILE = "audio.wav"  # recording to transcribe
r = sr.Recognizer()       # initialize the recognizer
with sr.AudioFile(AUDIO_FILE) as source:
    # Load the entire file into an AudioData instance.
    audio = r.record(source)
try:
    print("The audio file contains : " + r.recognize_google(audio))
except sr.UnknownValueError:
    # Typo fixed in the user-facing messages ("Could't" -> "Couldn't").
    print("Couldn't understand the voice")
except sr.RequestError:
    print("Couldn't get the result")
|
"""Caches remote artifacts on S3."""
from os import path
from shutil import rmtree
from tempfile import mkdtemp
from urllib import urlretrieve
import logging
import re
from stacker.lookups.handlers.default import handler as default_handler
from stacker.lookups.handlers.output import handler as output_handler
from stacker.session_cache import get_session
LOGGER = logging.getLogger(__name__)
COOKBOOK_PKG_PATTERN = re.compile(r'^cookbooks-[0-9]*\.(tar\.gz|tgz)')
def upload(provider, context, **kwargs):  # pylint: disable=W0613
    """Ensure artifact is present on S3, downloading and uploading if absent.

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Keyword args mirror the hook config: the bucket may be given directly
    (``artifact_bucket``) or resolved via an output lookup
    (``artifact_bucket_output`` / ``artifact_bucket_xref``); the bucket
    prefix and source URL may come from ``default`` lookups or be given
    directly (``s3_bucket_prefix`` / ``source_url``).

    Returns: boolean for whether or not the hook succeeded.
    """
    always_upload_new_artifact = kwargs.get('always_upload_new_artifact',
                                            False)
    environment = kwargs.get('environment', 'all')
    # Resolve the destination bucket, preferring lookup-based config.
    if kwargs.get('artifact_bucket_output'):
        artifact_bucket = output_handler(
            kwargs.get('artifact_bucket_output'),
            provider=provider,
            context=context
        )
    elif kwargs.get('artifact_bucket_xref'):
        artifact_bucket = output_handler(
            kwargs.get('artifact_bucket_xref'),
            provider=provider,
            context=context,
            fqn=True
        )
    else:
        artifact_bucket = kwargs.get('artifact_bucket')
    if kwargs.get('s3_bucket_prefix_default'):
        bucket_prefix = default_handler(
            kwargs.get('s3_bucket_prefix_default'),
            provider=provider,
            context=context
        )
    else:
        # BUG FIX: this branch previously assigned the literal string
        # 's3_bucket_prefix' instead of looking the value up in kwargs
        # (compare with the source_url handling below).
        bucket_prefix = kwargs.get('s3_bucket_prefix')
    if kwargs.get('source_url_default'):
        source_url = default_handler(
            kwargs.get('source_url_default'),
            provider=provider,
            context=context
        )
    else:
        source_url = kwargs.get('source_url')
    if kwargs.get('filename'):
        filename = kwargs.get('filename')
    else:
        # Default to the final path component of the source URL.
        filename = source_url.split('/')[-1]
    bucket_key = '%s/%s/%s' % (environment, bucket_prefix, filename)
    session = get_session(provider.region)
    client = session.client('s3')
    if always_upload_new_artifact is False:
        # Skip the download/upload when the object already exists.
        list_results = client.list_objects(
            Bucket=artifact_bucket,
            Prefix=bucket_key
        )
        if 'Contents' in list_results:
            LOGGER.info('Skipping artifact upload; s3://%s/%s already '
                        'present.',
                        artifact_bucket,
                        bucket_key)
            return True
    LOGGER.info('Downloading %s...', source_url)
    tmp_dir = mkdtemp()
    file_cache_location = path.join(tmp_dir, filename)
    urlretrieve(source_url, file_cache_location)
    LOGGER.info('Uploading artifact %s to s3://%s/%s',
                filename,
                artifact_bucket,
                bucket_key)
    client.upload_file(file_cache_location,
                       artifact_bucket,
                       bucket_key,
                       ExtraArgs={'ServerSideEncryption': 'AES256'})
    # Clean up cached download
    rmtree(tmp_dir)
    return True
|
# Interactive-shell startup: enable tab completion and persistent history.
try:
    import readline
    import rlcompleter
    import atexit
    import os
except ImportError:
    print("Python shell enhancement modules not available")
else:
    # History lives in the user's home directory.
    histfile = os.path.join(os.environ["HOME"], ".pythonhistory")
    readline.parse_and_bind("tab: complete")
    if os.path.isfile(histfile):
        readline.read_history_file(histfile)
    # Persist the session's history on interpreter exit.
    atexit.register(readline.write_history_file, histfile)
    # Keep the interactive namespace clean.
    del os, histfile, readline, rlcompleter, atexit
    # (Removed the duplicate `import rlcompleter`; fixed the message grammar.)
    print("Python shell history enabled")
|
__author__ = "Narwhale"
#
# def select_sort(alist):
# """选择排序"""
#
# n = len(alist)
# for j in range(0,n-1):
# min_index = j
# for i in range(j+1,n):
# if alist[min_index] > alist[i]:
# min_index = i
#
# alist[min_index],alist[j] = alist[j],alist[min_index]
#
# li = [54,26,93,17,77,31,44,55,20]
# select_sort(li)
# print(li)
###################################
def select_sort(alist):
    """Sort *alist* in place with selection sort (O(n^2) comparisons)."""
    n = len(alist)
    # After each outer pass, alist[:sorted_end + 1] is in final position.
    for sorted_end in range(n - 1):
        # Locate the smallest element of the unsorted tail.
        smallest = sorted_end
        for candidate in range(sorted_end + 1, n):
            if alist[candidate] < alist[smallest]:
                smallest = candidate
        # Swap it into the front of the unsorted region.
        alist[sorted_end], alist[smallest] = alist[smallest], alist[sorted_end]
if __name__ == "__main__":
    # Demo run on a fixed sample list.
    sample = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    select_sort(sample)
    print(sample)
|
from django.conf.urls.defaults import *
# Email-management routes (legacy Django `patterns()` string-view syntax,
# removed in Django 1.10 — keep as-is unless the project's Django version
# is upgraded).
urlpatterns = patterns('django_webfaction.views',
    url(r'^email/add/$', 'email_changeform'),
    url(r'^email/(?P<id>\d+)/$', 'email_changeform'),
    url(r'^email/$', 'email_changelist'),
)
|
"""
Liquid time constant snn
"""
import os
import shutil
import torch
from torch import nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn import init
from torch.autograd import Variable
import math
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory and optionally archive source scripts.

    Args:
        path: experiment directory.  Intermediate directories are created as
            needed and an existing directory is reused (previously a bare
            os.mkdir failed on nested paths and on a pre-existing
            scripts/ sub-directory).
        scripts_to_save: optional iterable of file paths copied into
            ``<path>/scripts`` for reproducibility.
    """
    os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.makedirs(scripts_dir, exist_ok=True)
        for script in scripts_to_save:
            # Only the basename is kept, so the copy lands in scripts/.
            dst_file = os.path.join(scripts_dir, os.path.basename(script))
            shutil.copyfile(script, dst_file)
def model_save(fn, model, criterion, optimizer):
    """Serialize model, criterion and optimizer together into file *fn*."""
    payload = [model, criterion, optimizer]
    with open(fn, 'wb') as handle:
        torch.save(payload, handle)
def model_load(fn):
    """Load and return the (model, criterion, optimizer) triple saved in *fn*."""
    with open(fn, 'rb') as handle:
        model, criterion, optimizer = torch.load(handle)
    return model, criterion, optimizer
def save_checkpoint(state, is_best, prefix, filename='_asrnn_checkpoint.pth.tar'):
    """Persist *state* under prefix+filename; mirror to the best-model file when is_best."""
    target = prefix + filename
    print('saving at ', target)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, prefix + '_asrnn_model_best.pth.tar')
def count_parameters(model):
    """Return the total number of trainable parameters in model.network."""
    total = 0
    for param in model.network.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
###############################################################################################
############################### Define SNN layer #########################################
###############################################################################################
# Globals shared by the spiking-neuron utilities below.
b_j0 = 1. # neural threshold baseline
R_m = 3 # membrane resistance
dt = 1  # simulation step size
gamma = .5 # gradient scale
lens = 0.3  # width (sigma) of the surrogate-gradient Gaussian
def gaussian(x, mu=0., sigma=.5):
    """Gaussian pdf evaluated elementwise on tensor *x* (mean mu, std sigma)."""
    norm = torch.sqrt(2 * torch.tensor(math.pi)) * sigma
    return torch.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / norm
class ActFun_adp(torch.autograd.Function):
    """Heaviside spike activation with a surrogate (multi-Gaussian) gradient."""
    @staticmethod
    def forward(ctx, input): # input = membrane potential- threshold
        # Spike iff the membrane exceeds the threshold (hard step).
        ctx.save_for_backward(input)
        return input.gt(0).float() # is firing ???
    @staticmethod
    def backward(ctx, grad_output): # approximate the gradients
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        # temp = abs(input) < lens
        scale = 6.0
        hight = .15
        # temp = torch.exp(-(input**2)/(2*lens**2))/torch.sqrt(2*torch.tensor(math.pi))/lens
        # Surrogate derivative: a central Gaussian minus two wider side
        # lobes; module-level `lens` sets the width, `gamma` rescales it.
        temp = gaussian(input, mu=0., sigma=lens) * (1. + hight) \
            - gaussian(input, mu=lens, sigma=scale * lens) * hight \
            - gaussian(input, mu=-lens, sigma=scale * lens) * hight
        # temp = gaussian(input, mu=0., sigma=lens)
        return grad_input * temp.float() * gamma
        # return grad_input
# Functional alias used by the neuron-update helpers below.
act_fun_adp = ActFun_adp.apply
def mem_update_adp(inputs, mem, spike, tau_adp,tau_m, b, dt=1, isAdapt=1):
    """One step of the adaptive-threshold LIF neuron update.

    Args (tensors broadcastable to the layer shape):
        inputs: synaptic input current for this step.
        mem: membrane potential from the previous step.
        spike: spike output from the previous step.
        tau_adp, tau_m: per-neuron decay factors (callers pass sigmoid
            outputs, i.e. values in (0, 1)).
        b: threshold-adaptation state.
        dt: step size (unused in this formulation).
        isAdapt: when truthy, spiking raises the threshold (beta = 1.8).

    Returns:
        (mem, spike, B, b): new membrane potential, new spike output,
        effective threshold, and new adaptation state.
    """
    alpha = tau_m
    ro = tau_adp
    if isAdapt:
        beta = 1.8
    else:
        beta = 0.
    # Threshold adaptation: decays with ro, bumped by last step's spikes.
    b = ro * b + (1 - ro) * spike
    B = b_j0 + beta * b
    # Leaky integration of the membrane toward the input.
    d_mem = -mem + inputs
    mem = mem + d_mem*alpha
    inputs_ = mem - B
    spike = act_fun_adp(inputs_) # act_fun : approximation firing function
    # Hard reset: zero the membrane wherever a spike occurred.
    mem = (1-spike)*mem
    return mem, spike, B, b
# def mem_update_adp(inputs, mem, spike, tau_adp,tau_m, b, dt=1, isAdapt=1):
# alpha = tau_m#torch.exp(-1. * dt / tau_m).cuda()
# ro = tau_adp#torch.exp(-1. * dt / tau_adp).cuda()
# # tau_adp is tau_adaptative which is learnable # add requiregredients
# if isAdapt:
# beta = 1.8
# else:
# beta = 0.
# b = ro * b + (1 - ro) * spike
# B = b_j0 + beta * b
# mem = mem * alpha + (1 - alpha) * R_m * inputs - B * spike * dt
# inputs_ = mem - B
# spike = act_fun_adp(inputs_) # act_fun : approximation firing function
# return mem, spike, B, b
def output_Neuron(inputs, mem, tau_m, dt=1):
    """
    The read out neuron is leaky integrator without spike:
    the membrane moves toward the input by a fraction tau_m.
    """
    return mem + (inputs - mem) * tau_m
###############################################################################################
###############################################################################################
###############################################################################################
class SNN_rec_cell(nn.Module):
    """One spiking layer with learnable per-neuron time constants.

    When ``is_rec`` is True the previous spike output is concatenated to the
    input, making the layer recurrent.
    """
    def __init__(self, input_size, hidden_size,is_rec = True):
        super(SNN_rec_cell, self).__init__()
        # print('SNN-ltc ')
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.is_rec = is_rec
        # self.rnn_name = 'SNN-ltc cell'
        # Recurrent variant feeds [input, previous spikes] to the dense layer.
        if is_rec:
            self.layer1_x = nn.Linear(input_size+hidden_size, hidden_size)
        else:
            self.layer1_x = nn.Linear(input_size, hidden_size)
        # self.layer1_tauAdp = nn.Linear(2*hidden_size, hidden_size)
        # self.layer1_tauM = nn.Linear(2*hidden_size, hidden_size)
        # Learnable raw time constants; squashed through a sigmoid in forward.
        self.tau_adp = nn.Parameter(torch.Tensor(hidden_size))
        self.tau_m =nn.Parameter(torch.Tensor(hidden_size))
        self.act1 = nn.Sigmoid()
        # nn.init.normal_(self.tau_adp, 200,50)
        # nn.init.normal_(self.tau_m, 20,5)
        nn.init.normal_(self.tau_adp, 4,1.)
        nn.init.normal_(self.tau_m, 2,1.)
        nn.init.xavier_uniform_(self.layer1_x.weight)
        # nn.init.xavier_uniform_(self.layer1_tauAdp.weight)
        # nn.init.xavier_uniform_(self.layer1_tauM.weight)
    def forward(self, x_t, mem_t,spk_t,b_t):
        """Advance the layer one time step; returns (membrane, spikes, adaptation)."""
        if self.is_rec:
            dense_x = self.layer1_x(torch.cat((x_t,spk_t),dim=-1))
        else:
            dense_x = self.layer1_x(x_t)
        # tauM1 = torch.exp(-1./self.tau_m)
        # tauAdp1 = torch.exp(-1./self.tau_adp)
        # Map the raw parameters into (0, 1) decay factors.
        tauM1 = self.act1(self.tau_m)
        tauAdp1 = self.act1(self.tau_adp)
        # tauM1 = self.act1(self.layer1_tauM(torch.cat((dense_x,mem_t),dim=-1)))
        # tauAdp1 = self.act1(self.layer1_tauAdp(torch.cat((dense_x,b_t),dim=-1)))
        mem_1,spk_1,_,b_1 = mem_update_adp(dense_x, mem=mem_t,spike=spk_t,
                        tau_adp=tauAdp1,tau_m=tauM1,b =b_t)
        return mem_1,spk_1,b_1
    def compute_output_size(self):
        # Output feature size equals the hidden width.
        return [self.hidden_size]
class SNN(nn.Module):
    """Three-layer spiking network over 2-channel image frames.

    Two feed-forward cells process the two input channels separately; their
    spikes are concatenated into a recurrent third cell whose (dropped-out)
    spikes drive a non-spiking leaky readout.
    """
    def __init__(self, input_size, hidden_size,output_size, n_timesteps, P=10):
        super(SNN, self).__init__()
        print('SNN-lc ', P)
        self.P = P
        self.step = n_timesteps // self.P
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_timesteps = n_timesteps
        self.rnn_name = 'SNN-lc cell'
        # Channel-specific feed-forward cells, then one recurrent merge cell.
        self.snn1 = SNN_rec_cell(input_size,hidden_size,False)
        self.snn2 = SNN_rec_cell(input_size,hidden_size,False)
        self.snn3 = SNN_rec_cell(2*hidden_size,hidden_size)
        self.layer3_x = nn.Linear(hidden_size,output_size)
        # self.layer3_tauM = nn.Linear(output_size*2,output_size)
        # Learnable readout time constant (sigmoid-squashed in forward).
        self.tau_m_o = nn.Parameter(torch.Tensor(output_size))
        nn.init.constant_(self.tau_m_o, 0.)
        # nn.init.constant_(self.tau_m_o, 0.)
        nn.init.xavier_uniform_(self.layer3_x.weight)
        self.act1 = nn.Sigmoid()
        self.act2 = nn.Sigmoid()
        self.act3 = nn.Sigmoid()
        self.dp1 = nn.Dropout(0.1)#.1
        self.dp2 = nn.Dropout(0.1)
        self.dp3 = nn.Dropout(0.1)
    def forward(self, inputs, h):
        """One frame step.

        Args:
            inputs: frame tensor; indexed as (batch, channel, H, W) below and
                4x4 average-pooled so H*W/16 must equal input_size — assumed
                from the .view() calls; TODO confirm against the data loader.
            h: tuple of per-layer states as produced by SeqModel.init_hidden.

        Returns:
            (log_softmax output, final state tuple, list with the state tuple).
        """
        self.fr = 0
        T = inputs.size()[1]
        # outputs = []
        hiddens = []
        b,c,d1,d2 = inputs.shape
        # for x_i in range(T):
        # Downsample each channel, then flatten to the cell input width.
        x_down = F.avg_pool2d(inputs[ :,:,:,: ],kernel_size=4,stride=4)
        x1 = x_down[:,0,:,:].view(b,self.input_size)
        x2 = x_down[:,1,:,:].view(b,self.input_size)
        mem_1,spk_1,b_1 = self.snn1(x1, mem_t=h[0],spk_t=h[1],b_t = h[2])
        mem_2,spk_2,b_2 = self.snn2(x2, mem_t=h[3],spk_t=h[4],b_t = h[5])
        spk_1_dp = self.dp1(spk_1)
        spk_2_dp = self.dp2(spk_2)
        mem_3,spk_3,b_3 = self.snn3(torch.cat((spk_2_dp,spk_1_dp),dim=-1), mem_t=h[6],spk_t=h[7],b_t = h[8])
        # mem_3,spk_3,b_3 = self.snn3(spk_2_dp+spk_1_dp, mem_t=h[6],spk_t=h[7],b_t = h[8])
        spk_3_dp = self.dp3(spk_3)
        dense3_x = self.layer3_x(spk_3_dp)
        tauM2 = self.act3(self.tau_m_o)
        # tauM2 = self.act3(self.layer3_tauM(torch.cat((dense3_x, h[-2]),dim=-1)))
        # Non-spiking leaky readout.
        mem_out = output_Neuron(dense3_x,mem=h[-2],tau_m = tauM2)
        out =mem_out
        h = (mem_1,spk_1,b_1,
            mem_2,spk_2,b_2,
            mem_3,spk_3,b_3,
            mem_out,
            out)
        f_output = F.log_softmax(out, dim=1)
        # outputs.append(f_output)
        hiddens.append(h)
        # Track the mean firing rate across the three spiking layers.
        self.fr = self.fr+ spk_1.detach().cpu().numpy().mean()/3\
            + spk_2.detach().cpu().numpy().mean()/3\
            + spk_3.detach().cpu().numpy().mean()/3
        # output = torch.as_tensor(outputs)
        final_state = h
        # self.fr = self.fr/T
        return f_output, final_state, hiddens
class SeqModel(nn.Module):
    """Sequence wrapper that unrolls the SNN core over the time dimension."""
    def __init__(self, ninp, nhid, nout, dropout=0.0, dropouti=0.0, dropouth=0.0, wdrop=0.0,
                 temporalwdrop=False, wnorm=True, n_timesteps=784, nfc=256, parts=10):
        super(SeqModel, self).__init__()
        self.nout = nout    # Should be the number of classes
        self.nhid = nhid
        self.rnn_name = 'SNN'
        self.network = SNN(input_size=ninp, hidden_size=nhid, output_size=nout,n_timesteps=n_timesteps, P=parts)
        self.l2_loss = nn.MSELoss()
    def forward(self, inputs, hidden):
        """Run the SNN over each time slice of *inputs*.

        Returns (per-step log-softmax outputs, final hidden state,
        zero reconstruction-loss placeholder).
        """
        # inputs = inputs.permute(2, 0, 1)
        t = inputs.size()[1]
        # print(inputs.shape) # L,B,d
        outputs = []
        for i in range(t):
            f_output, hidden, hiddens= self.network.forward(inputs[:,i,:,:,:], hidden)
            outputs.append(f_output)
        recon_loss = torch.zeros(1, device=inputs.device)
        return outputs, hidden, recon_loss
    def init_hidden(self, bsz):
        """Build the initial state tuple for batch size *bsz*.

        Layout matches SNN.forward's expectations: (mem, spike, adaptation)
        per spiking layer, then the readout membrane and its copy.
        """
        weight = next(self.parameters()).data
        return (weight.new(bsz,self.nhid).uniform_(),
                weight.new(bsz,self.nhid).zero_(),
                weight.new(bsz,self.nhid).fill_(b_j0),
                # layer 2
                weight.new(bsz,self.nhid).uniform_(),
                weight.new(bsz,self.nhid).zero_(),
                weight.new(bsz,self.nhid).fill_(b_j0),
                # layer 3
                weight.new(bsz,self.nhid).uniform_(),
                weight.new(bsz,self.nhid).zero_(),
                weight.new(bsz,self.nhid).fill_(b_j0),
                # layer out
                weight.new(bsz,self.nout).zero_(),
                # sum spike
                weight.new(bsz,self.nout).zero_(),
                )
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from .models import GadgetSnap
def get_gadget_snaps():
    """Return all gadget snaps: non-rolling-core first (newest release first),
    then the rolling-core snaps appended at the end."""
    # Idiom fix: list(queryset) instead of a pass-through comprehension.
    snaps = list(GadgetSnap.objects.exclude(
        release__name='rolling-core').order_by('-release'))
    snaps += list(GadgetSnap.objects.filter(
        release__name='rolling-core'))
    return snaps
class GadgetSnapListPluginLarge(CMSPluginBase):
    """CMS plugin rendering the full gadget-snap list."""
    # Keeping the name short to be able to differentiate them
    # in the editor dropdown
    name = _("Snap list - Gadget")
    render_template = "gadget_snap_list.html"
    text_enabled = True
    def render(self, context, instance, placeholder):
        # Inject the snap list; the queryset is evaluated on every render.
        context.update({
            'gadget_snap_list': get_gadget_snaps(),
        })
        return context
plugin_pool.register_plugin(GadgetSnapListPluginLarge)
class GadgetSnapListPluginSmall(CMSPluginBase):
    """CMS plugin rendering the short gadget-snap list variant."""
    # Keeping the name short to be able to differentiate them
    # in the editor dropdown
    name = _("Snap shortlist - Gadget")
    render_template = "gadget_snap_shortlist.html"
    text_enabled = True
    def render(self, context, instance, placeholder):
        # Same data as the large plugin; only the template differs.
        context.update({
            'gadget_snap_list': get_gadget_snaps(),
        })
        return context
plugin_pool.register_plugin(GadgetSnapListPluginSmall)
|
import matplotlib.pyplot as plt
import pandas as pd
from scipy.io import loadmat
import numpy as np
from scipy import signal
import math as mat
# reading as python dict
data_dict = loadmat('ecgca771_edfm.mat')
# extracting data array - the key is 'val'
data_array = data_dict['val']
# transpose for consistency (samples as rows, channels as columns)
data_array = data_array.transpose(1, 0)
# convert to df
df = pd.DataFrame(data_array, columns=['ch' + str(n) for n in range(1, data_array.shape[1] + 1)])
# remove duplicates
# df = df.loc[~df.index.duplicated(keep='first')]
# same as
# ['ch1', 'ch2', 'ch3', 'ch4', 'ch5', 'ch6']
# visual inspection
df['ch3'].plot()
plt.title('Channel 3 Signal')
plt.savefig('out_signal3.png', dpi=128)
plt.close()
# Calculate relative amplitude dividing by 100
# NOTE(review): the divisor 100 presumably converts raw ADC units to mV —
# confirm against the recording's gain.
df_divided = df['ch3'] / 100
print(df_divided)
# calculate timing values
# NOTE(review): assumes the recording was sampled at 1 kHz — TODO confirm.
sampling_frequency = 1000
time_values = np.arange(0, df_divided.shape[0]) / sampling_frequency
print(time_values)
# function low pass filter
def low_pass_filter(input_data, fc):
    """Zero-phase first-order low-pass filter.

    fc is the cutoff in cycles per sample; the single-pole coefficient is
    derived from it and applied forward-backward via filtfilt (unit DC gain).
    """
    k = 0.7  # cut off value
    omega = 2 * np.pi * fc
    cos_w = np.cos(omega)
    sin_w = np.sin(omega)
    alpha = (1 - k * cos_w - np.sqrt(2 * k * (1 - cos_w) - k ** 2 * sin_w ** 2)) / (1 - k)
    return signal.filtfilt(1 - alpha, [1, -alpha], input_data)
# Estimate baseline wander with a very low cutoff and subtract it out.
baseline = low_pass_filter(df_divided, 0.7 / sampling_frequency)
X = df_divided - baseline
# function peak detection
def peak_detection(input_data, heart_rate, fflag=0):
    """Return a 0/1 integer array marking local extrema of *input_data*.

    A sample is a peak when it equals the max (or min) of a sliding window
    of half-width ``rng = floor(0.5 / heart_rate)`` samples; detections
    closer than ``rng`` to an earlier one are discarded as duplicates.

    Args:
        input_data: 1-D array-like signal.
        heart_rate: expected beat rate in beats per sample (callers pass
            beats-per-second / sampling_frequency).
        fflag: 0 = auto-detect polarity (maxima when the signal's largest
            magnitude is positive), truthy = force maxima.

    This is a repaired MATLAB port: the original iterated ``for j in N``
    over an int, read an undefined global ``x``, called the undefined
    ``find``/``diff``, indexed one past the end in the edge windows, and
    initialised ``peaks`` to ``arange`` instead of zeros.
    """
    x = np.asarray(input_data)
    N = x.shape[0]
    peaks = np.zeros(N, dtype=int)
    th = 0.5
    rng = mat.floor(th / heart_rate)
    if fflag:
        flag = fflag
    else:
        # Maxima when the dominant deflection is positive, else minima.
        flag = abs(x.max()) > abs(x.min())
    for j in range(N):
        lo = max(0, j - rng)
        hi = min(N, j + rng + 1)
        window = x[lo:hi]
        if flag:
            if window.max() == x[j]:
                peaks[j] = 1
        else:
            if window.min() == x[j]:
                peaks[j] = 1
    # remove fake peaks: drop any detection closer than rng to its predecessor
    idx = np.flatnonzero(peaks)
    gaps = np.diff(idx)
    peaks[idx[1:][gaps < rng]] = 0
    return peaks
# Modeling maternal ECG
parameter_for_peak_detection = 1.35
# NOTE(review): the actual peak-detection call is commented out below, so
# only the de-baselined signal is plotted — confirm whether this is WIP.
# peaks1 = peak_detection(X, parameter_for_peak_detection/sampling_frequency)
# I = find(peaks1)
# visual inspection
X.plot()
plt.title('Maternal ECG Peaks')
plt.savefig('maternal_ecg_peaks.png', dpi=128)
plt.close()
# --- filtfilt demo (adapted from the scipy.signal.filtfilt docs) ---
t = np.linspace(0, 1.0, 2001)
low = np.sin(2 * np.pi * 5 * t)     # low-frequency component to recover
high = np.sin(2 * np.pi * 250 * t)  # high-frequency component to reject
x = low + high
b, a = signal.butter(8, 0.125)
y = signal.filtfilt(b, a, x, padlen=150)
# Residual between the filtered signal and the true low-frequency part.
# BUG FIX: this referenced the undefined name `xlow` (NameError); the
# low-frequency component is named `low` in this script.
np.abs(y - low).max()
b, a = signal.ellip(4, 0.01, 120, 0.125)  # Filter to be applied.
np.random.seed(123456)
n = 60
sig = np.random.randn(n) ** 3 + 3 * np.random.randn(n).cumsum()
# Compare the two edge-handling strategies of filtfilt.
gust = signal.filtfilt(b, a, sig, method="gust")
pad = signal.filtfilt(b, a, sig, padlen=50)
plt.plot(sig, 'k-', label='input')
plt.plot(gust, 'b-', linewidth=4, label='gust')
plt.plot(pad, 'c-', linewidth=1.5, label='pad')
plt.legend(loc='best')
plt.show()
|
# Read T test cases; each following line holds integers whose half-sum
# (integer division) is printed.
for _ in range(int(input())):
    values = list(map(int, input().split()))
    print(sum(values) // 2)
|
from flask_login import login_required
from views.base_view import BaseView
class IndexView(BaseView):
    """Landing-page view; requires an authenticated session."""
    @login_required
    def get(self):
        # Render the index template with its page title.
        return self.render_template('index.html', page_name='Index')
|
from urllib.parse import urlencode
# Build a query string from a parameter mapping and append it to the base URL.
params = {
    'name': 'zjx',
    'age': 23,
}
base_url = 'http://zhaojiaxing.top?'
query_string = urlencode(params)
url = base_url + query_string
print(url)
# Generated by Django 2.0.3 on 2018-03-13 04:17
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop Product.vid, then delete the Vid model."""
    dependencies = [
        ('stock', '0002_product_vid'),
    ]
    operations = [
        # The FK must be removed before the referenced model can be deleted.
        migrations.RemoveField(
            model_name='product',
            name='vid',
        ),
        migrations.DeleteModel(
            name='Vid',
        ),
    ]
|
from common.run_method import RunMethod
import allure
@allure.step("小程序/基础/发送短信验证码")
def verificationCode_receiveVerificationCode_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "小程序/基础/发送短信验证码"
url = f"/service-user/verificationCode/receiveVerificationCode"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极客数学帮(家长APP)/首页/用户发送验证码")
def verificationCode_receiveVerificationCode_customer_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "极客数学帮(家长APP)/首页/用户发送验证码"
url = f"/service-user/verificationCode/receiveVerificationCode/customer"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
|
def check_three_and_two(array):
    """True iff the values of *array* split into exactly a triple and a pair."""
    tallies = sorted(array.count(value) for value in set(array))
    return tallies == [2, 3]
'''
Given an array with 5 string values "a", "b" or "c", check if
the array contains three and two of the same values.
Examples
["a", "a", "a", "b", "b"] ==> true // 3x "a" and 2x "b"
["a", "b", "c", "b", "c"] ==> false // 1x "a", 2x "b" and 2x "c"
["a", "a", "a", "a", "a"] ==> false // 5x "a"
'''
|
from Dota2AbilitiesForAlexa.dota2_skill_builder import Dota2SkillBuilder
from Dota2AbilitiesForAlexa.json_parser import JsonParser
|
def terminate_app():
    """Ask whether the user is done; reopen the main menu on 'n' (Python 2)."""
    print 'Are you done? (y/n)'
    choice = raw_input()
    # Any answer other than 'n' falls through and lets the app exit.
    if choice.lower() == 'n':
        # NOTE(review): presumably imported lazily to avoid a circular
        # import with display_menu — confirm.
        from display_menu import display_menu
        display_menu()
|
# Global comparison counters shared by the sorting routines and the driver.
comp = 0  # comparisons done by the current heap_sort run
cont = 0  # comparisons accumulated across runs (used by the driver below)


def max_heapfy(lista, raiz, tamanho):
    """Sift lista[raiz] down within the first *tamanho* elements so the
    subtree rooted at *raiz* satisfies the max-heap property."""
    esq = 2 * raiz + 1   # left child
    dir = 2 * raiz + 2   # right child
    maior = raiz
    global comp
    if esq < tamanho and lista[esq] > lista[raiz]:
        maior = esq
    if dir < tamanho and lista[dir] > lista[maior]:
        maior = dir
    if maior != raiz:
        lista[maior], lista[raiz] = lista[raiz], lista[maior]
        max_heapfy(lista, maior, tamanho)
    comp += 1


def heap_sort(lista):
    """Sort *lista* in place with heapsort; prints and returns the sorted list."""
    n = len(lista)
    global comp
    # Build phase: heapify over the FULL length n.
    # BUG FIX: this previously passed n - 1, which excluded the last element
    # from the heap build and could yield an incorrectly sorted result
    # (e.g. [1, 2, 3] came back as [1, 3, 2]).
    for i in range(n // 2, -1, -1):
        max_heapfy(lista, i, n)
    # Extraction phase: move the current max to the end, shrink the heap.
    for i in range(n - 1, 0, -1):
        lista[0], lista[i] = lista[i], lista[0]
        max_heapfy(lista, 0, i)
    print("Lista ordenada:", lista)
    print("Comparações:", comp)
    return lista
def gerar(tamanho):
    """Build a list of `tamanho` random ints in [0, 100] and heap-sort it.

    FIX: the parameter was named `int`, shadowing the builtin; renamed to
    `tamanho` (all call sites pass it positionally, so this is
    backward-compatible).
    """
    from random import randint
    resposta = [randint(0, 100) for _ in range(tamanho)]
    print("Lista não ordenada:", resposta, "\n")
    return heap_sort(resposta)
# Interactive driver: ask for a vector size, run 50 sorts, and report the
# average number of comparisons per run.
print("Qual o tamanho do vetor:")
print("1 - 5\n2 - 10\n3 - 100\n4 - 1000\n5 - 10000\n")
vetor = int(input())
while vetor < 1 or vetor > 5:
    print("Opção invalida.")
    print("Qual o tamanho do vetor:")
    print("1 - 5\n2 - 10\n3 - 100\n4 - 1000\n5 - 10000\n")
    vetor = int(input())
# Map menu choice -> vector size (replaces the if/elif chain).
vetor = {1: 5, 2: 10, 3: 100, 4: 1000, 5: 10000}[vetor]
# FIX: the original looped 51 times and added the *previous* run's counter
# at the top of each iteration, so it performed one extra sort whose
# comparisons were never counted. Accumulate after each run instead.
for x in range(50):
    comp = 0
    gerar(vetor)
    cont += comp
    print("-" * 100)
print("Media de comparações:", cont / 50)
|
def obter_salario_atual():
    """Prompt the user and return the current salary as a float."""
    valor = input('Informe o salário atual: ')
    return float(valor)
def obter_porcentagem_de_aumento(salario):
    """Return the raise percentage for the salary bracket.

    Brackets: <=280 -> 20%, <=700 -> 15%, <=1500 -> 10%, above -> 5%.
    """
    faixas = ((280, 20), (700, 15), (1500, 10))
    for limite, porcentagem in faixas:
        if salario <= limite:
            return porcentagem
    return 5
def obter_valor_do_aumento(salario, porcentagem):
    """Return the raise amount: salary times the percentage as a fraction."""
    fracao = porcentagem / 100
    return salario * fracao
if __name__ == '__main__':
    # Interactive entry point: read the salary, look up the bracketed raise
    # percentage, and print a small report.
    print('Calculadora de salário ajustado')
    salario_atual = obter_salario_atual()
    porcentagem_de_aumento = obter_porcentagem_de_aumento(salario_atual)
    valor_do_aumento = obter_valor_do_aumento(salario_atual, porcentagem_de_aumento)
    novo_salario = salario_atual + valor_do_aumento
    print('Salário atual: {:.2f}'.format(salario_atual))
    print('Porcentagem de aumento: {:.0f}%'.format(porcentagem_de_aumento))
    print('Valor do aumento: {:.2f}'.format(valor_do_aumento))
    print('Novo salário ajustado: {:.2f}'.format(novo_salario))
|
from __future__ import print_function
from __future__ import division
from pyLM.units import *
import h5py
import numpy as np
import os
filename_lm='morph_dend.lm'
filename_morph='CA1dend_small.h5'
# Avogadro's number (molecules per mole).
NA = 6.022e23
##
##
##
print('Set a simulation space.')
latticeSpacing=nm(8)
#sim=RDME.RDMESimulation(dimensions=micron(2.048,2.048,2.048), \
#	spacing=latticeSpacing)
print('latticeSpacing: ', latticeSpacing)
print('Define regions')
print('Insert dendritic geometry.')
# Load the precomputed dendrite morphology (voxelized) from HDF5.
with h5py.File(filename_morph,'r') as r:
    dendrite  = r['dendrite'][()]
    dendrite_not_mitochondrion = r['dendrite not mitochondrion'][()]
    PSD       = r['PSD'][()]
    membrane_area = r['membrane areas'][()]
    pitch     = r['unit length per voxel (um)'][()]
    num_voxel_dend = r['voxel num of dendrite not mitochondrion'][()]
# Number of voxels occupied by the dendrite (non-zero entries).
volume = np.sum(dendrite > 0)
#morpho = buildAnyShape(sim, volume, domains, membrane_area, PSD)
# NOTE(review): the label says num_voxel_dend but this prints `volume`
# (all dendrite voxels, incl. mitochondria) — confirm which was intended.
print('num_voxel_dend: ', volume)
print('num_voxel_dend: ', num_voxel_dend)
# Voxel count * spacing^3 gives volume; *1000 converts m^3 -> litres.
# NOTE(review): assumes nm(8) returns metres — confirm pyLM units.
volume_in_L  = num_voxel_dend * latticeSpacing * latticeSpacing * latticeSpacing * 1000
volume_in_m3 = num_voxel_dend * latticeSpacing * latticeSpacing * latticeSpacing
print('Volume in um3: ',volume_in_m3*(1e6)*(1e6)*(1e6))
print('Volume in L: ', volume_in_L )
print('Volume in fL: ', volume_in_L *1e15 )
# 6000 moleules per 100uM CaM and 0.1fL Spine
# Molecules per litre at 1 umol/L, then molecules in this volume at 1 uM.
number_1umol = NA /(1e6)
number_1uM = number_1umol * volume_in_L
print('number_per_1uM:', number_1uM)
print('Set molecules')
# Initial concentrations (uM) for Ca, calmodulin, calbindin, calcineurin.
conc_Ca  = 100 # uM
conc_CaM = 100 # uM
conc_CB  = 120 # uM
conc_CN  = 0.5 # uM
num_Ca  = int(conc_Ca  * number_1uM)
num_CaM = int(conc_CaM * number_1uM)
num_CB  = int(conc_CB  * number_1uM)
num_CN  = int(conc_CN  * number_1uM)
print('num_Ca : ', num_Ca )
print('num_CaM: ', num_CaM)
print('num_CB : ', num_CB )
print('num_CN : ', num_CN )
|
import re
def make_header(title):
    """Return the standard HTML prologue and head section for `title`."""
    parts = [
        '<!DOCTYPE HTML PUBLIC>\n',
        '<html>\n',
        '<head>\n',
        '<title>' + str(title) + '</title>\n',
        '<script src="../sorttable.js"></script>\n',
        '<link rel="stylesheet" type="text/css" href="../style.css">\n',
        '</head>\n',
    ]
    return ''.join(parts)
def make_body_start():
    """Return the opening <body> tag."""
    return '<body>\n'
def make_body_end():
    """Return the closing </body> tag."""
    return '</body>\n'
def make_footer():
    """Return the closing </html> tag."""
    return '</html>\n'
def make_masthead(links, active_index):
    """Return the navigation menu markup.

    The link at `active_index` is rendered with class="active".
    """
    items = []
    for index, link in enumerate(links):
        css = ' class="active"' if index == active_index else ''
        items.append('<li' + css + '>' + link + '</li>\n')
    return ('<div id="hmenu">\n'
            '<ul class="nav">\n'
            + ''.join(items) +
            '</ul>\n'
            '</div>\n')
def make_heading(text, level=1, align=None):
    """Return an <hN> heading, optionally with an align attribute."""
    tag = 'h' + str(level)
    attrs = '' if align is None else ' align="' + align + '"'
    return '<' + tag + attrs + '>' + text + '</' + tag + '>'
def make_paragraph(text, align=None, id=None):
    """Return a <p> element, optionally with id and align attributes.

    FIX: the original ignored the `align` argument's value and always
    emitted align="center"; use the caller-supplied value instead.
    (The `id` parameter name shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    p = '<p'
    if id is not None:
        p += ' id="' + id + '"'
    if align is not None:
        p += ' align="' + align + '"'
    p += '>' + text + '</p>\n'
    return p
def make_link(dest, text, new_window=False):
    """Return an anchor tag; new_window adds target="_blank"."""
    target = ' target="_blank"' if new_window else ''
    return '<a href="' + dest + '"' + target + '>' + text + '</a>'
def make_table_start(col_aligns=None, col_widths=None, style='t1'):
    """Open a table with the given CSS class and optional column widths.

    NOTE: `col_aligns` is accepted but unused; kept for backward
    compatibility with existing callers.
    """
    pieces = ['<table class="' + style + '">\n']
    if col_widths is not None:
        pieces.extend('<col width="' + str(width) + '">\n' for width in col_widths)
    return ''.join(pieces)
def make_table_header(header):
    """Return a header row of <th> cells (each followed by a space)."""
    cells = ''.join('<th>' + h + '</th> ' for h in header)
    return '<tr>' + cells + '</tr>\n'
def make_table_row(row, colours=None):
    """Return a row of <td> cells; `colours` (rgb tuples) styles each cell."""
    cells = []
    for i, value in enumerate(row):
        if colours is None:
            cells.append('<td>' + value + '</td> ')
        else:
            cells.append('<td style="color: rgb' + str(colours[i]) + '">' + value + '</td> ')
    return '<tr>' + ''.join(cells) + '</tr>\n'
def make_table_end():
    """Return the closing </table> tag."""
    return '</table>\n'
def replace_chars(text):
    """Replace spaces and slashes with underscores (safe id/filename text)."""
    return re.sub('[ /]', '_', text)
def make_image(source):
    """Return a centered <img> tag for the given source path."""
    return '<center><img src="' + source + '" align="middle" ></center>\n'
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
class Survey:
    """Validation helpers for the dojo survey form."""

    @staticmethod
    def validate_survey(survey):
        """Flash an error message for each invalid field; return validity.

        NOTE(review): the return value of connectToMySQL(...) was unused in
        the original; the call is kept in case opening the connection has
        side effects — confirm whether it can be removed.
        """
        connectToMySQL('dojo_survey_schema')
        is_valid = True
        if len(survey['name']) < 1:
            flash("Name must be at least 1 character long*")
            is_valid = False
        if len(survey['comment']) < 1:
            flash("Comments are required*")
            is_valid = False
        return is_valid
class Second1:
    """Mutable shape record with class-level defaults (red cube, qty 1)."""

    color = "red"
    form = "cube "          # trailing space kept from the original value
    kolichestvo = 1

    def changecolor(self, newcolor):
        """Set the colour."""
        setattr(self, "color", newcolor)

    def changeform(self, newform):
        """Set the shape."""
        setattr(self, "form", newform)

    def changekolichestvo(self, newkolichestvo):
        """Set the quantity."""
        setattr(self, "kolichestvo", newkolichestvo)
class Second2:
    """Mutable shape record with class-level defaults (black circle, qty 2)."""

    color = "black"
    form = "circle"
    kolichestvo = 2

    def changecolor(self, newcolor):
        """Set the colour."""
        setattr(self, "color", newcolor)

    def changeform(self, newform):
        """Set the shape."""
        setattr(self, "form", newform)

    def changekolichestvo(self, newkolichestvo):
        """Set the quantity."""
        setattr(self, "kolichestvo", newkolichestvo)
# Demo: print both objects, mutate the first, copy its attributes onto the
# second, then print both again.
obj1 = Second1()
obj2 = Second2()
print("Первый объект - ", obj1.color + ", " + obj1.form+",", obj1.kolichestvo)
print("Второй объект - ", obj2.color + ", " + obj2.form+",", obj2.kolichestvo)
obj1.changecolor("Оранжевый")
obj1.changeform("oval")
obj1.changekolichestvo(4)
# Copy obj1's current attributes onto obj2.
obj2.changecolor(obj1.color)
obj2.changeform(obj1.form)
obj2.changekolichestvo(obj1.kolichestvo)
print("Первый объект - ", obj1.color + ", " + obj1.form+",", obj1.kolichestvo)
print("Второй объект - ", obj2.color + ", " + obj2.form+",", obj2.kolichestvo)
|
def replace(s):
    """Return `s` with every space URL-encoded as '%20'."""
    return s.replace(' ', '%20')
def main():
    """Demo entry point: URL-encode spaces in a sample string and print it.

    FIX: converted the Python-2 `print ans` statement to the print()
    function so the module parses under Python 3.
    """
    s = 'i have a dream'
    ans = replace(s)
    print(ans)

if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .smqtk_search import SmqtkSimilaritySearch
from .smqtk_iqr import SmqtkIqr
from .smqtk import Smqtk
from .settings import SmqtkSetting
def load(info):
    """Girder plugin entry point: check settings and mount SMQTK resources."""
    settings = SmqtkSetting()
    # Touch each required setting so missing configuration fails fast.
    for required in settings.requiredSettings:
        settings.get(required)
    api_root = info['apiRoot']
    api_root.smqtk_similaritysearch = SmqtkSimilaritySearch()
    api_root.smqtk_iqr = SmqtkIqr()
    api_root.smqtk = Smqtk()
|
from mcts import MCTS
from nodes import *
from motion_domain_1d import dynamics, random_act_generator, terminal_estimator
import numpy as np
import matplotlib.pyplot as plt
# Reproducible 1-D motion-planning demo: roll out an MCTS planner for a
# fixed number of steps and report the average per-step reward.
rng = np.random.RandomState(15)
mcts = MCTS(dynamics,random_act_generator,terminal_estimator=terminal_estimator,rng=rng)
# state = np.zeros(2)
state = 0.
def plot(state):
    """Draw obstacle (red), goal (green) and agent (blue) as circles.

    NOTE(review): tuple(state/30.) assumes `state` is a vector; the scalar
    state used below would fail here — this helper is only referenced from
    the commented-out call in the loop. Confirm before re-enabling.
    """
    obst=plt.Circle((0.5, 0.5), 0.05, color='r')
    goal=plt.Circle((1., 1.), 0.01, color='g')
    agent=plt.Circle(tuple(state/30.), 0.01, color='b')
    plt.gcf().gca().add_artist(obst)
    plt.gcf().gca().add_artist(goal)
    plt.gcf().gca().add_artist(agent)
    plt.show()
steps = 60
r_sum = 0.
for i in range(steps):
    # plot(state)
    action = mcts.plan(state)
    state,r = dynamics(state,action,rng)
    print(state)
    print(r)
    r_sum+=r
print('Average reward: {}'.format(r_sum/steps))
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from tornado.routing import Matcher
from webservice.webmodel.RequestParameters import RequestParameters
from tornado.httputil import HTTPServerRequest
class RemoteCollectionMatcher(Matcher):
    """Tornado routing matcher for requests targeting remote collections.

    A request matches when its `dataset` query parameter names a collection
    whose configured `path` is an http(s) URL.
    """

    def __init__(self, collections_config: str):
        self._collections_config = collections_config
        self._remote_collections = None  # lazy cache, filled on first access

    def get_remote_collections(self):
        """Return the remote-collection map, loading it on first use."""
        if self._remote_collections is None:
            self._remote_collections = self._get_remote_collections(self._collections_config)
        return self._remote_collections

    @staticmethod
    def _get_remote_collections(collections_config: str):
        """Parse the YAML config and keep collections with an http path.

        Keys of each kept collection are normalized from kebab-case to
        snake_case. The config file is trusted (FullLoader).
        """
        _remote_collections = {}
        with open(collections_config, 'r') as f:
            collections_yaml = yaml.load(f, Loader=yaml.FullLoader)
        for collection in collections_yaml['collections']:
            if "path" in collection and collection['path'].startswith('http'):
                _remote_collections[collection["id"]] = {k.replace('-', '_'): v for k, v in collection.items()}
        return _remote_collections

    def match(self, request: HTTPServerRequest):
        """Return {} when the request matches, None otherwise.

        Tornado only needs a non-None dict to signal a match; the returned
        values are unused.
        """
        if RequestParameters.DATASET in request.query_arguments:
            # TODO do not hardcode utf-8
            collection = request.query_arguments[RequestParameters.DATASET][0].decode('utf-8')
            # FIX: go through the lazy accessor — self._remote_collections is
            # None until get_remote_collections() has been called, and
            # `collection in None` would raise TypeError.
            if collection in self.get_remote_collections():
                return dict()
        # when request does not match
        return None
#encoding=utf8
from jieba import analyse
textrank = analyse.textrank
# Extract the top-10 keywords from the sample text with TextRank, keeping
# only place names, nouns, verbal nouns and verbs (ns/n/vn/v POS tags).
# NOTE(review): the literal contains "\ " sequences — likely remnants of
# line continuations pasted into the string; confirm the intended text.
text = '非常线程是程序执行时的最小单位,它是进程的一个执行流,\ 是CPU调度和分派的基本单位,一个进程可以由很多个线程组成,\ 线程间共享进程的所有资源,每个线程有自己的堆栈和局部变量。\ 线程由CPU独立调度执行,在多CPU环境下就允许多个线程同时运行。\ 同样多线程也可以实现并发操作,每个请求分配一个线程来处理。'
keywords = textrank(text,topK=10,withWeight=True,allowPOS=('ns','n','vn','v'))
# Each item is a (keyword, weight) pair because withWeight=True.
for key in keywords:
    print(key)
#encoding: utf-8
from openpyxl import load_workbook
from openpyxl.utils import get_column_letter as num2col
from openpyxl.utils import column_index_from_string as col2num
import sys
# Python-2-only encoding hack: reset the default string encoding to UTF-8.
# (reload() and sys.setdefaultencoding do not exist like this in Python 3.)
reload(sys)
sys.setdefaultencoding('utf-8')
# Source workbook 1: "yun.xlsx" (first sheet).
wb1 = load_workbook(filename="yun.xlsx")
ws1 = wb1.get_sheet_names()
s1 = wb1.get_sheet_by_name(ws1[0])
# Source workbook 2: "lims.xlsx" (first sheet).
wb2 = load_workbook(filename="lims.xlsx")
ws2 = wb2.get_sheet_names()
s2 = wb2.get_sheet_by_name(ws2[0])
# Destination workbook: "module.xlsx" (first sheet), saved as hh.xlsx below.
wb3 = load_workbook(filename="module.xlsx")
ws3 = wb3.get_sheet_names()
s3 = wb3.get_sheet_by_name(ws3[0])
mrow = s2.max_row
mcol = s2.max_column
# fun1: copy s2 columns C..(mcol-1) into s3 shifted two columns left.
for m in range(2,mrow+1):
    for n in range(3,mcol):
        #ascii a 97, till {|}~ not charactor
        # NOTE: `n` (column number) is reassigned to its letter form below;
        # the for-loop rebinds it on the next iteration, so this is safe.
        n2=n-2
        n=num2col(n)
        i='%s%d'%(n,m)
        n2=num2col(n2)
        i2='%s%d'%(n2,m)
        tmp=s2[i].value
        s3[i2].value=tmp
#fun2
# fun2: where s2 column AC reads "未知3" ("unknown 3"), copy s2!AL into
# s3!AI and blank s3!AJ.
for m in range(2,mrow+1):
    n="AC"
    i='%s%d'%(n,m)
    tmp=s2[i].value
    if tmp == "未知3":
        i2='%s%d'%("AL",m)
        tmp2=s2[i2].value
        i3='%s%d'%("AI",m)
        s3[i3].value=tmp2
        i4='%s%d'%("AJ",m)
        s3[i4].value=""
#fun3 short
# fun3: join on user id — for each s1 row (B=id, K, M), find the first s3
# row whose AA matches and write K -> AJ, M -> AK.
mrow3 = s3.max_row
mrow1 = s1.max_row
for m1 in range(4,mrow1+1):
    n = 0
    i1 = '%s%d'%("B",m1)
    u1 = s1[i1].value
    ig1 = '%s%d'%("K",m1)
    g1 = s1[ig1].value
    ir1 = '%s%d'%("M",m1)
    r1 = s1[ir1].value
    for m3 in range(2,mrow3+1):
        i3 = '%s%d'%("AA",m3)
        u3 = s3[i3].value
        if u1 == u3:
            ig3 = '%s%d'%("AJ",m3)
            s3[ig3].value = g1
            ir3 = '%s%d'%("AK",m3)
            s3[ir3].value = r1
            break
# Persist the merged result.
wb3.save('hh.xlsx')
wb3.close()
from django.shortcuts import render
from .models import Movie
from .serializers import MovieSerializer
from rest_framework import generics
# Create your views here.
class MovieList(generics.ListCreateAPIView):
    """GET: list all movies; POST: create a new movie."""
    queryset = Movie.objects.all()
    serializer_class = MovieSerializer
class MovieDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single movie by primary key.

    FIX: `queryset` was set to the Movie model class itself; DRF generic
    views expect a QuerySet here (consistent with MovieList above).
    """
    queryset = Movie.objects.all()
    serializer_class = MovieSerializer
|
Python 3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:43:08) [MSC v.1926 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import tkinter as tk
>>>
[DEBUG ON]
>>>
[DEBUG OFF]
>>>
================================ RESTART: Shell ================================
>>>
>>> import tkinter as tk
>>>
class Win1:
    """Root window with buttons that each spawn a secondary window."""

    def __init__(self, master):
        self.master = master
        master.geometry("400x400")
        self.frame = tk.Frame(master)
        self.butnew("Click to open Window 2", "2", Win2)
        self.butnew("Click to open Window 3", "3", Win3)
        self.frame.pack()

    def butnew(self, text, number, _class):
        """Add a button that opens `_class` in a new toplevel when clicked."""
        button = tk.Button(
            self.frame,
            text=text,
            command=lambda: self.new_window(number, _class),
        )
        button.pack()

    def new_window(self, number, _class):
        """Create a Toplevel and instantiate `_class` inside it."""
        self.new = tk.Toplevel(self.master)
        _class(self.new, number)
|
# Print every half-hour time of day as "H : M".
for hora in range(24):
    for minuto in range(0, 60, 30):
        print("{} : {}".format(hora, minuto))
import chainer
import chainer.functions as F
import chainer.links as L
class Generator(chainer.Chain):
    """DCGAN-style generator: 100-dim noise -> single-channel 64x64 image.

    Architecture: linear projection to 128x16x16 feature maps, then two
    stride-2 deconvolutions (16 -> 32 -> 64 px). Batch norm is applied after
    the linear layer and after the first deconvolution; the final layer has
    no activation (raw output).
    """
    def __init__(self):
        # Legacy chainer link-registration style: links passed as kwargs.
        super(Generator, self).__init__(
            l1=L.Linear(100,128*16*16),
            dcv1=L.Deconvolution2D(in_channels=128,out_channels=64,ksize=4,stride=2,pad=1),
            dcv2=L.Deconvolution2D(in_channels=64,out_channels=1,ksize=4,stride=2,pad=1),
            bc1=L.BatchNormalization(size=128),
            bc2=L.BatchNormalization(size=64))
        # Bookkeeping used by callers: input/output sizes and image shape.
        self.in_size=100
        self.out_size=1*64*64
        self.imshape=(1,64,64)
    def __call__(self, x, train=True):
        # `train` is kept for API compatibility; it is not used here.
        # Reshape the linear output to (batch, 128, 16, 16) before deconvs.
        h1 = F.relu(self.bc1(F.reshape(self.l1(x),(x.data.shape[0],128,16,16))))
        h2 = F.relu(self.bc2(self.dcv1(h1)))
        return self.dcv2(h2)
class Discriminator(chainer.Chain):
    """DCGAN-style discriminator: (1, 64, 64) image -> single logit.

    Two stride-2 convolutions (64 -> 32 -> 16 px) with leaky ReLU and batch
    norm, then a linear layer producing one unnormalized score.
    """
    def __init__(self):
        # Legacy chainer link-registration style: links passed as kwargs.
        super(Discriminator, self).__init__(
            conv1=L.Convolution2D(in_channels=1,out_channels=64,ksize=5,stride=2,pad=2),
            conv2=L.Convolution2D(in_channels=64,out_channels=128,ksize=5,stride=2,pad=2),
            bc1=L.BatchNormalization(size=64),
            bc2=L.BatchNormalization(size=128),
            l1=L.Linear(128*16*16, 1))
        # Bookkeeping used by callers: input/output sizes and image shape.
        self.in_size = 1*64*64
        self.out_size = 1
        self.imshape=(1,64,64)
    def __call__(self, x, train=True):
        # `train` is kept for API compatibility; it is not used here.
        h1 = F.leaky_relu(self.bc1(self.conv1(x)))
        h2 = F.leaky_relu(self.bc2(self.conv2(h1)))
        return self.l1(h2)
class Cell:
    """A single grid cell on the simulation canvas.

    Attributes
    ----------
    current_row, current_col : int
        Position of the cell on the grid.
    current_state : int
        One of the state codes below.
    arrived : int
        Flag used by subclasses; initialized to 0.

    State codes: UNMARKED (0, empty), PEDESTRIAN (1, occupied),
    TARGET (2), OBSTACLE (3), WALKOVER (4, walked over).
    """

    UNMARKED = 0
    PEDESTRIAN = 1
    TARGET = 2
    OBSTACLE = 3
    WALKOVER = 4

    def __init__(self, row, col):
        self.current_row = row
        self.current_col = col
        self.current_state = Cell.UNMARKED
        self.arrived = 0

    def set_position(self, row, col):
        """Move the cell to (row, col)."""
        self.current_row, self.current_col = row, col

    def find_position(self):
        """Return the (row, col) coordinates of the cell."""
        return self.current_row, self.current_col

    def set_state(self, state):
        """Set the cell's state code."""
        self.current_state = state

    def get_state(self):
        """Return the cell's state code."""
        return self.current_state
class Pedestrian(Cell):
    """A pedestrian occupying one cell and moving toward a target.

    Parameters
    ----------
    row, col : int
        Initial grid position.
    search_strategy : callable
        ``search_strategy(util_map, row, col) -> (row, col)`` choosing the
        best neighbouring cell on the utility map.

    Attributes (in addition to Cell's)
    ----------
    next_row, next_col : int
        Position chosen for the next time step.
    arrived : bool
        True once the pedestrian has reached the target.
    speed : int
        Cells moved per time step (currently always 1).
    """
    # class present each pedestrian in canvas
    def __init__(self, row, col, search_strategy):
        super().__init__(row, col)
        self.current_state = 1  # Cell.PEDESTRIAN
        self.arrived = False
        self.speed = 1
        self.next_row = row
        self.next_col = col
        self.search_strategy = search_strategy
    def is_arrived(self):
        """Mark the pedestrian as arrived (setter despite the is_ name).

        Returns
        -------
        None
        """
        self.arrived = True
    def set_next_position(self, util_map):
        """Choose the next position via the search strategy.

        Parameters
        ----------
        util_map : array, shape(rows, cols)
            Utility value per cell; lower/higher preference is decided by
            the injected search_strategy.

        Returns
        -------
        None
        """
        row = self.current_row
        column = self.current_col
        best_n = self.search_strategy(util_map, row, column)
        self.next_row = best_n[0]
        self.next_col = best_n[1]
    def get_next_position(self):
        """Return the (row, col) chosen for the next step.

        Returns
        -------
        tuple:
            next position
        """
        return self.next_row, self.next_col
    def update_peds(self, target, cells):
        """Advance the pedestrian one step and update the cell grid.

        Three cases: already on the target (no-op), stepping onto the
        target (mark arrived), or a normal move. The vacated cell is
        marked WALKOVER.

        Parameters
        ----------
        target : Target
            target object
        cells : array, shape(rows, cols)
            array of Cell objects, indexed by (row, col) tuples
            # NOTE(review): tuple indexing assumes a numpy object array

        Returns
        -------
        None
        """
        if (self.current_row, self.current_col) == target.find_position():
            # cells[self.find_position()].set_state(Cell.TARGET)
            pass
        elif self.get_next_position() == target.find_position():
            self.is_arrived()
            cells[self.find_position()].set_state(Cell.WALKOVER)
            self.set_position(self.get_next_position()[0], self.get_next_position()[1])
            # cells[self.find_position()].set_state(Cell.TARGET)
        else:
            cells[self.find_position()].set_state(Cell.WALKOVER)
            self.set_position(self.get_next_position()[0], self.get_next_position()[1])
            # cells[self.find_position()].set_state(Cell.PEDESTRIAN)
    def rewrite_peds_pos(self, target, cells):
        """Re-mark the pedestrian's cell after all moves are applied.

        If the pedestrian has not reached the target, set its current cell
        back to PEDESTRIAN (update_peds leaves it as WALKOVER).

        Parameters
        ----------
        target : Target
            target object
        cells : array, shape(rows, cols)
            array of Cell objects

        Returns
        -------
        None
        """
        if (self.current_row, self.current_col) != target.find_position():
            cells[self.find_position()].set_state(Cell.PEDESTRIAN)
|
from pyspark.sql import SparkSession
from pyspark.ml.feature import *
from pyspark.ml.regression import LinearRegression
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml import Pipeline
from pyspark.sql.functions import col, concat, udf
from pyspark.sql.types import IntegerType
import regex as re
# SPARK only takes numerical variables for ML
# Feature-engineering / ML walkthrough over a Stack Overflow dump:
# tokenize -> engineered numeric features -> scaling -> TF-IDF -> models.
spark = SparkSession \
    .builder \
    .appName("ml_with_spark") \
    .getOrCreate()
# read data; dataset is from https://stackoverflow.blog/2009/06/04/stack-overflow-creative-commons-data-dump/
sto_data = spark.read.json("data/train_onetag_small.json") # sto = stack overflow
sto_data.head()
# Tokenization: Splitting strings into separate words: https://spark.apache.org/docs/latest/ml-features.html#tokenizer
regexTokenizer = RegexTokenizer(inputCol="Body", outputCol="words", pattern="\\W")
sto_data = regexTokenizer.transform(sto_data) # new column, where split words are saved as list
# count number of words for each body
body_length = udf(lambda x: len(x), IntegerType())
sto_data = sto_data.withColumn("BodyLength", body_length(sto_data.words))
# count the number of paragraphs and links in each body
no_para = udf(lambda x: len(re.findall("</p>", x)), IntegerType())
no_links = udf(lambda x: len(re.findall("</a>", x)), IntegerType())
sto_data = sto_data.withColumn("NumParagraphs", no_para(sto_data.Body))
sto_data = sto_data.withColumn("NumLinks", no_links(sto_data.Body))
print(sto_data.take(2)) # display 2 rows, nicer format than .head()
# Vector Assembler: Taking three columns into a Vector, prerequisite for normalization of numeric features
assembler = VectorAssembler(inputCols=["BodyLength", "NumParagraphs", "NumLinks"], outputCol="NumFeatures")
sto_data = assembler.transform(sto_data)
print(sto_data.take(2))
# Normalization of Vectors
scaler = Normalizer(inputCol="NumFeatures", outputCol="ScaledNumFeatures")
sto_data = scaler.transform(sto_data)
print(sto_data.take(2))
# Scale
scaler = StandardScaler(inputCol="NumFeatures", outputCol="ScaledNumFeatures_scaler")
scalerModel = scaler.fit(sto_data)
sto_data = scalerModel.transform(sto_data)
print(sto_data.take(2))
# PART 2: Further Feature Engineering
# Count Vectorizer
# Says, how often a particular word appears and creates a vocabulary
cv = CountVectorizer(inputCol="words", outputCol="TF", vocabSize=1000)
cvmodel = cv.fit(sto_data)
sto_data = cvmodel.transform(sto_data)
print(sto_data.take(2))
# show vocabulary
print(cvmodel.vocabulary)
print(cvmodel.vocabulary[-10:])
# Inter-document Frequency: puts absolute word numbers from before as relative numbers within the dataset
idf = IDF(inputCol="TF", outputCol="TFIDF")
idfmodel = idf.fit(sto_data)
sto_data = idfmodel.transform(sto_data)
print(sto_data.take(2))
# StringIndexer: takes a string and gives it an index - so that it is numerical
indexer = StringIndexer(inputCol="oneTag", outputCol="label")
indexermodel = indexer.fit(sto_data)
sto_data = indexermodel.transform(sto_data)
print(sto_data.take(2))
# QUIZ
# Q1: Question Id = 1112; How many words does the body contain?
q1112 = sto_data.select(["Id", "BodyLength"]).where(sto_data.Id == 1112)
print(q1112.show())
# Q2: Create a new column, that concatenates question title and body; apply the function that counts the number of words
# in this column. Whats the value in this column for qId: 5123?
sto_data = sto_data.withColumn("TitleBody", concat("Title", "Body"))
regexTokenizer = RegexTokenizer(inputCol="TitleBody", outputCol="TitleBodyWords", pattern="\\W")
sto_data = regexTokenizer.transform(sto_data) # new column, where split words are saved as list
# NOTE: no return type given, so this udf yields a string column (cast below).
no_title_body = udf(lambda x: len(x))
sto_data = sto_data.withColumn("TitleBodyCount", no_title_body(sto_data.TitleBodyWords))
q5123 = sto_data.select(["Id", "TitleBodyWords", "TitleBodyCount"]).where(sto_data.Id == 5123)
print(q5123.show())
# Q3: Using normalizer, whats the normalized value for qId: 512?
sto_data = sto_data.withColumn("TitleBodyCount", sto_data.TitleBodyCount.cast(IntegerType()))
assembler = VectorAssembler(inputCols=["TitleBodyCount"], outputCol="TitleBodyVector")
sto_data = assembler.transform(sto_data)
scaler = Normalizer(inputCol="TitleBodyVector", outputCol="TitleBodyNormalizer")
sto_data = scaler.transform(sto_data)
q512 = sto_data.select(["Id", "TitleBodyNormalizer"]).where(sto_data.Id == 512)
print(q512.show())
# Q4: Using the StandardScaler (mean and std), whats the normalized value for qId: 512?
sto_data = sto_data.drop("TitleBodyScaler")
scaler = StandardScaler(inputCol="TitleBodyVector", outputCol="TitleBodyScaler", withStd=True, withMean=True)
scalerModel = scaler.fit(sto_data)
sto_data = scalerModel.transform(sto_data)
q512 = sto_data.select(["Id", "TitleBodyScaler"]).where(sto_data.Id == 512)
print(q512.show())
# Q5: Using MinMaxScaler, whats the normalized value for qId: 512?
scaler = MinMaxScaler(inputCol="TitleBodyVector", outputCol="TitleBodyMinMaxScaler")
scalerModel = scaler.fit(sto_data)
sto_data = scalerModel.transform(sto_data)
q512 = sto_data.select(["Id", "TitleBodyMinMaxScaler"]).where(sto_data.Id == 512)
print(q512.show())
# LINEAR REGRESSION
# (`col` comes from pyspark.sql.functions, imported at the top.)
lr = LinearRegression(maxIter=5, regParam=0.0, fitIntercept=False, solver="normal")
train_data = sto_data.select(col("NumParagraphs").alias("label"), col("TitleBodyVector").alias("features"))
lrModel = lr.fit(train_data)
lrModel.coefficients
lrModel.intercept
lrModel.summary
lrModel.summary.r2
# LOGISTIC REGRESSION
train_data = sto_data.select(col("label").alias("label"), col("TFIDF").alias("features"))
lr = LogisticRegression(maxIter=10, regParam=0.0)
lrModel = lr.fit(train_data)
lrModel.coefficientMatrix
lrModel.summary.accuracy
lrModel.interceptVector
# K-MEANS CLUSTERING
train_data = sto_data.select(col("TitleBodyCount").alias("feature"))
assembler = VectorAssembler(inputCols=["feature"], outputCol="features")
train_data = assembler.transform(train_data)
train_data = train_data.drop("feature")
kmeans = KMeans().setK(5).setSeed(42)
model = kmeans.fit(train_data)
centers = model.clusterCenters()
for center in centers:
    print(center)
# OTHER QUESTIONS:
# Q1: How many times greater is the TitleBodyCount of the longest question than the TitleBodyCount
# of the shortest question (rounded to the nearest whole number)?
sto_data.createOrReplaceTempView("sto_data")
spark.sql("""
          select max(TitleBodyCount) / min(TitleBodyCount)
          FROM sto_data
          """).show()
# Q2: What is the mean and standard deviation of the TitleBodyCount?
# create a temporary view to run sql queries
spark.sql("""
          select mean(TitleBodyCount), std(TitleBodyCount)
          FROM sto_data
          """).show()
# PIPELINES: https://spark.apache.org/docs/latest/ml-pipeline.html
idf = IDF(inputCol="TF", outputCol="features")
idfmodel = idf.fit(sto_data)
sto_data = idfmodel.transform(sto_data)
lr = LogisticRegression(maxIter=10, regParam=0.0, elasticNetParam=0)
pipeline = Pipeline(stages=[regexTokenizer, cv, idf, indexer, lr])
regModel = pipeline.fit(sto_data)
sto_data.filter(sto_data.label == sto_data.prediction).count() # gives number of accurately predicted labels on train
# MODEL SELECTION & TUNING
train, test = sto_data.randomSplit([0.6, 0.4], seed=42)
test, validation = test.randomSplit([0.5, 0.5], seed=42)
regModel = pipeline.fit(train)
results = regModel.transform(test)
results.filter(results.label == results.prediction).count() # gives number of accurately predicted labels on test set
# crossvalidation
paramGrid = ParamGridBuilder() \
    .addGrid(cv.vocabSize, [10000, 20000]) \
    .build()
crossval = CrossValidator(estimator=pipeline,
                          estimatorParamMaps=paramGrid,
                          evaluator=MulticlassClassificationEvaluator(),
                          numFolds=3)
cvModel = crossval.fit(train)
cvModel.avgMetrics
results = cvModel.transform(test)
results.filter(results.label == results.prediction).count() # gives number of accurately predicted labels on test set
spark.stop()
|
# My first python program
def newfunction():
    """Print a fixed greeting message.

    FIX: converted the Python-2 `print` statement to the print() function
    so the module parses under Python 3.
    """
    print("this is a new function")
def addMyName(num1, num2):
    """Print the concatenation/sum of the two arguments (returns None)."""
    combined = num1 + num2
    print(combined)
# Hello World in Python
# FIX: converted Python-2 print statements to print() calls so the script
# runs under Python 3.
print("Hello World!")
#Added a new comment
num1 = "S"
num2 = "W"
addMyName(num1, num2)
counter = 0
while counter < 10:
    print("Loop number: %d" % counter)
    counter += 1
|
def testData():
    """Run the solver on test.txt and compare with answer.txt.

    Returns True when runCode(test) matches the expected answer.

    FIX: the original opened both files and never closed them; use
    context managers so the handles are released.
    """
    with open('test.txt', 'r') as test_file:
        test = test_file.readline()
    with open('answer.txt', 'r') as answer_file:
        answer = answer_file.readline()
    status = False
    print("Runs test data")
    result = runCode(test)
    print(type(result))
    print(type(answer))
    if int(result) == int(answer): #not always int
        status = True
    print("Correct answer: " + answer + "My answer: " + result)
    return status
def runCode(data):
    """Play 100 moves of the cups game and return the labels after cup 1.

    `data` is a string of digit labels (the circle in clockwise order).
    Returns the concatenation of all labels clockwise from (and excluding)
    cup 1 after 100 moves.
    """
    print("Runs code")
    cups = []
    #a list of numbers
    for char in data.strip():
        cups.append(int(char))
    #Current cup default value
    cc = 0
    for i in range(1,101):
        print("-- Move", i)
        cups, cc = actions(cups, cc)
        print("\n")
    print(cups)
    # Rotate so the answer starts with the cup after label 1, then drop 1.
    # NOTE(review): cups[one+1] assumes label 1 is not the last element.
    one = cups.index(1)
    a = cups[one+1]
    cups.remove(1)
    b = cups.index(a)
    firsthalf = cups[b:]
    lasthalf = cups[:b]
    lastlist = []
    for h in firsthalf:
        lastlist.append(str(h))
    for l in lasthalf:
        lastlist.append(str(l))
    answer = ''.join(lastlist)
    print(answer)
    return answer
def actions(cups, cc):
    """Play one move of the crab-cups game.

    Parameters: `cups` is the circle (list of distinct int labels, clockwise)
    and `cc` the index of the current cup. Returns the (possibly rotated)
    cups list and the new current-cup index.
    """
    print("cups:", cups)
    length = len(cups)
    # Indexes of the three cups clockwise of the current cup.
    # FIX: the original wrote `cc+1%length`, which parses as
    # `cc + (1 % length)` — i.e. no wrap-around — so cups[c3] could index
    # past the end of the list once `cc` neared the end. Parenthesize.
    c1 = (cc + 1) % length
    c2 = (cc + 2) % length
    c3 = (cc + 3) % length
    #Current cup
    current_cup = cups[cc]
    print("current:", current_cup)
    #Cups to pick up
    cup1 = cups[c1]
    cup2 = cups[c2]
    cup3 = cups[c3]
    #Remove cups1-3
    print("pick up:", cup1, cup2, cup3)
    cups.remove(cup1)
    cups.remove(cup2)
    cups.remove(cup3)
    # Destination: highest label below the current cup, wrapping to the
    # maximum remaining label when we run below the minimum.
    destination_cup = 0
    i = current_cup-1
    found = False
    while not found:
        if i in cups:
            destination_cup = i
            found = True
        elif i < min(cups):
            destination_cup = max(cups)
            found = True
        else:
            i-=1
    dc = cups.index(destination_cup)
    print("destination:", destination_cup)
    # Choose new current cup (the one clockwise of the current position,
    # evaluated on the reduced list, wrapping to the front).
    if cc < len(cups)-1:
        new_current = cups[cc+1]
    else:
        new_current = cups[0]
    #Insert picked up cups:
    cups.insert(dc+1, cup1)
    cups.insert(dc+2, cup2)
    cups.insert(dc+3, cup3)
    #Get the new current cup's index
    new_cc = cups.index(new_current)
    # Rotate the list so the new current cup sits at index cc+1, which the
    # caller's bookkeeping expects: move the first `nrofcupstoremove`
    # elements to the end of the list.
    should_be_index = cc+1
    nrofcupstoremove = abs(new_cc-should_be_index)
    if nrofcupstoremove != 0:
        #Extract cups
        cupstoremove = []
        for j in range(0,nrofcupstoremove):
            cupstoremove.append(cups[j])
        #Remove cups from the beginning
        for y in range(0,len(cupstoremove)):
            cups.remove(cupstoremove[y])
        #Put back cups in the end
        for z in range(0,len(cupstoremove)):
            cups.append(cupstoremove[z])
    #Get the new current cup's index
    newest_cc = cups.index(new_current)
    return cups, newest_cc
#Runs testdata
# Entry point: verify the solver against the sample data first, then run
# the real puzzle input only if the test passes.
testResult = testData()
if testResult == True:
    print("Test data parsed. Tries to run puzzle.")
    # NOTE(review): this handle is never closed.
    opuzzle = open('input.txt', 'r')
    puzzle = opuzzle.readline()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
from PIL import Image
import math
if __name__ == '__main__':
    # Convert 1.jpg to block-ASCII art in 1.txt: average each 8x8 pixel
    # block of the grayscale image and emit "@@" for bright, ".." for dark.
    # Open the image
    im = Image.open("1.jpg")
    # Convert to 8-bit grayscale ("L" mode)
    im = im.convert("L")
    # Side length (pixels) of one block = one output cell
    rect_width = 8
    # Image dimensions
    (width, height) = im.size
    # Output width in blocks (ceil so partial blocks are included)
    nwidth = math.ceil(width/rect_width)
    # Output height in blocks
    nheight = math.ceil(height/rect_width)
    # Flat list of pixel values, row-major
    lim = list(im.getdata())
    # Output characters
    rect_list = []
    for i in range(0, nheight):
        for j in range(0, nwidth):
            sum_temp = 0
            # Sum the block row by row: x..y is one 8-pixel scanline slice.
            for k in range(0, rect_width):
                x = (i*rect_width+k)*width+j*rect_width
                y = (i*rect_width+k)*width+j*rect_width + rect_width
                sum_temp += sum(lim[x: y])
            # Mean brightness of the block
            temp = math.ceil(sum_temp/(rect_width*rect_width))
            if temp > 255/3:
                rect_list.append("@@")
            else:
                rect_list.append("..")
        rect_list.append("\n")
    # Open the output file
    file_object = open('1.txt', 'w')
    # Write the rendered art
    file_object.write(''.join(rect_list))
    # Close the file
    file_object.close()
|
import smtplib
from email.message import EmailMessage
import os.path
from os import path
import requests
from bs4 import BeautifulSoup
from lxml import html
import requests
from selenium import webdriver
import time
from os import path
import os
import sys
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import stdiomask
import re
from datetime import datetime
from datetime import timedelta
from assignment import assignment
import platform
def getCorrectDriver():
    """Return the chromedriver path for the current operating system.

    Prints a warning and returns None on unsupported platforms (callers
    will then fail in os.path.join, as before).
    """
    # Named 'system', not 'os': the original shadowed the os module, which
    # is used at module level right after this function.
    system = platform.system()
    if system == "Windows":
        return "drivers/chromedriver.exe"
    elif system == "Darwin":
        return "drivers/chromedriverMAC"
    elif system == "Linux":
        # Was "driver/chromedriverLIN"; every other branch uses the
        # "drivers/" directory, so the missing 's' looks like a typo.
        return "drivers/chromedriverLIN"
    else:
        print("OS not supported")
        return None
# Absolute path to the chromedriver binary for this machine's OS.
global PATH  # no-op at module level; kept as written
PATH = os.path.join(sys.path[0], getCorrectDriver())
global options
options = webdriver.ChromeOptions()
options.add_argument('--lang=en_US')
# Run Chrome without a visible window.
options.headless = True
options.detach = True
# 'driver' is created lazily by setDriver(); 'timeDelta' by setDelta().
global driver
global timeDelta
def setDelta(delta):
    """Set the module-wide alert window, in days (accepts int or numeric str)."""
    global timeDelta
    timeDelta = int(delta)
def setDriver():
    """Create the shared Chrome webdriver and store it in the module global.

    Uses PATH and options configured at import time.
    """
    global driver
    # The original also declared 'global option' (misspelled, never
    # assigned) -- removed as a no-op.
    driver = webdriver.Chrome(PATH, options=options)
# def inputSaveCreds():
# global email
# global password
# global appPassword
# global smsGateway
# creds = open("creds.txt", "w")
# correctCreds = False
# while(not correctCreds):
# username = input("username:\n")
# print("------------------------------")
# password = stdiomask.getpass(prompt="password:\n")
# correctCreds = authenticate(username, password)
# if(correctCreds == False):
# print("Could not authenticate username and password, retry")
# print("------------------------------")
# creds.write(username)
# creds.write("\n")
# creds.write(password)
# creds.write("\n")
# print("------------------------------")
# appPass1 = stdiomask.getpass(prompt="App Password:\n")
# creds.write(appPass1)
# creds.write("\n")
# print("------------------------------")
# creds.write(input("number:\n"))
# print("------------------------------")
# c = (input("carrier:\n \"v\" for Verizon\n \"a\" for AT&T\n \"s\" for Sprint\n \"t\" for T-Mobile\n \"m\" for Metro PCS\n \"b\" for Boost Mobile\n"))
# print("------------------------------")
# if(c == "v"):
# creds.write("@vtext.com")
# elif(c == "a"):
# creds.write("@txt.att.net")
# elif(c == "s"):
# creds.write("@messaging.sprintpcs.com")
# elif(c == "t"):
# creds.write("@tmomail.net")
# elif(c == "m"):
# creds.write("@mymetropcs.com")
# elif(c == "b"):
# creds.write("@myboostmobile.com")
# creds.close()
# creds = open("creds.txt", "r")
# content = creds.readlines()
# email = content[0].replace("\n", "")
# password = content[1].replace("\n", "")
# appPassword = content[2].replace("\n", "")
# smsGateway = content[3].replace("\n", "")
# creds.close()
def authenticate(user, password, driver):
    """Attempt the NJIT Canvas SAML login; return True if the credentials work."""
    driver.get("http://njit.instructure.com/login/saml")
    user_field = driver.find_element_by_name("j_username")
    pass_field = driver.find_element_by_name("j_password")
    user_field.send_keys(user)
    pass_field.send_keys(password)
    user_field.send_keys(Keys.RETURN)
    # The error banner only appears when the login was rejected.
    return "The UCID or password you entered was incorrect." not in driver.page_source
def setCreds():
    """Load saved credentials from creds.txt into module globals.

    Expects four lines: email, password, gmail app password, SMS gateway
    address. Raises IndexError if the file has fewer lines.
    """
    global email
    global password
    global appPassword
    global smsGateway
    # 'with' closes the handle even if a read fails (original used close()).
    with open("creds.txt", "r") as creds:
        content = creds.readlines()
    email = content[0].replace("\n", "")
    password = content[1].replace("\n", "")
    appPassword = content[2].replace("\n", "")
    smsGateway = content[3].replace("\n", "")
def email_alert(subject, body):
    """Send a message to the user's SMS gateway address via gmail.

    Returns True on success, False on any failure (missing globals,
    network error, auth error).
    """
    global email
    global appPassword
    global smsGateway
    try:
        msg = EmailMessage()
        msg.set_content(body)
        msg['subject'] = subject
        msg['to'] = smsGateway
        msg['from'] = "canvasalertsnjit@gmail.com"
        # Gmail requires an app password - made available after 2-factor
        # verification is set up: http://myaccount.google.com/ -> security
        # -> signing in to google -> app passwords.
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.starttls()
        # SECURITY: credentials are hard-coded here and the email/appPassword
        # globals loaded by setCreds are ignored -- move these to config.
        server.login("canvasalertsnjit@gmail.com", "eeytczejwrwbmruh")
        server.send_message(msg)
        server.quit()
        return True
    except Exception:
        # Was a bare 'except:' -- at least let KeyboardInterrupt/SystemExit
        # propagate now.
        return False
def assignmentLinks():
    """Log into NJIT Canvas and return the upcoming + overdue assignment
    URLs scraped from every course on the dashboard.

    Relies on module globals: email/password (set by setCreds) and
    driver (set by setDriver).
    """
    global email
    global password
    global driver
    # SAML login form.
    driver.get("http://njit.instructure.com/login/saml")
    username = driver.find_element_by_name("j_username")
    Password = driver.find_element_by_name("j_password")
    username.send_keys(email.replace("@njit.edu", ""))
    Password.send_keys(password)
    username.send_keys(Keys.RETURN)
    # Terms-acceptance page after login.
    checkbox = driver.find_element_by_id("accept")
    checkbox.click()
    accept = driver.find_element_by_id("submitbtn")
    accept.click()
    time.sleep(3)  # let the dashboard render before scraping
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    courseCards = soup.find_all('div', class_='ic-DashboardCard__header')
    coursesList = []
    for card in courseCards:
        coursesList.append(card.a['href'])
    assignmentLinks = []
    for course in coursesList:
        driver.get("http://njit.instructure.com" + course)
        try:
            assignmentTab = driver.find_element_by_class_name("assignments")
            assignmentTab.click()
        except(NoSuchElementException, StaleElementReferenceException) as e:
            # Course has no assignments tab (or the DOM changed mid-click).
            continue
        try:
            element = WebDriverWait(driver, 4).until(EC.presence_of_element_located((By.ID, "assignment_group_upcoming_assignments")))
        except(TimeoutException)as e:
            continue
        time.sleep(5)  # wait for the assignment lists to populate
        assignmentPageSoup = BeautifulSoup(driver.page_source, 'html.parser')
        upA = BeautifulSoup(str(assignmentPageSoup.find("div", {"id": "assignment_group_upcoming_assignments"})), 'html.parser')
        # Element ids look like assignment_97613.
        upAList = upA.find_all("div", {"id": re.compile('^assignment_\d+')})
        for assignment in upAList:
            assignmentLinks.append(assignment.a['href'])
            print(assignment.a['href'])
        overDueA=BeautifulSoup(str(assignmentPageSoup.find("div", {"id": "assignment_group_overdue"})), 'html.parser')
        overDueAList=overDueA.find_all("div", {"id": re.compile('assignment_[0-9]{3,5}')})
        for assignment in overDueAList:
            assignmentLinks.append(assignment.a['href'])
            print(assignment.a['href'])
    return assignmentLinks
# assignments = []
# for link in assignmentLinks:
# a = assignment(link, driver)
# assignments.append(a)
# print(a)
# print("if you just got an index out of bounds error that means that assignmentLinks was not populated - i think it may have to do with headless mode - i didnt get this issue when headless was False")
# def cleanAssignments(assignments):
# file = open("cleanAssignments.txt", "w")
# for a in assignments:
# print(str(a))
# file.write(str(a.assignmentInfo()))
# file.close()
def getEasyReadTime(date):
    """Turn a countdown string into readable text.

    Input looks like "H:MM..." or "D days, H:MM..." (anything after the
    minutes is ignored). Returns e.g. "2 days, 3 hours, 15 minutes".
    Zero components are omitted; the original left a dangling ", " when
    hours or minutes were zero, fixed here by joining the parts.
    """
    temp = date.split(",")
    days = ""
    if len(temp) == 1:
        notDays = temp[0]
    else:
        days = temp[0]
        notDays = temp[1]
    # Hours precede the first ':'; minutes are the two digits after it.
    hours = int(notDays[0:notDays.index(":")])
    minutes = int(notDays[notDays.index(":") + 1:notDays.index(":") + 3])
    parts = []
    if days != "":
        parts.append(days)
    if hours > 0:
        parts.append(str(hours) + " hours")
    if minutes > 0:
        parts.append(str(minutes) + " minutes")
    return ", ".join(parts)
def assignmentList(links):
    """Build an assignment object for every scraped link, using the shared driver."""
    return [assignment(link, driver) for link in links]
def sendAlertIfDue(assignmentList):
    """Text the user about every assignment due within timeDelta days.

    Sends a dated divider message first, then one (or two, for long
    bodies) messages per qualifying assignment via email_alert.
    """
    # Daily divider - keep track of recent notifications more easily.
    d = datetime.now()
    email_alert("", "________________" + d.strftime("%d") + "/" + d.strftime("%m") + "________________")
    time.sleep(5)  # pause so the gateway delivers messages in order
    assignmentProperties = []
    for assignment in assignmentList:
        print(assignment)
    for assignment in assignmentList:
        # Only alert when the assignment is due within the configured window.
        if((assignment.delta) <= timedelta(days=timeDelta)):
            time.sleep(1)
            print(len(str(assignment)) + len(assignment.assignmentUrl()) +len(assignment.assignmentName()))
            # SMS gateways truncate long messages: send the URL separately
            # when the combined payload would exceed ~150 characters.
            if(len(str(assignment)) + len(assignment.assignmentUrl()) +len(assignment.assignmentName()) > 150):
                body = str(assignment)
                email_alert(assignment.assignmentName(), body)
                time.sleep(1)
                email_alert("",assignment.assignmentUrl())
                print("done")
            else:
                body = str(assignment) +" " + assignment.assignmentUrl()
                email_alert(assignment.assignmentName(), body)
                print("done")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/6 下午10:55
# @Author : Lucas Ma
# @File : __init__.py |
from django.contrib import admin
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.apps import apps
from polymorphic.admin import PolymorphicParentModelAdmin, PolymorphicChildModelAdmin
from mptt.admin import MPTTModelAdmin
from .models import (
Rate,
Rule,
Grade,
GradeRule,
GradeRate,
Referral,
Transaction
)
# TODO Next Release
# @admin.register(Rate)
class RateAdmin(admin.ModelAdmin):
    """Plain list admin for Rate; registration is deferred (see TODO above)."""
    list_display = [
        'name',
        'slug',
        'description'
    ]
# TODO Next Release
# @admin.register(Rule)
class RuleAdmin(admin.ModelAdmin):
    """Plain list admin for Rule; registration is deferred (see TODO above)."""
    list_display = [
        'name',
        'slug',
        'description',
        'weighting'
    ]
class GradeRateInline(admin.TabularInline):
    """Tabular inline editor for a grade's rates (no blank extra rows)."""
    extra = 0
    model = GradeRate
class GradeRuleInline(admin.TabularInline):
    """Tabular inline editor for a grade's rules (no blank extra rows)."""
    extra = 0
    model = GradeRule
# TODO Next Release
# @admin.register(Grade)
class GradeAdmin(admin.ModelAdmin):
    """Grade admin with its rules and rates edited inline; registration deferred."""
    inlines = [GradeRuleInline, GradeRateInline]
    list_display = [
        'name',
        'slug',
        'description'
    ]
@admin.register(Referral)
class ReferralAdmin(MPTTModelAdmin):
    """Tree-aware admin for the referral hierarchy."""
    list_filter = ['level']
    list_select_related = ['account', 'parent']
    search_fields = ['account__first_name', 'account__last_name']
    list_display = ['inner_id', 'account', 'parent', 'decendants', 'downlines', 'level', 'created_at', 'balance']
    def decendants(self, obj):
        # Nodes anywhere below this referral in the tree.
        return obj.get_descendant_count()
    def downlines(self, obj):
        # Direct children only.
        return obj.downlines.count()
    def get_queryset(self, request):
        # NOTE(review): only() restricts to three fields, but list_display
        # also reads level/created_at/balance, which forces extra per-row
        # queries for the deferred fields -- confirm this is intended.
        return super().get_queryset(request).only('inner_id', 'account', 'parent')
@admin.register(Transaction)
class TransactionAdmin(admin.ModelAdmin):
    """Read-only ledger view: transactions can be inspected but never
    added, changed, or deleted through the admin."""
    date_hierarchy = 'created_at'
    list_filter = ['created_at', 'flow']
    search_fields = ['referral__account__first_name', 'referral__account__username']
    list_display = ['inner_id', 'referral', 'note', 'flow', 'rate', 'total', 'balance', 'created_at']
    def has_change_permission(self, request, obj=None):
        return False
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class ReferralInline(admin.TabularInline):
    """Single non-deletable inline referral row (at most one, max_num=1)."""
    model = Referral
    can_delete = False
    extra = 1
    max_num = 1
|
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
from .utils import NamespacedClient, query_params, _make_path
class MigrationClient(NamespacedClient):
    @query_params()
    async def deprecations(self, index=None, params=None, headers=None):
        """
        Retrieves information about different cluster, node, and index level settings
        that use deprecated features that will be removed or changed in the next major
        version.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/migration-api-deprecation.html>`_
        :arg index: Index pattern
        """
        # Build the endpoint path first, then issue the GET.
        endpoint = _make_path(index, "_migration", "deprecations")
        return await self.transport.perform_request(
            "GET", endpoint, params=params, headers=headers
        )
|
import socket
import threading
import os
import time
import appJar
HOST = socket.gethostname()
PORT = 5001
all_connections = []
all_address = []
def getFile(name, conn):
    """Serve one client connection: 'L' returns a file listing; any other
    payload is treated as a filename, and the file is streamed back after
    the client confirms with 'OK'.

    'name' is unused; kept so the Thread(target=..., args=('getFile', conn))
    call signature still matches.
    """
    while True:
        data = conn.recv(1024)
        client_command = data.decode('utf-8')
        if client_command == 'L':
            server_response = ','.join(os.listdir())
            conn.send(server_response.encode())
        if os.path.isfile(data):
            filesize = ('File exists' + str(os.path.getsize(data)))
            conn.send(filesize.encode('utf-8'))
            userResponse = conn.recv(1024)
            if userResponse[:2].decode('utf-8') == 'OK':
                # 'with' guarantees the file is closed after streaming.
                with open(data, 'rb') as f:
                    bytesToSend = f.read(1024)
                    conn.send(bytesToSend)
                    # BUG FIX: f.read() on a binary file returns bytes, so
                    # the original "while bytesToSend != ''" was always
                    # true and looped forever sending b'' at EOF.
                    while bytesToSend:
                        bytesToSend = f.read(1024)
                        conn.send(bytesToSend)
# TCP listening socket shared with main() below.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# SO_REUSEADDR lets the server rebind the port immediately after a restart.
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
print('Server listening for connections')
def main():
    """Accept clients forever, serving each one in its own getFile thread."""
    while True:
        # listen() only needs to be called once; per-iteration calls are
        # redundant but harmless.
        server_socket.listen(5)
        conn, addr = server_socket.accept()
        print('Connected with ' + addr[0] + ' : ' + str(addr[1]))
        # First positional arg fills getFile's unused 'name' parameter.
        myThread = threading.Thread(target=getFile ,args=('getFile', conn))
        myThread.start()
    server_socket.close()  # unreachable: the loop above never breaks
main()
|
from pandas_datareader import data as pdr
import fix_yahoo_finance as yf
import numpy as np
import pandas
import os
import util
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.preprocessing import MinMaxScaler
def norm(x):
    """Scale x to unit Euclidean length."""
    magnitude = np.linalg.norm(x)
    return x / magnitude
#scaler = MinMaxScaler(feature_range=(0, 1))
#data = pandas.read_csv("{}/raw/GOOG.csv".format(os.getcwd()))
#data.drop(columns = ["Date", "Adj Close"], inplace=True)
#data = scaler.fit_transform(data)
#msk = np.random.rand(len(data)) < 0.8
#train = data[msk]
#test = data[~msk]
#print (test)
#print (norm(data["Open"].tolist()))
def getEtfList():
    """Read analysis/ETFList.csv (relative to cwd) and return its symbols."""
    csv_path = f"{os.getcwd()}/analysis/ETFList.csv"
    frame = pandas.read_csv(csv_path)
    return frame['Symbol'].tolist()
#util.process(getEtfList())
#raise SystemError
def process(stocks, directory="all"):
    """Summarize recent trend per stock into analysis/gg_trending.csv.

    For each stock with a CSV at <cwd>/<directory>/<stock>.csv, looks at
    the last 12 rows and records [fraction of down days, fraction of up
    days, last-close / first-open ratio].
    """
    percent_list = {}
    for astock in stocks:
        path = "{}/{}/{}.csv".format(os.getcwd(), directory, astock)
        if not os.path.exists(path):
            continue
        print(path)
        df = pandas.read_csv(path)
        raising = 0
        down = 0
        total = 0
        start = 0
        last = 0
        for idx, row in df.tail(12).iterrows():
            # row[...] instead of df.at[idx, ...]: same value, no second lookup.
            opend = int(row["Open"])
            closed = int(row["Close"])
            if start == 0:
                start = opend
            last = closed
            total += 1
            if opend > closed:
                down += 1
            if closed > opend:
                raising += 1
        try:
            percent_list[astock] = [ round(down/total,3), round(raising/total,3), round(last/start,4) ]
        except ZeroDivisionError:
            # Was a bare 'except: pass'; only an empty CSV (total == 0) or a
            # zero first open price can fail here.
            pass
    df = pandas.DataFrame.from_dict(percent_list, orient = 'index', columns=["%Down", "%Up", "Change"])
    path = "{}/analysis/gg_trending.csv".format(os.getcwd())
    df.to_csv(path)
def getOtherData(stocks, directory="all"):
    """Load the '_<stock>.csv' companion file for each stock (stub).

    BUG FIX: the original referenced 'directory' without defining it
    (NameError on first iteration); it is now a parameter defaulting to
    "all", matching process(). Existing callers are unaffected.
    """
    for astock in stocks:
        path = "{}/{}/_{}.csv".format(os.getcwd(), directory, astock)
        if not os.path.exists(path):
            continue
        print(path)
        df = pandas.read_csv(path)
# NOTE(review): getStocks is not defined in this file -- presumably it comes
# from the 'util' import above; confirm before running.
stocks = getStocks("IVV")
#getOtherData(stocks)
process(stocks)
#process(getStocks("IWB"), "all")
|
__author__ = 'sudoz'

# Report whether this module is the entry point or was imported.
if __name__ != '__main__':
    print('it is imported from other module')
else:
    print('this program was executed by itself')
|
##length=int(input("enter no.of length do you want"))
##print("enter any nymbers")
##lis=[]
##for number in range(length):
## num=input()
## lis.append(num)
##print("lis=",lis)
# Read a list length, then that many integers, and print the list.
newlist=[]
number=int(input("enter the length of to creat a newlist"))
print("enter a numbers here:")
i=0
# BUG FIX: the original condition was 'i <= number', which read number+1
# values even though the prompt asks for the list's length.
while i<number:
    lis=int(input())
    newlist.append(lis)
    i+=1
print("newlist=",newlist)
|
# -*- coding: utf-8 -*-
import scrapy
import os
import re
class jxufespider(scrapy.Spider):
    """Crawl Douban movie reviews for subject 1291546, saving each fetched
    page to disk and following review and pagination links.
    """
    name = "jxufespider"
    allowed_domains = ["movie.douban.com"]
    start_urls = (
        'https://movie.douban.com/subject/1291546/reviews?start=0',
    )
    save_path='..\\doubanAssess\\' # directory for downloaded pages (Windows-style path)
    if not os.path.exists(save_path):
        # NOTE(review): 'md' is the Windows mkdir command; this spider is
        # Windows-only as written -- os.makedirs would be portable.
        os.system('md '+save_path)
    urls = {} # urls already scheduled, to avoid re-crawling

    def parse(self, response):
        # Save the raw page under a filename derived from its URL.
        filename = self.save_path + re.sub('https:|\/|\?','',response.url)
        with open(filename, 'wb') as f:
            f.write(response.body)
        title = response.xpath('//title/text()').extract()
        # print statement -> function call so the spider also runs on Python 3.
        print(' '.join(title))
        # Review links live under div.main-bd/h2/a; the pagination link
        # under span.next needs the base URL prefixed.
        nextLink = response.xpath('//span[@class="next"]/a/@href')
        for sel in response.xpath('//div[@class="main-bd"]/h2/a/@href')+nextLink:
            url=sel.extract()
            if 'start' in url:
                url= 'https://movie.douban.com/subject/1291546/reviews'+url
            if url not in self.urls:
                self.urls[url]=''
                print(url)
                yield scrapy.Request(url, callback=self.parse)  # follow the extracted URL
|
import pygame
aktualnie_wyswietlane = [[0 for col in range(14)] for row in range(5)]
aktualna_podpowiedz = [[0 for col_ in range(14)] for row_ in range(5)]
class Kod(object):
    """Board logic for a Mastermind-style pygame game (comments translated
    from Polish).

    State lives in the module-level grids: aktualnie_wyswietlane[col][row]
    holds the guessed ball colour (1-8, 0 = empty) and
    aktualna_podpowiedz[col][row] the hint pegs (1 = right colour in the
    right place, 2 = right colour in the wrong place).  Columns are 1-4;
    rows grow upward as guesses are confirmed with Enter.
    NOTE(review): the original indentation was lost in this copy; the
    nesting below is the reading consistent with the game's behaviour --
    confirm against the original file.
    """

    def main(self, aktualny_kolor):
        # Per-frame driver: handle clicks on slots/palette and redraw.
        # aktualny_kolor = number of selectable colours (6, 7 or 8).
        Kod.reset(self)
        self.pos = pygame.mouse.get_pos()
        if self.stop == 0:
            # Enter only counts once all four slots of the active row are set.
            if aktualnie_wyswietlane[1][self.wiersz] and aktualnie_wyswietlane[2][self.wiersz] and aktualnie_wyswietlane[3][self.wiersz] and aktualnie_wyswietlane[4][self.wiersz]:
                enter = pygame.key.get_pressed()
                if enter[13]:  # 13 == Return key
                    self.enter += 1
        Kod.wyswietlanie_pytajnikow(self)
        Kod.wyswietlanie_kulek(self)
        # Palette hit-box x range depends on how many colours are drawn.
        if aktualny_kolor == 6:
            x_1, x_2 = 77, 111
        if aktualny_kolor == 7:
            x_1, x_2 = 57, 89
        if aktualny_kolor == 8:
            x_1, x_2 = 37, 70
        x_pole_1, x_pole_2 = 156, 190
        if self.click[0]:
            pozycja = pygame.mouse.get_pos()
        else:
            pozycja = (-1, -1)  # sentinel: no click this frame
        # Which of the four guess slots was clicked?
        for pole in range(1, 5):
            if pozycja[0] in range(x_pole_1, x_pole_2) and pozycja[1] in range(self.y_1, self.y_1 + 35):
                self.click_pole = pole
            x_pole_1 += 58
            x_pole_2 += 58
        # print(self.click_pole)
        # Which palette ball was clicked? -> drop that colour into the slot.
        for kulka in range(1, aktualny_kolor + 1):
            if pozycja[0] in range(x_1, x_2) and pozycja[1] in range(665, 700):
                aktualnie_wyswietlane[self.click_pole][self.wiersz] = kulka
            x_1 += 46
            x_2 += 46

    def kod_gry(self, aktualny_kolor):
        # Score the just-confirmed row against the secret code and store hints.
        self.kod_los = self.twoj_kod  # secret code set by the other player
        dodatkowe_2 = 0
        sprawdzenie = [0, 0, 0, 0]
        for wiersz in range(0, self.click_runda * 2 + 6):
            index = 1
            tmp = [0, 0, 0, 0]  # colours already credited as exact matches
            self.podpowiedz = []
            if self.enter == wiersz:
                self.podpowiedz.append(0)  # padding so hints are read at 1..4
                # Exact-position matches -> hint value 1.
                if aktualnie_wyswietlane[1][wiersz + 1] == self.kod_los[0]:
                    self.podpowiedz.append(1)
                    index += 1
                    tmp.append(self.kod_los[0])
                if aktualnie_wyswietlane[2][wiersz + 1] == self.kod_los[1]:
                    self.podpowiedz.append(1)
                    index += 1
                    tmp.append(self.kod_los[1])
                if aktualnie_wyswietlane[3][wiersz + 1] == self.kod_los[2]:
                    self.podpowiedz.append(1)
                    index += 1
                    tmp.append(self.kod_los[2])
                if aktualnie_wyswietlane[4][wiersz + 1] == self.kod_los[3]:
                    self.podpowiedz.append(1)
                    index += 1
                    tmp.append(self.kod_los[3])
                self.kod_los_blad = self.kod_los.copy()
                for kol_2 in range(1, 5):
                    sprawdzenie[kol_2-1] = aktualnie_wyswietlane[kol_2][wiersz + 1]
                # Wrong-position matches -> hint value 2; each secret ball is
                # credited at most once via kod_los_blad.
                for kol in range(1, 5):
                    if sprawdzenie.count(aktualnie_wyswietlane[kol][wiersz + 1]) > tmp.count(aktualnie_wyswietlane[kol][wiersz + 1]):
                        dodatkowe_2 = self.kod_los_blad.count(aktualnie_wyswietlane[kol][wiersz + 1]) - tmp.count(aktualnie_wyswietlane[kol][wiersz + 1])
                        sprawdzenie.remove(aktualnie_wyswietlane[kol][wiersz + 1])
                    if dodatkowe_2 or (aktualnie_wyswietlane[kol][wiersz + 1] in self.kod_los_blad and not aktualnie_wyswietlane[kol][wiersz + 1] in tmp):
                        self.podpowiedz.append(2)
                        if self.kod_los_blad.count(aktualnie_wyswietlane[kol][wiersz + 1]):
                            self.kod_los_blad.remove(aktualnie_wyswietlane[kol][wiersz + 1])
                        dodatkowe_2 = 0
                # print("podp=", self.podpowiedz, "tmp=", tmp, "sprawdz=", sprawdzenie, "blad=", self.kod_los_blad)
            while index <= 5:
                self.podpowiedz.append(0)  # pad the hint list out to 5 entries
                index += 1
            # Persist this row's hints into the shared grid.
            for kolumna in range(1, 5):
                if wiersz == self.enter and self.podpowiedz[kolumna] == 0:
                    aktualna_podpowiedz[kolumna][wiersz + 1] = 0
                if self.podpowiedz[kolumna] == 1:
                    aktualna_podpowiedz[kolumna][wiersz + 1] = 1
                if self.podpowiedz[kolumna] == 2:
                    aktualna_podpowiedz[kolumna][wiersz + 1] = 2
        Kod.wyswietlanie_podpowiedzi(self)
        Kod.czy_wygrana(self)

    def wyswietlanie_kulek(self):
        # Draw every placed guess ball onto the board.
        czerwona = pygame.image.load("Obrazy/kulki/czerwona.png")
        zielona = pygame.image.load("Obrazy/kulki/zielona.png")
        niebieska = pygame.image.load("Obrazy/kulki/niebieska.png")
        blekitna = pygame.image.load("Obrazy/kulki/blekitna.png")
        rozowa = pygame.image.load("Obrazy/kulki/rozowa.png")
        zolta = pygame.image.load("Obrazy/kulki/zolta.png")
        szara = pygame.image.load("Obrazy/kulki/szara.png")
        czarna = pygame.image.load("Obrazy/kulki/czarna.png")
        self.y = 571  # bottom row's y; rows are 50 px apart going upward
        for wysokosc in range(1, 14):
            x = 156
            for xz in range(1, 5):
                if aktualnie_wyswietlane[xz][wysokosc] == 1:
                    self.screen.blit(czerwona, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 2:
                    self.screen.blit(zielona, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 3:
                    self.screen.blit(niebieska, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 4:
                    self.screen.blit(blekitna, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 5:
                    self.screen.blit(rozowa, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 6:
                    self.screen.blit(zolta, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 7:
                    self.screen.blit(szara, (x, self.y))
                elif aktualnie_wyswietlane[xz][wysokosc] == 8:
                    self.screen.blit(czarna, (x, self.y))
                x += 58
            self.y -= 50

    def wyswietlanie_pytajnikow(self):
        # Mark the active (editable) row with question-mark placeholders.
        pytajnik = pygame.image.load("Obrazy/kulki/pytajnik2.png")
        for wiersz_2 in range(1, self.click_runda * 2 + 6):
            if self.enter == wiersz_2:
                self.y_1 = 571 - 50 * wiersz_2
                self.wiersz = wiersz_2 + 1  # self.wiersz - which row follows the Enter press
                self.screen.blit(pytajnik, (156, 571 - 50 * wiersz_2))
                self.screen.blit(pytajnik, (214, 571 - 50 * wiersz_2))
                self.screen.blit(pytajnik, (272, 571 - 50 * wiersz_2))
                self.screen.blit(pytajnik, (330, 571 - 50 * wiersz_2))
            elif self.enter == 0:
                self.screen.blit(pytajnik, (156, 571))
                self.screen.blit(pytajnik, (214, 571))
                self.screen.blit(pytajnik, (272, 571))
                self.screen.blit(pytajnik, (330, 571))

    def wyswietlanie_podpowiedzi(self):
        # Draw the hint pegs (black = right place, white = wrong place).
        mala_czarna = pygame.image.load("Obrazy/kulki/mala_czarna.png")
        mala_biala = pygame.image.load("Obrazy/kulki/mala_biala.png")
        for wysokos_2 in range(1, 14):
            if self.enter + 1 == wysokos_2:
                continue  # skip the row that is still being edited
            if aktualna_podpowiedz[1][wysokos_2] == 1:
                self.screen.blit(mala_czarna, (37, 623 - 50 * wysokos_2))
            elif aktualna_podpowiedz[1][wysokos_2] == 2:
                self.screen.blit(mala_biala, (37, 623 - 50 * wysokos_2))
            if aktualna_podpowiedz[2][wysokos_2] == 1:
                self.screen.blit(mala_czarna, (61, 623 - 50 * wysokos_2))
            elif aktualna_podpowiedz[2][wysokos_2] == 2:
                self.screen.blit(mala_biala, (61, 623 - 50 * wysokos_2))
            if aktualna_podpowiedz[3][wysokos_2] == 1:
                self.screen.blit(mala_czarna, (37, 647 - 50 * wysokos_2))
            elif aktualna_podpowiedz[3][wysokos_2] == 2:
                self.screen.blit(mala_biala, (37, 647 - 50 * wysokos_2))
            if aktualna_podpowiedz[4][wysokos_2] == 1:
                self.screen.blit(mala_czarna, (61, 647 - 50 * wysokos_2))
            elif aktualna_podpowiedz[4][wysokos_2] == 2:
                self.screen.blit(mala_biala, (61, 647 - 50 * wysokos_2))

    def czy_wygrana(self):
        # Detect win (a row with four '1' hints) or loss (guesses exhausted),
        # reveal the secret code and freeze the game until the player backs out.
        wygrana_w_ostatiej = 0
        for wiersz in range(1, 14):
            if aktualna_podpowiedz[1][wiersz] == 1 and aktualna_podpowiedz[2][wiersz] == 1 :
                if aktualna_podpowiedz[3][wiersz] == 1 and aktualna_podpowiedz[4][wiersz] == 1:
                    if self.enter + 1 == wiersz:
                        continue  # the active row doesn't count yet
                    wygrana_w_ostatiej = 1
                    wygrana = pygame.image.load("Obrazy/wygrana.png")
                    # Reveal the secret code in every row.
                    for q in range(1,5):
                        for p in range(0, self.click_runda * 2 + 7):
                            aktualnie_wyswietlane[q][p] = self.kod_los[q - 1]
                    self.screen.blit(wygrana, (0, 300))
                    self.stop = 1
                    if self.wstecz == 1:
                        # Back button pressed: reset the whole game.
                        self.stop = 0
                        self.aktualnie_wyswietlane = 1
                        self.mozna_grac = 0
                        self.twoj_kod = [0, 0, 0, 0]
                        self.reset = 1
                        Kod.reset(self)
        if self.enter == self.click_runda * 2 + 6 and wygrana_w_ostatiej == 0:
            # All rows used without a win in the last one -> loss.
            przegrana = pygame.image.load("Obrazy/przegrana.png")
            self.screen.blit(przegrana, (0, 300))
            for q in range(1, 5):
                for p in range(0, self.click_runda * 2 + 7):
                    aktualnie_wyswietlane[q][p] = self.kod_los[q - 1]
            self.stop = 1
            if self.wstecz == 1:
                self.stop = 0
                self.aktualnie_wyswietlane = 1
                self.reset = 1
                Kod.reset(self)

    def reset(self):
        # Clear all shared grids and per-game state when self.reset is set.
        if self.reset == 1:
            for iksy in range(1, 5):
                for igreki in range(1, 14):
                    aktualnie_wyswietlane[iksy][igreki] = 0
                    aktualna_podpowiedz[iksy][igreki] = 0
            self.click_vs = 0
            self.click_runda = 0
            self.aktualny_kolor = 0
            self.aktualny_vs = 0
            self.click_kolor = 0
            self.wstecz = 0
            self.click_miejsce = 0
            self.click = pygame.mouse.get_pressed()
            self.click_pole = 0
            self.enter = 0
            self.y_1 = 571
            self.yz = 1
            self.wiersz = 1
            self.kod_los = [0, 0, 0, 0]
            self.reset = 0
            self.mozna_grac = 0
            self.twoj_kod = [0, 0, 0, 0]
            self.mozna_grac = 0
class Ustawianie_Kodu(object):
    """Screen where a player composes the secret four-ball code.

    Chosen colours go into self.twoj_kod; self.mozna_grac is set once the
    code is complete and Enter is pressed.  Comments translated from Polish.
    NOTE(review): the original indentation was lost in this copy; the
    nesting below is the reading consistent with the game's behaviour --
    confirm against the original file.
    """

    def main(self, ilosc_kolorow):
        # ilosc_kolorow = number of colours in play (6, 7 or 8).
        ustawiony_kod = pygame.image.load("Obrazy/ukladanie_kodu.png")
        self.screen.blit(ustawiony_kod, (0,0))
        # Draw the palette for the colour count; x_1/x_2 bound its hit-box.
        if ilosc_kolorow == 6:
            Ustawianie_Kodu.kulki_szesc(self)
            x_1, x_2 = 77, 111
        elif ilosc_kolorow == 7:
            Ustawianie_Kodu.kulki_siedem(self)
            x_1, x_2 = 57, 89
        elif ilosc_kolorow == 8:
            Ustawianie_Kodu.kulki_osiem(self)
            x_1, x_2 = 37, 70
        x_pole_1, x_pole_2 = 122, 155
        if self.click[0]:
            pozycja = pygame.mouse.get_pos()
        else:
            pozycja = (-1, -1)  # sentinel: no click this frame
        # Clicking on one of the four code slots selects it.
        for pole in range(0, 4):
            if pozycja[0] in range(x_pole_1, x_pole_2) and pozycja[1] in range(546, 580):
                self.click_pole = pole
            x_pole_1 += 49
            x_pole_2 += 49
        # Clicking on the colour palette assigns that colour to the slot.
        for kulka in range(1, ilosc_kolorow + 1):
            if pozycja[0] in range(x_1, x_2) and pozycja[1] in range(665, 700):
                self.twoj_kod[self.click_pole] = kulka
            x_1 += 46
            x_2 += 46
        czerwona = pygame.image.load("Obrazy/kulki/czerwona.png")
        zielona = pygame.image.load("Obrazy/kulki/zielona.png")
        niebieska = pygame.image.load("Obrazy/kulki/niebieska.png")
        blekitna = pygame.image.load("Obrazy/kulki/blekitna.png")
        rozowa = pygame.image.load("Obrazy/kulki/rozowa.png")
        zolta = pygame.image.load("Obrazy/kulki/zolta.png")
        szara = pygame.image.load("Obrazy/kulki/szara.png")
        czarna = pygame.image.load("Obrazy/kulki/czarna.png")
        # Render the code composed so far.
        x = 122
        for numer in range(4):
            if self.twoj_kod[numer] == 1:
                self.screen.blit(czerwona, (x, 546))
            if self.twoj_kod[numer] == 2:
                self.screen.blit(zielona, (x, 546))
            if self.twoj_kod[numer] == 3:
                self.screen.blit(niebieska, (x, 546))
            if self.twoj_kod[numer] == 4:
                self.screen.blit(blekitna, (x, 546))
            if self.twoj_kod[numer] == 5:
                self.screen.blit(rozowa, (x, 546))
            if self.twoj_kod[numer] == 6:
                self.screen.blit(zolta, (x, 546))
            if self.twoj_kod[numer] == 7:
                self.screen.blit(szara, (x, 546))
            if self.twoj_kod[numer] == 8:
                self.screen.blit(czarna, (x, 546))
            x += 49
        if self.stop == 0:
            # Enter confirms the code once all four slots are filled.
            if self.twoj_kod[0] and self.twoj_kod[1] and self.twoj_kod[2] and self.twoj_kod[3]:
                enter = pygame.key.get_pressed()
                if enter[13]:  # 13 == Return key
                    self.mozna_grac = 1
        if self.wstecz == 1:
            # Back button: show the exit overlay and freeze input.
            wyjscie = pygame.image.load("Obrazy/wyjsc.png")
            self.screen.blit(wyjscie, (0, 300))
            self.stop = 1

    def kulki_szesc(self):
        # Palette strip with six colours.
        k_6 = pygame.image.load("Obrazy/kulki_6.png")
        self.screen.blit(k_6, (62, 650))

    def kulki_siedem(self):
        # Palette strip with seven colours.
        k_7 = pygame.image.load("Obrazy/kulki_7.png")
        self.screen.blit(k_7, (40, 650))

    def kulki_osiem(self):
        # Palette strip with eight colours.
        k_8 = pygame.image.load("Obrazy/kulki_8.png")
        self.screen.blit(k_8, (21, 650))
class Codec:
    """Round-trip a list of strings through one string.

    Each character becomes its code point; code points are joined with ','
    within a string and strings are joined with '.'.  An empty list is
    marked with '\\x00' (unambiguous, since normal payloads only contain
    digits, ',' and '.'): the original decoded encode([]) as [''].
    """

    _EMPTY_LIST = "\x00"

    def encode(self, strs: list[str]) -> str:
        """Encodes a list of strings to a single string."""
        # 'List' was previously used without importing typing, which raises
        # NameError when the class body executes; builtin list[...] is used
        # instead.
        if not strs:
            return self._EMPTY_LIST
        payload = [",".join(str(ord(c)) for c in s) for s in strs]
        return ".".join(payload)

    def decode(self, s: str) -> list[str]:
        """Decodes a single string to a list of strings."""
        if s == self._EMPTY_LIST:
            return []
        # (Debug print removed from the original.)
        return ["".join(chr(int(code)) for code in part.split(',') if code)
                for part in s.split('.')]
|
import datetime
import pytz
from django.conf.global_settings import AUTH_USER_MODEL
from django.db import models
from django.urls import reverse
from django.utils import timezone
def normalize(submission: str):
    """Canonicalize a submitted answer: CRLF -> LF, outer whitespace stripped."""
    unified = submission.replace('\r\n', '\n')
    return unified.strip()
class Problem(models.Model):
    """A contest problem: statement plus optional input blob and expected solution."""
    title = models.CharField(max_length=255)
    slug = models.SlugField(unique=True)
    description = models.TextField()
    input = models.TextField(null=True, blank=True)
    solution = models.TextField(null=True, blank=True)
    def get_deferred_fields(self):
        # NOTE(review): this overrides Model.get_deferred_fields and returns
        # None instead of a set, which can break Django's deferred-field
        # machinery.  Presumably added to placate tooling -- confirm and
        # consider removing.
        pass
    def get_absolute_url(self):
        return reverse('contest:problem', kwargs=dict(slug=self.slug))
    def __str__(self):
        return f'{self.title} ({self.slug})'
class Contest(models.Model):
    """A timed contest holding a set of problems."""
    title = models.CharField(max_length=255)
    start = models.DateTimeField()
    end = models.DateTimeField()
    problems = models.ManyToManyField(Problem)

    @classmethod
    def active(cls):
        """Contests currently running."""
        # timezone.now() (imported at the top of this file but previously
        # unused) respects USE_TZ; the old naive datetime.datetime.now()
        # raises TypeError when compared with aware DateTimeField values.
        now = timezone.now()
        return cls.objects.filter(start__lt=now, end__gt=now)

    @classmethod
    def started(cls):
        """Contests whose start time has passed (running or finished)."""
        now = timezone.now()
        return cls.objects.filter(start__lt=now)

    @property
    def is_active(self):
        return self.start < timezone.now() < self.end

    @property
    def problem_list(self):
        # Defer the heavy text columns; listings only need titles.
        return self.problems.defer('solution', 'input', 'description').order_by('title')

    def __str__(self):
        state = 'active' if self.is_active else 'inactive'
        return f'{self.title!r} ({state})'
class Score(models.Model):
    """Aggregated ACM-style score for one user: solved count plus penalty minutes."""
    user = models.OneToOneField(AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    points = models.IntegerField(default=0)
    minutes = models.IntegerField(default=0)

    @property
    def deferred_submissions(self):
        # Skip the large problem text columns on every query below.
        return Submission.objects.defer(
            'problem__input', 'problem__solution'
        )

    @property
    def correct_submissions(self):
        return self.deferred_submissions.filter(user=self.user, correct=True)

    @property
    def solved_problems(self):
        return {sub.problem for sub in self.correct_submissions}

    def get_first_solution(self, problem):
        """Earliest correct submission for *problem*, or None if unsolved."""
        solved = self.deferred_submissions.filter(user=self.user, problem=problem,
                                                  correct=True).order_by('time')
        if solved:
            return solved[0]
        return None

    def get_bad_attempts(self, problem):
        """Incorrect submissions before the first correct one (all, if unsolved)."""
        first_solution = self.get_first_solution(problem)
        if first_solution is None:
            # Reuse the shared deferred queryset (the original duplicated it).
            attempts = self.deferred_submissions.filter(user=self.user, problem=problem,
                                                        correct=False)
        else:
            attempts = self.deferred_submissions.filter(user=self.user, problem=problem,
                                                        correct=False,
                                                        time__lt=first_solution.time)
        # count() lets the database count rows instead of materializing them.
        return attempts.count()

    def get_time(self, problem):
        """Penalty minutes for a problem: solve time + 20 per bad attempt."""
        solution = self.get_first_solution(problem)
        penalty = 20 * self.get_bad_attempts(problem)
        if not solution:
            return penalty
        active = Contest.active()
        if not active:
            # No active contest: fall back to the first contest's start.
            start = Contest.objects.all()[0].start
        else:
            start = active[0].start
        # BUG FIX: timedelta.seconds wraps at 24 hours; total_seconds()
        # covers the whole interval including full days.
        minutes = int((solution.time - start).total_seconds()) // 60
        return minutes + penalty

    def recompute(self):
        """Re-derive points and minutes from the submission table and save."""
        self.points = len(self.solved_problems)
        self.minutes = sum(self.get_time(p) for p in self.solved_problems)
        self.save()

    def __str__(self):
        return f'{self.user} score ({self.points} pts)'
class Submission(models.Model):
    """One submitted answer; saving one keeps the user's Score current."""
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE)
    user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    # Aware epoch default, so comparisons never mix naive and aware datetimes.
    time = models.DateTimeField(default=datetime.datetime(1970, 1, 1, tzinfo=pytz.utc))
    correct = models.BooleanField(default=False)

    def get_absolute_url(self):
        return reverse('contest:submission', kwargs=dict(pk=self.pk))

    @classmethod
    def grade(cls, problem, user, submission):
        """Build an (unsaved) submission, marked correct when the normalized
        text matches the problem's stored solution."""
        correct = normalize(problem.solution) == normalize(submission)
        # timezone.now() keeps 'time' aware, matching the aware default
        # above; the old naive datetime.datetime.now() mixed naive and
        # aware values.
        return cls(problem=problem, user=user, time=timezone.now(), correct=correct)

    def save(self, *a, **kw):
        super(Submission, self).save(*a, **kw)
        # Keep the leaderboard consistent on every save.
        score, _ = Score.objects.get_or_create(user=self.user)
        score.recompute()

    def __str__(self):
        return (
            f'{self.user.username} - '
            f'{self.problem.title} ({self.problem.slug}) - '
            f'{"correct" if self.correct else "incorrect"} - '
            f'{self.time:%m/%d/%y %I:%M %p}'
        )
|
# Bài 12: Viết hàm
# def find_x(a_list, x)
# trả lại tất cả các vị trí mà x xuất hiện trong a_list, nếu không có thì trả lại -1
a_list = [1,1,1,2,5]

def find_x(a_list, x):
    """Return every index where x occurs in a_list, or -1 if it is absent."""
    positions = [i for i, value in enumerate(a_list) if value == x]
    return positions if positions else -1

print(f'Vi tri tri x xuat hien {find_x(a_list,2)}')
import socket
import struct
import hashlib
import os
import json
import time
# Listening socket for the toy FTP server (localhost only).
sk = socket.socket()
sk.bind(('127.0.0.1',43))  # NOTE(review): port 43 is the privileged whois port -- confirm intended
sk.listen()
def login():
    """Authenticate the connected client; returns the username on success.

    NOTE(review): relies on the module-level socket ``conn`` created in the
    accept loop below this function, and the loops allow 4 attempts even
    though the messages advertise 3 -- confirm both are intended.
    """
    def register():
        '''
        Register a new user: prompt the client for a name and password and
        append "username|md5(username+password)" to user_msg.txt.
        '''
        count = 0
        while count < 4:
            conn.send('请输入注册用户名:'.encode('utf-8'))
            username = conn.recv(1024).decode('utf-8')
            print(username)
            conn.send('请输入注册密码:'.encode('utf-8'))
            password = conn.recv(1024).decode('utf-8')
            # Read existing users via one handle while appending via a second.
            with open('user_msg.txt', encoding='utf-8', mode='r') as f1, open('user_msg.txt', encoding='utf-8',
                                                                              mode='a') as f2:
                lst1 = []
                for line in f1:
                    lst1.append(line.strip().split('|')[0])
                if username.strip() not in lst1:
                    # Credentials stored as unsalted MD5 of username+password
                    # (weak -- fine for an exercise, not for production).
                    md5 = hashlib.md5()
                    md5.update(username.encode('utf-8'))
                    md5.update(password.encode('utf-8'))
                    ret = md5.hexdigest()
                    f2.write(username + '|' + ret + '\n')
                    # f2.write(username + '\n')
                    conn.send(f'{username},恭喜您,注册成功!即将返回主界面!请登陆!'.encode('utf-8'))
                    time.sleep(0.5)
                    return True
                elif username.strip() in lst1:
                    count += 1
                    conn.send(f'用户名已存在!请重新注册。你还有{3 - count}次注册机会。'.encode('utf-8'))
                    time.sleep(0.5)
    '''
    登录
    '''
    # Login: hash the supplied credentials the same way and compare against
    # every stored "username|hash" line.
    count = 0
    while count < 4:
        conn.send('请输入用户名:'.encode('utf-8'))
        print('111111')
        username = conn.recv(1024).decode('utf-8')
        print(username)
        conn.send('请输入密码:'.encode('utf-8'))
        password = conn.recv(1024).decode('utf-8')
        md5 = hashlib.md5()
        md5.update(username.encode('utf-8'))
        md5.update(password.encode('utf-8'))
        ret = md5.hexdigest()
        with open('user_msg.txt', encoding='utf-8') as f:
            l = []
            for i in f:
                l.append(i.strip().split('|')[0])
                if i.strip().split('|')[0] == username and i.strip().split('|')[1] == ret:
                    conn.send('登录成功'.encode('utf-8'))
                    return username
            else:
                # for/else: reached only when no stored line matched.
                # NOTE(review): original indentation was ambiguous in the
                # dump; confirm this else belongs to the for loop.
                if username not in l:
                    count += 1
                    conn.send('不存在用户{},请注册!请按回车键'.format(username).encode('utf-8'))
                    register()
                else:
                    count += 1
                    conn.send(f'用户名或密码错误!请重新登陆!你还有{3 - count}次机会。'.encode('utf-8'))
                    time.sleep(0.6)
# Main accept/serve loop.  NOTE(review): the outer `while True` only
# re-binds `database` and can never advance past the inner loop -- confirm
# it is intentional.
while True:
    database = r'D:\programming_with_python\043从零开始学python\day18\ftp上传和下载\server\database'
    while True:
        conn,addr = sk.accept()
        username = login()
        while True:
            try:
                # 1. receive: two bytes selecting the operation
                #    '1' = upload, '2' = download, 'q' = quit.
                judge = conn.recv(2).decode('utf-8')
                if judge=='1': # upload
                    # 2,3. receive: 4-byte header length, then the JSON file
                    # header carrying the file name and size.
                    fileheader_len = conn.recv(4)
                    fileheader_len = struct.unpack('i',fileheader_len)[0]
                    fileheader = json.loads(conn.recv(fileheader_len).decode('utf-8'))
                    print(fileheader)
                    with open(os.path.join(database,fileheader['filename']),'wb') as f:
                        while fileheader['filesize']>0:
                            # 4. receive: file content in 1 KiB chunks until
                            # the advertised size is exhausted.
                            temp = conn.recv(1024)
                            fileheader['filesize'] -= len(temp)
                            print(fileheader['filesize'])
                            f.write(temp)
                elif judge=='2': # download
                    #
                    try:
                        dirlist_o = os.listdir(database)
                        dirlist_o = json.dumps(dirlist_o).encode('utf-8')
                        # 1. send: the list of downloadable files
                        conn.send(dirlist_o)
                        # 1. receive: the chosen file name
                        filename_down = conn.recv(1024).decode('utf-8')
                        print('filename_down:',filename_down)
                        filesize = os.path.getsize(os.path.join(database, filename_down))
                        print('filesize:',filesize)
                        dic = {'filename': filename_down, 'filesize': filesize}
                        dic = json.dumps(dic).encode('utf-8')
                        m_dic = struct.pack('i', len(dic))
                        print('m_xic',m_dic)
                        # 2,3. send: header length, then the JSON file header
                        conn.send(m_dic)
                        conn.send(dic)
                        print("m_dic",m_dic)
                        with open(os.path.join(database,filename_down),'rb') as f:
                            while filesize > 0:
                                temp = f.read(1024)
                                filesize -= len(temp)
                                # 4. send: file content
                                conn.send(temp)
                        conn.close()
                        break
                    except Exception:
                        # NOTE(review): swallows all download errors silently.
                        break
                elif judge.lower()=='q':
                    break
            except Exception:
                # NOTE(review): swallows all protocol errors and retries.
                continue
        conn.close()
sk.close()
|
"""
наилучшее среднеквадратичное приближениеи
"""
from math import sin, pi, factorial, cos, exp, log
from collections import namedtuple
Table = namedtuple('Table', ['x','y', 'w']) # w = вес функции
eps_const = 0.00001
eps_otn = 0.0001
def fi(x, k):
    """Basis function for the fit: the k-th power monomial, x**k."""
    return pow(x, k)
# Load the table of point coordinates and their weights from a file.
def get_table(filename):
    """Load Table(x, y, w) rows from *filename*.

    Each non-blank line must hold three whitespace-separated floats.
    Fixes over the original: the file is closed even if parsing raises
    (``with``), and blank lines are skipped -- ``if line:`` was always
    true because a blank line is the truthy string ``"\\n"``, so blank
    lines used to crash ``map(float, ...)``.
    """
    data = []
    with open(filename, 'r') as infile:
        for line in infile:
            if line.strip():
                a, b, c = map(float, line.split())
                data.append(Table(a, b, c))
    print(data)  # debug echo of the parsed table (kept from the original)
    return data
# Plot the approximating function together with the original points.
def print_result(table, A, n):
    import numpy as np
    import matplotlib.pyplot as plt
    dx = 10
    if len(table) > 1:
        dx = (table[1].x - table[0].x)
    # Build the approximating polynomial on a dense grid, extended by dx
    # on each side of the table's x-range.
    x = np.linspace(table[0].x - dx, table[-1].x + dx, 100)
    y = []
    for i in x:
        tmp = 0;
        for j in range(0, n + 1):
            tmp += fi(i, j) * A[j]
        y.append(tmp)
    plt.plot(x, y)
    # Plot the original table points.
    x1 = [a.x for a in table]
    y1 = [a.y for a in table]
    plt.plot(x1, y1, 'kD', color = 'green', label = '$исходная таблица$')
    plt.grid(True)
    plt.legend(loc = 'best')
    # Pad the y-axis by 3% of the total value range.
    miny = min(min(y), min(y1))
    maxy = max(max(y), max(y1))
    dy = (maxy - miny) * 0.03
    plt.axis([table[0].x - dx, table[-1].x + dx, miny - dy, maxy + dy])
    plt.show()
    return
# Build the linear system (normal equations) for the given degree.
def get_slau_matrix(table, n):
    """Assemble the weighted normal equations for a degree-*n* fit.

    Returns ``(matrix, rhs)``: the (n+1)x(n+1) coefficient matrix and the
    right-hand-side column.
    """
    size = n + 1
    matrix = [[0] * size for _ in range(size)]
    rhs = [0] * size
    for row in range(size):
        for point in table:
            weighted = point.w * fi(point.x, row)
            for column in range(size):
                matrix[row][column] += weighted * fi(point.x, column)
            rhs[row] += weighted * point.y
    return matrix, rhs
# Multiply a matrix by a column vector.
def mult(col, b):
    """Return the matrix-vector product b·col as a list.

    *b* is assumed square with dimension len(col).
    """
    n = len(col)
    return [sum(b[j][k] * col[k] for k in range(n)) for j in range(n)]
# Compute one column of the inverse matrix.
def find_col(a_copy, i_col):
    """Return column *i_col* of the inverse of matrix *a_copy*.

    Solves A*col = e_{i_col} by Gaussian elimination on a working copy,
    leaving *a_copy* untouched.

    Bug fix: when pivot a[i][i] == 0, the original looked for a swap row
    with ``a[j][j] != 0`` (wrong column, diagonal element) and kept
    swapping for every such row; the correct pivot candidate is
    ``a[j][i]`` in the SAME column, and one swap suffices.
    """
    n = len(a_copy)
    a = [row[:] for row in a_copy]
    col = [0 for i in range(0, n)]
    # Augment with the unit vector e_{i_col}.
    for i in range(0, n):
        a[i].append(float(i == i_col))
    # Forward elimination, swapping rows when a zero pivot is met.
    for i in range(0, n):
        if a[i][i] == 0:
            for j in range(i + 1, n):
                if a[j][i] != 0:
                    a[i], a[j] = a[j], a[i]
                    break
        for j in range(i + 1, n):
            d = - a[j][i] / a[i][i]
            for k in range(0, n + 1):
                a[j][k] += d * a[i][k]
    # Back substitution.
    for i in range(n - 1, -1, -1):
        res = 0
        for j in range(0, n):
            res += a[i][j] * col[j]
        col[i] = (a[i][n] - res) / a[i][i]
    return col
# Invert a matrix column by column.
def get_inverse_matrix(a):
    """Return the inverse of square matrix *a*, built via find_col."""
    n = len(a)
    inverse = [[0] * n for _ in range(n)]
    for i_col in range(n):
        column = find_col(a, i_col)
        for row in range(n):
            inverse[row][i_col] = column[row]
    return inverse
# Compute the coefficients of the approximating polynomial.
def get_approx_coef(table, n):
    """Least-squares polynomial coefficients of degree *n* for *table*."""
    matrix, rhs = get_slau_matrix(table, n)
    return mult(rhs, get_inverse_matrix(matrix))
table = get_table("table.txt")
n = int(input("Введите степень полинома n = "))
A = get_approx_coef(table, n)
print_result(table, A, n)
|
# This is just a lot of long shit
from telegram import InlineKeyboardButton
CONFIRM = [[InlineKeyboardButton("Confirm", callback_data='Confirm'),
InlineKeyboardButton("Back", callback_data='Back')]]
CLASSES_BUTTONS = [[InlineKeyboardButton("Barbarian", callback_data='Barbarian'),
InlineKeyboardButton("Bard", callback_data='Bard'),
InlineKeyboardButton("Cleric", callback_data='Cleric'),
InlineKeyboardButton("Druid", callback_data='Druid')],
[InlineKeyboardButton("Fighter", callback_data='Fighter'),
InlineKeyboardButton("Monk", callback_data='Monk'),
InlineKeyboardButton("Paladin", callback_data='Paladin'),
InlineKeyboardButton("Ranger", callback_data='Ranger')],
[InlineKeyboardButton("Rogue", callback_data='Rogue'),
InlineKeyboardButton("Sorcerer", callback_data='Sorcerer'),
InlineKeyboardButton("Warlock", callback_data='Warlock'),
InlineKeyboardButton("Wizard", callback_data='Wizard')]]
RACES_BUTTONS = [[InlineKeyboardButton("Dragonborn", callback_data="Dragonborn"),
InlineKeyboardButton("Dwarf", callback_data="Dwarf"),
InlineKeyboardButton("Elf", callback_data="Elf"),
InlineKeyboardButton("Gnome", callback_data="Gnome"),
InlineKeyboardButton("Half-Elf", callback_data="Half-Elf")],
[InlineKeyboardButton("Halfling", callback_data="Halfling"),
InlineKeyboardButton("Half-Orc", callback_data="Half-Orc"),
InlineKeyboardButton("Human", callback_data="Human"),
InlineKeyboardButton("Tiefling", callback_data="Tiefling")]]
# Flavor text (and image URL where available) shown to the player for each
# selectable option: classes, races, alignments, and the confirmation step.
DESCRIPTIONS = {
    "areyousure": "Do you want to confirm your attributes? Going back will start attribute assignment over",
    "Barbarian" : ("https://i.pinimg.com/originals/d3/e1/4b/d3e14b7c318ff2ddb4fe25fda8757d4f.jpg\nTo a barbarian, though, "
                   "civilization is no virtue, but a sign of weakness. The strong embrace their animal nature—keen instincts, "
                   "primal physicality, and ferocious rage. ... Barbarians come alive in the chaos of combat.\n They can "
                   "enter a berserk state where rage takes over, giving them superhuman strength and resilience."),
    "Bard" : ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/369/420/618/636272705936709430.png\n"
              "The bard is a standard playable character class in many editions of the Dungeons & Dragons fantasy role-playing game."
              " The bard class is versatile, capable of combat and of magic (divine magic in earlier editions, arcane magic in later editions)."
              "Bards use their artistic talents to induce magical effects."),
    "Cleric" : ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/371/420/618/636272706155064423.png\n"
                "Clerics are versatile figures, both capable in combat and skilled in the use of divine magic (thaumaturgy)."
                " Clerics are powerful healers due to the large number of healing and curative magics available to them. "
                "With divinely-granted abilities over life or death, they are also able to repel or control undead creatures."),
    "Druid" : ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/346/420/618/636272691461725405.png)\n"
               "Whether calling on the elemental forces of nature or emulating the creatures of the animal world, druids are an embodiment of nature's resilience, cunning, and fury."
               " They claim no mastery over nature, but see themselves as extensions of nature's indomitable will."),
    "Fighter" : ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/359/420/618/636272697874197438.png\n"
                 "Fighters share an unparalleled mastery with weapons and armor, and a thorough knowledge of the skills of combat. "
                 "They are well acquainted with death, both meting it out and staring it defiantly in the face. You must have a Dexterity or Strength score of 13 or higher in order to multiclass in or out of this class."),
    "Monk": ("https://www.seekpng.com/png/detail/107-1073422_monk-d-d-monk.png\n"
             "Monks of the Way of the Open Hand are the ultimate masters of martial arts Combat, whether armed or unarmed."
             "They learn Techniques to push and trip their opponents, manipulate ki to heal damage to their bodies, and practice advanced meditation that can protect them from harm."),
    "Paladin": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/365/420/618/636272701937419552.png\n"
                "Oath of Devotion. The Oath of Devotion binds a paladin to the loftiest ideals of justice, virtue, and order."
                "Sometimes called cavaliers, white knights, or holy warriors, these paladins meet the ideal of the knight in shining armor, acting with honor in pursuit of justice and the greater good."),
    "Ranger": ("https://lootthebody.files.wordpress.com/2015/09/teagan_2.jpg?w=736\n"
               "Far from the bustle of cities and towns, past the hedges that shelter the most distant farms from the terrors of the wild,"
               " amid the dense-packed trees of trackless forests and across wide and empty plains, rangers keep their unending watch."),
    "Rogue": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/384/420/618/636272820319276620.png\n"
              "Rogues rely on skill, stealth, and their foes' vulnerabilities to get the upper hand in any situation. They have a knack for finding the solution to just about any problem, demonstrating a resourcefulness and versatility that is the cornerstone of any successful adventuring party"),
    "Sorcerer": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/485/420/618/636274643818663058.png\n"
                 "Sorcerers carry a magical birthright conferred upon them by an exotic bloodline, some otherworldly influence, or exposure to unknown cosmic forces. No one chooses sorcery; the power chooses the sorcerer."
                 "You must have a Charisma score of 13 or higher in order to multiclass in or out of this class."),
    "Warlock": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/375/420/618/636272708661726603.png\n"
                "Warlocks are seekers of the knowledge that lies hidden in the fabric of the multiverse. Through pacts made with mysterious beings of supernatural power, warlocks unlock magical effects both subtle and spectacular. You must have a Charisma score of 13 or higher in order to multiclass in or out of this class."),
    "Wizard": ("https://vignette.wikia.nocookie.net/forgottenrealms/images/c/c0/Wizard_PHB5e.jpg/revision/latest?cb=20140921185413\n"
               "Wizards are supreme magic-users, defined and united as a class by the spells they cast. Drawing on the subtle weave of magic that permeates the cosmos, wizards cast spells of explosive fire, arcing lightning, subtle deception, brute-force mind control, and much more."),
    "Dragonborn": ("https://vignette.wikia.nocookie.net/forgottenrealms/images/3/3b/Dragonborn-5e.png/revision/latest?cb=20200308125107"
                   "\nBorn of dragons, as their name proclaims, the dragonborn walk proudly through a world that greets them with fearful incomprehension."
                   "Shaped by draconic gods or the dragons themselves, dragonborn originally hatched from dragon eggs as a unique race, combining the best attributes of dragons and humanoids."),
    "Dwarf": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/254/420/618/636271781394265550.png"
              "\nMost dwarves are lawful, believing firmly in the benefits of a well-ordered society."
              " They tend toward good as well, with a strong sense of fair play and a belief that everyone deserves to share in the benefits of a just order."
              " Size. Dwarves stand between 4 and 5 feet tall and average about 150 pounds."),
    "Elf": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/7/639/420/618/636287075350739045.png"
            "\nSlender and Graceful\nWith their unearthly grace and fine features, elves appear hauntingly beautiful to humans and members of many other races."
            " They are slightly shorter than humans on average, ranging from well under 5 feet tall to just over 6 feet. They are more slender than humans, weighing only 100 to 145 pounds."),
    "Gnome": ("https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcRowlh18XE_Zj-dBrIF7gKHPQ-MbFDZm6xn2rREDEO0M0kNnmrd&usqp=CAU"
              "\nGnomes are light-hearted, and even the tricksters amongst them favor harmless pranks over vicious schemes. Size."
              " Gnomes are between 3 and 4 feet tall and weigh around 40 pounds. Your size is Small."),
    "Half-Elf": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/481/420/618/636274618102950794.png"
                 "\nHalf-elves share the chaotic bent of their elven heritage. They both value personal freedom and creative expression, demonstrating neither love of leaders nor desire for followers."
                 " They chafe at rules, resent others' demands, and sometimes prove unreliable, or at least unpredictable."),
    "Halfling": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/256/420/618/636271789409776659.png"
                 "\nAs a rule, they are good-hearted and kind, hate to see others in pain, and have no tolerance for oppression. They are also very orderly and traditional, leaning heavily on the support of their community and the comfort of the old ways."
                 " Size. Halflings average about 3 feet tall and weigh about 40 pounds."),
    "Half-Orc": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/466/420/618/636274570630462055.png"
                 "\nHalf-Orcs raised among orcs and willing to live out their lives among them are usually evil. "
                 "Size: Half-Orcs are somewhat larger and bulkier than Humans, and they range from 5 to well over 6 feet tall. Your size is Medium. Speed: Your base walking speed is 30 feet."),
    "Human": ("https://media-waterdeep.cursecdn.com/avatars/thumbnails/6/258/420/618/636271801914013762.png"
              "\nHumans are the most adaptable and ambitious people among the common races. They have widely varying tastes, morals, and customs in the many different lands where they have settled."
              " When they settle, though, they stay: they build cities to last for the ages, and great kingdoms that can persist for long centuries."),
    "Tiefling": ("https://dndguide.com/wp-content/uploads/2018/04/Tiefling.png"
                 "\nTiefling Traits\nTieflings share certain Racial Traits as a result of their Infernal descent. "
                 "Ability Score Increase: Your Intelligence score increases by 1, and your Charisma score increases by 2. Age: Tieflings mature at the same rate as Humans but live a few years longer."),
    # Bug fix: the original value read "TODLawful Good. ... injustice.O" --
    # a leftover "TODO" marker split around the text; the stray "TOD"/"O"
    # fragments are removed.
    "Lawful Good": ("Lawful Good. A lawful good character acts as a good person is expected or required to act. He combines a commitment to oppose evil with the discipline to fight relentlessly."
                    " He tells the truth, keeps his word, helps those in need, and speaks out against injustice."),
    "Neutral Good": ("A neutral good character does the best that a good person can do. He is devoted to helping others. He works with kings and magistrates but does not feel beholden to them."
                     " Neutral good is the best alignment you can be because it means doing what is good without bias for or against order."),
    "Chaotic Good": ("A chaotic good character acts as his conscience directs him with little regard for what others expect of him. He makes his own way, but he's kind and benevolent."
                     " He believes in goodness and right but has little use for laws and regulations. He hates it when people try to intimidate others and tell them what to do."),
    "Lawful Neutral":("Lawful neutral is the best alignment you can be because it means you are reliable and honorable without being a zealot."
                      " Lawful neutral can be a dangerous alignment when it seeks to eliminate all freedom, choice, and diversity in society. ..."
                      " Subordinates will be treated as is due their station within society."),
    "Neutral": "Neutral is the best alignment you can be because it means you act naturally, without prejudice or compulsion. "
               "Neutral can be a dangerous alignment when it represents apathy, indifference, and a lack of conviction",
    "Chaotic Neutral":("A chaotic neutral character follows his whims. He is an individualist first and last. "
                       "He values his own liberty but doesn't strive to protect others' freedom. He avoids authority, resents restrictions, and challenges traditions."),
    "Lawful Evil":("Lawful Evil. A lawful evil villain methodically takes what he wants within the limits of his code of conduct without regard for whom it hurts."
                   " He cares about tradition, loyalty, and order but not about freedom, dignity, or life. He plays by the rules but without mercy or compassion."),
    "Neutral Evil":("A neutral evil character is typically selfish and has no qualms about turning on allies-of-the-moment, and usually makes allies primarily to further their own goals. ... "
                    "Another valid interpretation of neutral evil holds up evil as an ideal, doing evil for evil's sake and trying to spread its influence."),
    "Chaotic Evil":("A chaotic evil character does whatever his greed, hatred, and lust for destruction drive him to do. He is hot-tempered, vicious, arbitrarily violent, and unpredictable. ..."
                    " Chaotic evil beings believe their alignment is the best because it combines self-interest and pure freedom.")
}
# The nine alignments as a 3x3 keyboard (good/neutral/evil rows).
ALIGNMENT_BUTTONS = [[InlineKeyboardButton("Lawful Good", callback_data="Lawful Good"),
                      InlineKeyboardButton("Neutral Good", callback_data="Neutral Good"),
                      InlineKeyboardButton("Chaotic Good", callback_data="Chaotic Good")],
                     [InlineKeyboardButton("Lawful Neutral", callback_data="Lawful Neutral"),
                      InlineKeyboardButton("Neutral", callback_data="Neutral"),
                      InlineKeyboardButton("Chaotic Neutral", callback_data="Chaotic Neutral")],
                     [InlineKeyboardButton("Lawful Evil", callback_data="Lawful Evil"),
                      InlineKeyboardButton("Neutral Evil", callback_data="Neutral Evil"),
                      InlineKeyboardButton("Chaotic Evil", callback_data="Chaotic Evil")]]
def ATTRIBUTE_MENU(attr):
    """Build a single-row inline keyboard with one button per value in *attr*;
    each button's callback_data equals its label."""
    row = [InlineKeyboardButton(value, callback_data=value) for value in attr]
    return [row]
class Race:
    """Character race choice and the attribute bonuses it grants."""

    # Attribute bonuses per (race, sub-race).
    # NOTE(review): "Mountai Dwarf" looks like a typo for "Mountain Dwarf",
    # but it is a runtime key that callers may match -- left unchanged.
    SUB_RACE = {
        "Dwarf": {
            "Hill Dwarf": {"con": 2, "wis": 1},
            "Mountai Dwarf": {"str": 2, "con": 2},
        },
        "Elf": {
            "Drow": {"dex": 2, "cha": 1},
            "High Elf": {"dex": 2, "int": 1},
            "Wood Elf": {"dex": 2, "wis": 1},
        },
        "Gnome": {
            "Deep Gnome": {"int": 2, "dex": 1},
            "Rock Gnome": {"int": 2, "con": 1},
        },
        "Half-Elf": {
            "Half-Elf": {"cha": 2, "extra": 2},
        },
    }

    def __init__(self):
        # Placeholder values until set_race() is called.
        self.name = "nome"
        self.subrace = "subrace"
        # All six ability modifiers plus the free "extra" points, zeroed.
        self.attr_mod = dict.fromkeys(
            ("str", "dex", "con", "int", "wis", "cha", "extra"), 0
        )

    def set_race(self, name, subrace):
        """Record the chosen race/sub-race and add its attribute bonuses."""
        self.name = name
        self.subrace = subrace
        for attribute, bonus in self.SUB_RACE[name][subrace].items():
            self.attr_mod[attribute] += bonus
LEVELS = {
"1" :{
"next_lvl" :300,
"prof": 2
},
"2" :{
"next_lvl" :900,
"prof": 2
},
"3" :{
"next_lvl" :2700,
"prof": 2
},
"4" :{
"next_lvl" :6500,
"prof": 2
},
"5" :{
"next_lvl" :14000,
"prof": 3
},
"6" :{
"next_lvl" :23000,
"prof": 3
},
"7" :{
"next_lvl" :34000,
"prof": 3
},
"8" :{
"next_lvl" :48000,
"prof": 3
},
"9" :{
"next_lvl" :64000,
"prof": 4
},
"10":{
"next_lvl" :85000,
"prof": 4
},
"11":{
"next_lvl" :100000,
"prof": 4
},
"12":{
"next_lvl" :120000,
"prof": 4
},
"13":{
"next_lvl" :140000,
"prof": 5
},
"14":{
"next_lvl" :165000,
"prof": 5
},
"15":{
"next_lvl" :195000,
"prof": 5
},
"16":{
"next_lvl" :225000,
"prof": 5
},
"17":{
"next_lvl" :26500,
"prof": 6
},
"18":{
"next_lvl" :30500,
"prof": 6
},
"19":{
"next_lvl" :355000,
"prof": 6
},
"20":{
"prof": 6
}
}
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure lots of actions in the same target don't cause exceeding command
line length.
"""
import sys

if sys.platform == 'win32':
    # Bug fix: the Python 2 print statement is a SyntaxError on Python 3;
    # print() with a single argument behaves identically on both.
    print("This test is currently disabled: https://crbug.com/483696.")
    sys.exit(0)

import TestGyp

test = TestGyp.TestGyp()

test.run_gyp('many-actions.gyp')
test.build('many-actions.gyp', test.ALL)
# The gyp file declares 200 actions; every generated header must exist.
for i in range(200):
    test.built_file_must_exist('generated_%d.h' % i)
test.pass_test()
|
from arago.actors import Router
class RoundRobinRouter(Router):
    """Routes received messages to its children in a round-robin fashion."""

    def __init__(self, name=None, *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
        # Infinite generator cycling over children; ends only when the
        # children list becomes empty.
        self._next = self._round_robin()

    def _round_robin(self):
        """Yield children in order, restarting from the first indefinitely."""
        while len(self._children) >= 1:
            for child in self._children:
                yield child
        # Bug fix: the original ended with `raise StopIteration`, which
        # inside a generator is converted to RuntimeError on Python 3.7+
        # (PEP 479). Falling off the end / returning signals exhaustion
        # to next() callers exactly as intended.

    def _route(self, msg):
        """Pick the next child to receive *msg*; crash the actor loop when
        no children remain."""
        try:
            return next(self._next)
        except StopIteration:
            # NOTE(review): ActorCrashedError is not imported anywhere in
            # this file (NameError at runtime) -- confirm its origin.
            self._loop.kill(ActorCrashedError)
|
import dash_bootstrap_components as dbc
from dash import html
# Three-section demo accordion (dash-bootstrap-components example layout).
accordion = html.Div(
    dbc.Accordion(
        [
            dbc.AccordionItem(
                [
                    html.P("This is the content of the first section"),
                    dbc.Button("Click here"),
                ],
                title="Item 1",
            ),
            dbc.AccordionItem(
                [
                    html.P("This is the content of the second section"),
                    dbc.Button("Don't click me!", color="danger"),
                ],
                title="Item 2",
            ),
            dbc.AccordionItem(
                "This is the content of the third section",
                title="Item 3",
            ),
        ],
    )
)
|
from django.contrib import admin
from skills.models import Skill
class SkillsAdmin(admin.ModelAdmin):
    """Admin list configuration for Skill: visible columns, link columns,
    title search and 20-row pagination."""
    list_display = ('id', 'title', 'is_active')
    list_display_links = ('id', 'title')
    search_fields = ['title']
    list_per_page = 20
admin.site.register(Skill, SkillsAdmin)
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:utils.py
# @Author: Michael.liu
# @Date:2020/6/3 17:49
# @Desc: this code is ....
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import pickle as pkl

# tf_preprocess
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy.sparse import coo_matrix
def clean_df(df, training=True):
    """Drop high-cardinality ID-like columns; also drop 'id' when training.

    Bug fix: the original combined ``inplace=True`` with reassignment --
    ``DataFrame.drop(..., inplace=True)`` returns None, so ``df`` became
    None and the subsequent ``df.drop(['id'], ...)`` raised
    AttributeError. The function now returns a new frame.
    """
    df = df.drop(
        ['site_id', 'app_id', 'device_id', 'device_ip', 'site_domain',
         'site_category', 'app_domain', 'app_category'],
        axis=1,
    )
    if training:
        df = df.drop(['id'], axis=1)
    return df
def load_df(filename, training=True, **csv_options):
    """Read a CSV file and reduce it to the selected feature columns."""
    frame = pd.read_csv(filename, header=0, **csv_options)
    # Column cleaning was superseded by feature selection:
    # frame = clean_df(frame, training=training)
    frame = select_df(frame, training=training)
    return frame
def select_df(df,training=True):
    """Keep only the 30 top-ranked columns listed in feature.csv.

    NOTE(review): 'feature.csv' is read relative to the CWD, and the
    trailing drop of 'id' raises KeyError unless 'id' is among the
    selected columns -- confirm both are intended.
    """
    features = pd.read_csv('feature.csv')
    x_columns = features.head(30)['feature'].tolist()
    df = df[x_columns]
    if training:
        df = df.drop(['id'], axis=1)
    return df
# Push new files to Github within existing repository
# Go into the correct terminal in the directory (ex - HelloWorld)
# git status
# see files which have not been committed, if required
# git add .
# git status
# see files turn green, meaning that you've staged your local updates
# git commit -m 'add comment here about what you changed'
# git status
# git push
# check the 'HelloWorld' or other repository for updates in Github
# To create new repo...
# Go into the correct terminal in the directory (ex - HelloWorld)
# git init
# git status
# git add .
# git status
# git commit -m 'add comment here about what you changed'
# git status
# git push
# Note: something about defaulting to remote location if not specified which
# should be GitHub for both HelloWorld and python-workshop at this point but
# if you make a new repo you might have to go through a different process
|
def printBoard(board):
    """Print the 8x8 board: each cell followed by a space, one row per line."""
    for r in range(8):
        row_text = "".join("%s " % board[r][c] for c in range(8))
        print(row_text)
def isSafe(board, row, col):
    """True when a queen at (row, col) is not attacked by any queen placed
    in the columns to its left (queens are placed column by column)."""
    # Same row, to the left.
    if any(board[row][c] == 1 for c in range(col)):
        return False
    # Upper-left diagonal.
    if any(board[r][c] == 1
           for r, c in zip(range(row, -1, -1), range(col, -1, -1))):
        return False
    # Lower-left diagonal.
    if any(board[r][c] == 1
           for r, c in zip(range(row, 8, 1), range(col, -1, -1))):
        return False
    return True
def findSafe(board, col):
    """Backtracking step: place queens in columns col..7; True on success.

    Mutates *board* in place; on failure the column is left cleared.
    """
    if col >= 8:
        return True
    for row in range(8):
        if not isSafe(board, row, col):
            continue
        board[row][col] = 1
        if findSafe(board, col + 1):
            return True
        board[row][col] = 0  # backtrack
    return False
def solve():
    """Solve eight queens on a fresh board; print the solution (or
    'No solution') and return whether one was found."""
    board = [[0] * 8 for _ in range(8)]
    if not findSafe(board, 0):
        print("No solution")
        return False
    printBoard(board)
    return True
def main():
    # Run the eight-queens solver once.
    solve()
main()
|
from mago import *
from questi.models import *
from django.contrib.auth.models import User
# Bulk-import a questionnaire and its questions into the Django models.
# NOTE(review): depends on names supplied by `from mago import *`
# (nom_cl, name_proff, nom_courr, expl, refe, data, data_q) -- confirm.
clas = classe.objects.get(name_classe = nom_cl)
pro = prof.objects.get(name = name_proff)
cu = cours.objects.get(name_classe = clas.id, name_prof = pro.id)
questionnair = cu.questionnaire_set.create(nom_du_cours = nom_courr, nom_prof = name_proff,explication = expl, reference = refe)
quest_ref = questionnaire.objects.get(nom_prof = name_proff, reference = refe)
i = 0
j = 0
k = 0  # NOTE(review): k and p are never used below -- confirm dead code
p = 1
while i < len(data):
    # Last element of each answer row is the id of the correct answer.
    vrai = data_q[j][-1]
    if len(data_q[j]) == 3:
        # Two answer options.
        quest_ref.questions_set.create(reference = refe,nom_du_cours = nom_courr,quest = data[i][0],rep_1 = data_q[j][0],rep_2 = data_q[j][1], rep_tru_id= vrai)
        print(1)
    elif len(data_q[j]) == 4:
        # Three answer options.
        quest_ref.questions_set.create(reference = refe,nom_du_cours = nom_courr,quest = data[i][0],rep_1 = data_q[j][0],rep_2 = data_q[j][1],rep_3 = data_q[j][2], rep_tru_id = vrai)
        print(2)
    elif len(data_q[j]) == 5:
        # Four answer options.
        quest_ref.questions_set.create(reference = refe,nom_du_cours = nom_courr,quest = data[i][0],rep_1 = data_q[j][0],rep_2 = data_q[j][1],rep_3 = data_q[j][2],rep_4 = data_q[j][3],rep_tru_id = vrai)
        print(3)
    i += 1
    j += 1
|
#!/usr/bin/env python
# a bar plot with errorbars
import barPlot_general
if __name__ == "__main__":
relu16 = (82.46, 87.332, 91.416)
nrelu16= (85.352, 88.836, 94.084)
relu16_std = (1.542741067, 1.321616435, 1.475120334)
nrelu16_std = (1.691558453, 1.807478907, 1.037559637)
num16 = len(relu16)
relu32 = (87.624, 91.708, 93.20)
nrelu32= (90.874, 93.25, 94.18)
relu32_std = (1.667597074, 0.8405176976, 1.068770321)
nrelu32_std = (1.202114803, 1.518354372, 0.8003749122)
num32 = len(relu32)
relu64 = (92.374, 94, 93.418)
nrelu64= (93.248, 93.666, 93.75)
relu64_std = (0.4769486346, 0.9927738917, 1.386531644)
nrelu64_std = (0.7017620679, 1.526689228, 0.9672641831)
res_relu = relu16 + relu32 + relu64
res_nrelu = nrelu16 + nrelu32 + nrelu64
err_relu = relu16_std + relu32_std + relu64_std
err_nrelu = nrelu16_std + nrelu32_std + nrelu64_std
labels = barPlot_general.do_labels(3,[16,32,64])
barPlot_general.do_plot("Sports 8", res_relu, res_nrelu, err_relu, err_nrelu, labels)
|
def sum1(a, b):
    """Return the sum of *a* and *b* after coercing both with int()."""
    return sum(map(int, (a, b)))
def sum2(a, b, c):
    """Return the sum of *a*, *b* and *c* after coercing each with int()."""
    return sum(map(int, (a, b, c)))
def p():
    """Return the constant string "1"."""
    result = "1"
    return result
def aa():
    """Return the constant string "abc"."""
    letters = "abc"
    return letters
|
a=str(input("enter the word"))
l=list(a)
b=len(l)
v=['a','e','i','o','u']
c=len(v)
e=[]
for i in range(0,b):
for j in range(0,c):
if(l[i]==v[j]):
break
else:
e.append(l[i])
print(e)
|
import warnings
from unittest import mock
import pytest
from rubicon_ml import domain
from rubicon_ml.client import Project, Rubicon
from rubicon_ml.exceptions import RubiconException
class MockCompletedProcess:
    """Minimal stand-in for ``subprocess.CompletedProcess``: carries only
    the two attributes the code under test reads."""

    def __init__(self, stdout="", returncode=0):
        self.stdout, self.returncode = stdout, returncode
def test_properties():
    """Project exposes the wrapped domain object's fields as properties."""
    domain_project = domain.Project(
        "Test Project",
        description="a test project",
        github_url="github.com",
        training_metadata=domain.utils.TrainingMetadata([("test/path", "SELECT * FROM test")]),
    )
    project = Project(domain_project)
    assert project.name == "Test Project"
    assert project.description == "a test project"
    assert project.github_url == "github.com"
    assert project.training_metadata == domain_project.training_metadata.training_metadata[0]
    assert project.created_at == domain_project.created_at
    assert project.id == domain_project.id
def test_get_branch_name(project_client):
    """_get_branch_name decodes/strips `git rev-parse --abbrev-ref HEAD`."""
    project = project_client
    with mock.patch("subprocess.run") as mock_run:
        mock_run.return_value = MockCompletedProcess(stdout=b"branch-name\n")
        expected = [
            mock.call(["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True),
        ]
        assert project._get_branch_name() == "branch-name"
        assert mock_run.mock_calls == expected
def test_get_commit_hash(project_client):
    """_get_commit_hash decodes/strips `git rev-parse HEAD`."""
    project = project_client
    with mock.patch("subprocess.run") as mock_run:
        mock_run.return_value = MockCompletedProcess(stdout=b"abcd0000\n")
        expected = [
            mock.call(["git", "rev-parse", "HEAD"], capture_output=True),
        ]
        assert project._get_commit_hash() == "abcd0000"
        assert mock_run.mock_calls == expected
def test_get_identifiers(project_client):
    """A project identifies itself by name, with no experiment id."""
    project = project_client
    project_name, experiment_id = project._get_identifiers()
    assert project_name == project.name
    assert experiment_id is None
def test_create_experiment_with_auto_git():
    """With auto_git_enabled, logging an experiment issues the expected
    sequence of git subprocess calls."""
    with mock.patch("subprocess.run") as mock_run:
        mock_run.return_value = MockCompletedProcess(stdout=b"test", returncode=0)
        rubicon = Rubicon("memory", "test-root", auto_git_enabled=True)
        project = rubicon.create_project("Test Project A")
        project.log_experiment()
        expected = [
            mock.call(["git", "rev-parse", "--git-dir"], capture_output=True),
            mock.call(["git", "remote", "-v"], capture_output=True),
            mock.call(["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True),
            mock.call(["git", "rev-parse", "HEAD"], capture_output=True),
        ]
        assert mock_run.mock_calls == expected
    # Clear the in-memory store so later tests start clean.
    rubicon.repository.filesystem.store = {}
def test_experiments_log_and_retrieval(project_client):
    """Logged experiments are attached to the project and all retrievable."""
    project = project_client
    experiment1 = project.log_experiment(
        name="exp1", training_metadata=[("test/path", "SELECT * FROM test")]
    )
    experiment2 = project.log_experiment(name="exp2")
    assert experiment1._domain.project_name == project.name
    assert len(project.experiments()) == 2
    assert experiment1.id in [e.id for e in project.experiments()]
    assert experiment2.id in [e.id for e in project.experiments()]
def test_experiment_by_id(rubicon_and_project_client):
    """experiment(id=...) returns exactly the matching experiment."""
    project = rubicon_and_project_client[1]
    _experiment = project.log_experiment(tags=["x"])
    project.log_experiment(tags=["y"])
    experiment = project.experiment(id=_experiment.id)
    assert experiment.id == _experiment.id
def test_experiment_by_name(project_client):
    """experiment(name=...) looks an experiment up by its name."""
    project = project_client
    project.log_experiment(name="exp1")
    experiment = project.experiment(name="exp1")
    assert experiment.name == "exp1"
def test_experiment_warning(project_client, test_dataframe):
    """Duplicate names warn and return the most recently logged experiment.

    NOTE(review): the test_dataframe fixture is requested but unused here.
    """
    project = project_client
    experiment_a = project.log_experiment(name="exp1")
    experiment_b = project.log_experiment(name="exp1")
    with warnings.catch_warnings(record=True) as w:
        experiment_c = project.experiment(name="exp1")
        assert (
            "Multiple experiments found with name 'exp1'. Returning most recently logged"
        ) in str(w[0].message)
    assert experiment_c.id != experiment_a.id
    assert experiment_c.id == experiment_b.id
def test_experiment_name_not_found_error(project_client):
    """Looking up an unknown experiment name raises RubiconException."""
    project = project_client
    with pytest.raises(RubiconException) as e:
        project.experiment(name="exp1")
    assert "No experiment found with name 'exp1'." in str(e)
def test_experiments_tagged_and(project_client):
    """`qtype='and'` returns only experiments carrying every requested tag."""
    project = project_client
    tagged_with_both = project.log_experiment(tags=["x", "y"])
    project.log_experiment(tags=["x"])  # only one of the two tags
    project.log_experiment(tags=["y"])  # only one of the two tags

    matches = project.experiments(tags=["x", "y"], qtype="and")

    assert len(matches) == 1
    assert tagged_with_both.id in [m.id for m in matches]
def test_experiments_tagged_or(project_client):
    """`qtype='or'` returns experiments carrying any of the requested tags."""
    project = project_client
    tagged_x = project.log_experiment(tags=["x"])
    tagged_y = project.log_experiment(tags=["y"])
    project.log_experiment(tags=["z"])  # matches neither tag

    matches = project.experiments(tags=["x", "y"], qtype="or")
    match_ids = [m.id for m in matches]

    assert len(matches) == 2
    assert tagged_x.id in match_ids
    assert tagged_y.id in match_ids
def test_dataframes_recursive(project_client, test_dataframe):
    """`dataframes(recursive=True)` includes project- and experiment-level dataframes."""
    project = project_client
    experiment = project.log_experiment()

    on_project = project.log_dataframe(test_dataframe)
    on_experiment = experiment.log_dataframe(test_dataframe)

    found = project.dataframes(recursive=True)
    found_ids = [d.id for d in found]

    assert len(found) == 2
    assert on_project.id in found_ids
    assert on_experiment.id in found_ids
def test_to_dask_df(rubicon_and_project_client_with_experiments):
    """`to_df(df_type='dask')` contains every experiment and the expected columns."""
    project = rubicon_and_project_client_with_experiments[1]
    dask_df = project.to_df(df_type="dask")

    # compute to pandas df so we can use iloc for easier testing
    pandas_df = dask_df.compute()

    # check that all experiments made it into df (the fixture logs 10)
    assert len(pandas_df) == 10

    # check the cols within the df
    expected_columns = (
        "id", "name", "description", "model_name", "commit_hash", "tags", "created_at",
    )
    for column in expected_columns:
        assert column in pandas_df.columns
def test_to_pandas_df(rubicon_and_project_client_with_experiments):
    """`to_df(df_type='pandas')` contains every experiment and the expected columns."""
    project = rubicon_and_project_client_with_experiments[1]
    pandas_df = project.to_df(df_type="pandas")

    # check that all experiments made it into df (the fixture logs 10)
    assert len(pandas_df) == 10

    # check the cols within the df
    expected_columns = (
        "id", "name", "description", "model_name", "commit_hash", "tags", "created_at",
    )
    for column in expected_columns:
        assert column in pandas_df.columns
def test_to_dask_df_grouped_by_commit_hash(rubicon_and_project_client_with_experiments):
    """Grouped dask output yields one dataframe per commit hash, each with the standard columns."""
    project = rubicon_and_project_client_with_experiments[1]
    grouped = project.to_df(df_type="dask", group_by="commit_hash")

    # compute to pandas df so we can use iloc for easier testing
    computed = [ddf.compute() for ddf in grouped.values()]

    # check df was broken into 4 groups
    assert len(computed) == 4

    # column list is loop-invariant, so build it once
    expected_columns = (
        "id",
        "name",
        "description",
        "model_name",
        "commit_hash",
        "tags",
        "created_at",
    )
    for frame in computed:
        for column in expected_columns:
            assert column in frame.columns
def test_to_pandas_df_grouped_by_commit_hash(rubicon_and_project_client_with_experiments):
    """Grouped pandas output yields one dataframe per commit hash, each with the standard columns."""
    project = rubicon_and_project_client_with_experiments[1]
    grouped = project.to_df(df_type="pandas", group_by="commit_hash")

    # check df was broken into 4 groups
    assert len(grouped) == 4

    # column list is loop-invariant, so build it once
    expected_columns = (
        "id",
        "name",
        "description",
        "model_name",
        "commit_hash",
        "tags",
        "created_at",
    )
    for frame in grouped.values():
        for column in expected_columns:
            assert column in frame.columns
def test_to_df_grouped_by_invalid_group(rubicon_and_project_client_with_experiments):
    """`to_df` rejects unknown `group_by` values with a ValueError."""
    project = rubicon_and_project_client_with_experiments[1]

    with pytest.raises(ValueError) as e:
        project.to_df(group_by="INVALID")

    # Match against the raised exception itself: `str(e)` stringifies the
    # ExceptionInfo wrapper (includes traceback location), not just the message.
    assert "`group_by` must be one of" in str(e.value)
|
# Run a tf-faster-rcnn detector over a video, frame by frame, and write
# the annotated result to a new video file.
import matplotlib.image as mpimg
import process
from moviepy.editor import VideoFileClip
import os
import tensorflow as tf
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1

# Checkpoint selection: dataset directory + checkpoint file name.
dataset = "voc_2007_trainval+voc_2012_trainval"
nnet = "res101_faster_rcnn_iter_110000.ckpt"
tfmodel = os.path.join("/home/veon/edu/tf-faster-rcnn-master/data", dataset, nnet)
#'../data', dataset, nnet)

# Fail early with a helpful message if the checkpoint is missing.
if not os.path.isfile(tfmodel + '.meta'):
    raise IOError(('{:s} not found.\nDid you download the proper networks from '
                   'our server and place them properly?').format(tfmodel + '.meta'))

# set config: allow TF to fall back to CPU placement when needed, and grow
# GPU memory on demand instead of grabbing it all up front.
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True

# init session
sess = tf.Session(config=tfconfig)

# Build the architecture that matches the checkpoint's backbone.
if nnet.startswith('vgg16'):
    net = vgg16(batch_size=1)
elif nnet.startswith('res101'):
    net = resnetv1(batch_size=1, num_layers=101)
else:
    # Previously `net` would be unbound here and crash later with a NameError;
    # fail fast with an explicit error instead.
    raise ValueError('Unsupported network checkpoint: {:s}'.format(nnet))

# 21 classes = 20 PASCAL VOC classes + background.
net.create_architecture(sess, "TEST", 21,
                        tag='default', anchor_scales=[8, 16, 32])
saver = tf.train.Saver()
saver.restore(sess, tfmodel)
print('Loaded network {:s}'.format(tfmodel))

# Hand the live session/network to the frame-processing module, which reads
# these module-level attributes inside process.process_image.
process._sess = sess
process._net = net
#process._debug = False

# Run detection on every frame and write the annotated video (no audio).
clip1 = VideoFileClip("../project_video.mp4")
video_clip = clip1.fl_image(process.process_image)
video_clip.write_videofile("../project_out1.mp4", codec='mpeg4', audio=False)
|
# For each of `tc` test cases: read `n` strings and print how many are distinct.
tc = int(input())
for _ in range(tc):
    n = int(input())
    # A set gives O(1) membership/insert; the original list scan made each
    # test case O(n^2) in the worst case.
    seen = set()
    for _ in range(n):
        seen.add(input())
    print(len(seen))
|
'''
Module to manage Checks
'''
from __future__ import absolute_import
from socket import error as socket_error
# Import salt libs
import salt.utils
import logging
import time
log = logging.getLogger(__name__)
# Optional-dependency probe. NOTE(review): the `try` body contains no import,
# so ImportError can never be raised and HAS_LIBS is always True — it looks
# like the intended import statement was removed; confirm what should be
# imported here.
try:
    HAS_LIBS = True
except ImportError:
    HAS_LIBS = False

# Define the module's virtual name
__virtualname__ = 'checks'
def __virtual__():
    """Expose this module under its virtual name when its libraries loaded.

    Returns the virtual name when HAS_LIBS is true, otherwise None
    (the module is then not loaded).
    """
    return __virtualname__ if HAS_LIBS else None
def http(name, **kwargs):
    """Query ``name`` via Salt's ``http.query``, retrying on socket errors.

    Sleeps 5 seconds before the first attempt, then tries up to 10 times,
    stopping at the first success. Returns the query result, or the string
    'nogo' if every attempt raised a socket error.
    """
    time.sleep(5)
    attempt = 0
    while attempt < 10:
        try:
            data = __salt__['http.query'](name, **kwargs)
            break
        except socket_error:
            data = 'nogo'
        attempt += 1
    return data
|
from django.db import models
from django_extensions.db.models import TimeStampedModel
from django_extensions.db.fields.json import JSONField
from documentos.models import Frame, GoalStandard
class ClassifierModel(TimeStampedModel):
    """A named classifier model with its serialized definition and provenance.

    Inherits created/modified timestamps from TimeStampedModel.
    """

    # Serialized model definition/payload stored as JSON.
    json_model = JSONField()
    # Human-readable model name (also used as the string representation).
    name = models.TextField()
    # External identifier on the datatxt service — presumably; verify against callers.
    datatxt_id = models.TextField(blank=True, null=True)
    # Identifier of the testing task associated with this model, if any.
    testing_task_id = models.TextField(blank=True, null=True)
    # Frames used to generate this model.
    generation_frames = models.ManyToManyField(Frame)
    # The goal standard this model was built against.
    goal_standard = models.ForeignKey(GoalStandard)

    def __unicode__(self):
        return self.name
|
# Hierarchical Clustering on mall-customer data (annual income vs. spending score).

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset: columns 3 and 4 are annual income and spending score.
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values

# Create a dendrogram (fixed "dendogram" typo) to help choose the cluster count.
from scipy.cluster import hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('customers')
plt.ylabel('Distance')
plt.show()

# Build, train and run the hierarchical (agglomerative) clustering model.
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(X)

# Plot each cluster. Title fixed: this is hierarchical clustering, not KMeans
# (the previous title "KMeans Clustering using Elbow Method" mislabeled the plot).
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c='red', label='Cluster 0')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s=100, c='blue', label='Cluster 1')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s=100, c='green', label='Cluster 2')
plt.title('Hierarchical Clustering (Ward linkage)')
plt.xlabel('Annual Income')
plt.ylabel('Spending Score')
plt.legend()
plt.show()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.