text stringlengths 8 6.05M |
|---|
from setuptools import setup, find_packages

# Read runtime dependencies from requirements.txt.
# BUG FIX: the file handle was previously left open; `with` closes it.
# Blank lines are now skipped so setuptools never receives an empty
# requirement string.
# NOTE: as before, any line containing '#' anywhere is dropped entirely
# (including a real requirement followed by an inline comment).
with open('requirements.txt') as requirements_file:
    requirements = [
        line.strip() for line in requirements_file
        if line.strip() and '#' not in line]

setup(
    name='gosuslugi-api',
    author='Greg Eremeev',
    author_email='gregory.eremeev@gmail.com',
    version='0.7.0',
    license='BSD-3-Clause',
    url='https://github.com/GregEremeev/gosuslugi-api',
    install_requires=requirements,
    description='Toolset to work with dom.gosuslugi.ru API',
    packages=find_packages(),
    extras_require={'dev': ['ipdb==0.12.2', 'pytest==5.2.1']},
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython'
    ],
    zip_safe=False,
    include_package_data=True
)
|
# Read an integer and print its factorial, built up by repeated
# multiplication. (For inputs <= 0 the loop body never runs and the
# value itself is printed, exactly as before.)
value = int(input())
product = value
for factor in range(1, value):
    product *= factor
print(product)
# Simulate a savings account: monthly compound interest plus a fixed
# monthly deposit, printing the balance after each month.
deposito = float(input("Deposito inicial: "))
taxa = float(input("Taxa de juros: "))
investimento = float(input("Deposito mensal: "))
mes = int(input("Entre com o numero de meses: "))

saldo = deposito
for render in range(1, mes + 1):
    # Apply this month's interest, then add the monthly deposit.
    saldo += saldo * (taxa / 100) + investimento
    print(f"Saldo do mês {render} é de R${saldo:5.2f}.")

# Total earnings = final balance minus the initial deposit only
# (monthly deposits are counted as gains, as in the original).
valor_obtido = saldo - deposito
print(f"O ganho obtido com os juros foi de R${valor_obtido:8.2f}.")
# Rotates the chunk's bounding box to align with the coordinate system of the
# active chunk. The bounding box size is kept unchanged.
# Compatibility: Agisoft PhotoScan Professional 1.1.0
import PhotoScan
import math
doc = PhotoScan.app.document
chunk = doc.chunk
# 4x4 transform from chunk-internal coordinates to world coordinates.
T = chunk.transform.matrix
# World-space position of the chunk origin (homogeneous point [0,0,0,1]).
v_t = T * PhotoScan.Vector( [0,0,0,1] )
v_t.size = 3
if chunk.crs:
    # Local tangent-plane frame at the chunk origin in the chunk's CRS.
    m = chunk.crs.localframe(v_t)
else:
    # No CRS defined: fall back to the identity frame.
    m = PhotoScan.Matrix().diag([1,1,1,1])
m = m * T
# Uniform scale factor: length of the first row of the rotation+scale part.
s = math.sqrt(m[0,0] ** 2 + m[0,1] ** 2 + m[0,2] ** 2) #scale factor
# Upper-left 3x3 block is rotation*scale; dividing by s leaves a pure rotation.
R = PhotoScan.Matrix( [[m[0,0],m[0,1],m[0,2]], [m[1,0],m[1,1],m[1,2]], [m[2,0],m[2,1],m[2,2]]])
R = R * (1. / s)
reg = chunk.region
# Transposed rotation (inverse for an orthonormal matrix) aligns the region
# with the coordinate system; region size is not modified.
reg.rot = R.t()
chunk.region = reg
"""
制作训练集的tfrecord文件
使用COVID19类,获取数据,之后按照将获取到的数据处理成tfrecord的存储格式,写入到tfrecord,保存到磁盘
"""
import os
import numpy as np
import tensorflow as tf
from Utils import COVID19
def serialize_example(x, mask, lung_mask):
    """Convert x, mask and lung_mask into a tf.train.Example and serialize it.

    Each array is stored as a raw byte string; the shape of `x` is stored
    alongside so the arrays can be restored when the record is parsed.
    """
    # 1. Build the value lists used to create the Feature objects.
    # Feature values must be one-dimensional, so every 2-D image is turned
    # into a byte string with .tobytes() instead of being flattened.
    # The same applies to mask and lung_mask.
    input_features = tf.train.BytesList(value=[x.tobytes()])
    mask_ = tf.train.BytesList(value=[mask.tobytes()])
    lung_mask_ = tf.train.BytesList(value=[lung_mask.tobytes()])
    shape = tf.train.Int64List(value=list(x.shape))
    # 2. Build the Feature mapping.
    features = tf.train.Features(
        feature={
            "image": tf.train.Feature(bytes_list=input_features),
            "mask": tf.train.Feature(bytes_list=mask_),
            "lung_mask": tf.train.Feature(bytes_list=lung_mask_),
            "shape": tf.train.Feature(int64_list=shape)
        }
    )
    # 3. Wrap the Features in an Example.
    example = tf.train.Example(features=features)
    # 4. Serialize the Example to a byte string.
    return example.SerializeToString()
def main(Covid19_dir, TFRecord_save_dir):
    """Convert the COVID19 dataset into GZIP-compressed TFRecord files.

    Parameters
    ----------
    Covid19_dir : str
        Directory holding the raw COVID19 data (passed to COVID19.Covid19).
    TFRecord_save_dir : str
        Directory the TFRecord files are written to (created if missing).
    """
    batch_size = 10
    covid19 = COVID19.Covid19(Covid19_dir, batch_size=batch_size)
    # BUG FIX: the output directory variable previously shadowed the builtin
    # `dir`; os.makedirs(exist_ok=True) also handles missing parents and
    # avoids the check-then-create race of the old exists()/mkdir() pair.
    save_dir = TFRecord_save_dir
    os.makedirs(save_dir, exist_ok=True)
    # 500 samples are stored per TFRecord file.
    num_each_file = 500
    total_file_num = int(np.ceil(covid19.length / num_each_file))
    filename_zip = "train-{:02d}-of-{:02d}.zip"
    # GZIP-compress every record file.
    options = tf.io.TFRecordOptions(compression_type="GZIP")
    counter = 0  # total number of next_batch() calls performed
    for now_num in range(total_file_num):
        file_name = filename_zip.format(now_num, total_file_num)
        print(file_name)
        name = os.path.join(save_dir, file_name)
        with tf.io.TFRecordWriter(name, options=options) as writer:
            # Batches needed per file: num_each_file / batch_size (500/10 = 50).
            for i in range(int(num_each_file / batch_size)):
                counter = counter + 1
                batch_x, batch_mask, batch_lung_mask = covid19.next_batch()
                # Write the samples of this batch one at a time.
                for x, mask, lung_mask in zip(batch_x, batch_mask, batch_lung_mask):
                    writer.write(serialize_example(x, mask, lung_mask))
    print("counter:", counter)
    print("counter*batch_size:", counter * batch_size)
    print("covid19.length:", covid19.length)
if __name__ == "__main__":
    # Generate the TFRecord files for the training set.
    Covid19_dir = "D:\\data\\COVID19\\"
    TFRecord_save_dir = "D:\\data\\COVID19\\TFRecord"
    # Calling main with the two directories performs the full conversion.
    main(Covid19_dir, TFRecord_save_dir)
|
# Read a student's first name, family name and three scores, then print the
# average together with a great/normal/fail verdict.
n = input('enter your name:')
# BUG FIX: this prompt previously asked for the name a second time; it
# actually reads the family name.
f = input('enter your family:')
nomre1 = int(input('enter score one'))
nomre2 = int(input('enter score two'))
nomre3 = int(input('enter score three'))
majmoe = nomre1 + nomre2 + nomre3
miangin = majmoe / 3
if miangin >= 17:
    # TYPO FIX: "grat" -> "great".
    print(n, f, 'average=', miangin, "your condition is great")
elif miangin >= 12:
    # The `< 17` half of the old condition was redundant after the first branch.
    print(n, f, 'average=', miangin, "your condition is normal")
else:
    print(n, f, 'average=', miangin, "your condition is fail")
import unittest
from katas.beta.which_string_is_worth_more import highest_value
class HighestValueTestCase(unittest.TestCase):
    """Unit tests for highest_value.

    Presumably highest_value returns whichever of the two strings is "worth
    more" under the kata's scoring rules -- confirm against the kata module.
    """
    def test_equal_1(self):
        # Mixed-case letters plus digits; the second string is expected back.
        self.assertEqual(highest_value('AaBbCcXxYyZz0189', 'KkLlMmNnOoPp4567'),
                         'KkLlMmNnOoPp4567')
    def test_equal_2(self):
        self.assertEqual(highest_value('HELLO', 'GOODBYE'), 'GOODBYE')
|
class Vertex:
    """A graph vertex carrying a display name and an optional weight."""

    def __init__(self, name, weight=0):
        # Keep the identifying label and the weight (defaults to 0).
        self.name = name
        self.weight = weight

    def get_name(self):
        """Return this vertex's name."""
        return self.name
class Graphs:
    """Undirected graph stored as an adjacency list: vertex -> neighbour list.

    Note: the attribute keeps its original (misspelled) name
    ``adjancy_list`` because returnGraph() hands it directly to callers.
    """

    def __init__(self):
        self.adjancy_list = {}

    def addVertex(self, vertex):
        """Register a vertex with no neighbours; no-op if already present."""
        if self.adjancy_list.get(vertex) is not None:
            return
        self.adjancy_list[vertex] = []

    def addEdge(self, first_vertex, second_vertex):
        """Connect two existing, distinct vertices; return True on success."""
        if (self.adjancy_list.get(first_vertex) is None
                or self.adjancy_list.get(second_vertex) is None):
            return False
        # Don't add the same edge twice.
        if second_vertex in self.adjancy_list[first_vertex]:
            return False
        # BUG FIX: compare with == instead of `is`; identity comparison is
        # unreliable for equal-but-distinct objects (e.g. computed strings).
        if first_vertex == second_vertex:
            return False
        self.adjancy_list[first_vertex].append(second_vertex)
        self.adjancy_list[second_vertex].append(first_vertex)
        return True

    def printGraph(self):
        """Print every vertex together with its neighbour list."""
        for item in self.adjancy_list:
            print(f"{item}:{self.adjancy_list[item]}")

    def returnGraph(self):
        """Expose the underlying adjacency-list dict."""
        return self.adjancy_list

    def removeEdge(self, first_vertex, second_vertex):
        """Remove the edge between two vertices; return True on success."""
        if (self.adjancy_list.get(first_vertex) is None
                or self.adjancy_list.get(second_vertex) is None):
            return False
        # BUG FIX: equality instead of identity (see addEdge).
        if first_vertex == second_vertex:
            return False
        # BUG FIX: removing a non-existent edge used to raise ValueError from
        # list.remove() -- which also made removeVertex crash on any vertex
        # that was not a neighbour. Now it just reports failure.
        if second_vertex not in self.adjancy_list[first_vertex]:
            return False
        self.adjancy_list[first_vertex].remove(second_vertex)
        self.adjancy_list[second_vertex].remove(first_vertex)
        return True

    def removeVertex(self, vertex):
        """Delete a vertex and every edge incident to it."""
        if self.adjancy_list.get(vertex) is None:
            return
        for item in self.adjancy_list:
            self.removeEdge(item, vertex)
        self.adjancy_list.pop(vertex)

    def dfs(self, vertex):
        """Recursive depth-first traversal; returns vertices in visit order."""
        visited = {}
        result = []
        self.dfsHelper(vertex, visited, result)
        return result

    def dfsHelper(self, vertex, visited, result):
        """Visit `vertex`, then recurse into its unvisited neighbours."""
        if vertex is None:
            return
        if visited.get(vertex) is None:
            visited[vertex] = True
            result.append(vertex)
        else:
            return
        for item in self.adjancy_list[vertex]:
            self.dfsHelper(item, visited, result)

    # The recursive and iterative traversals may list vertices in different
    # orders (stack order vs. recursion order); both are valid DFS orders.
    def dfsIterative(self, vertex):
        """Iterative (stack-based) depth-first traversal."""
        stack = [vertex]
        result = []
        visited = {}
        while len(stack) > 0:
            poped_vertex = stack.pop()
            if visited.get(poped_vertex) is None:
                result.append(poped_vertex)
                visited[poped_vertex] = True
                for item in self.adjancy_list[poped_vertex]:
                    if visited.get(item) is None:
                        stack.append(item)
        return result

    def bfs(self, vertex):
        """Breadth-first traversal from `vertex`."""
        visited = {}
        result = []
        queue = [vertex]
        while len(queue):
            current_vertex = queue.pop(0)
            if visited.get(current_vertex) is None:
                visited[current_vertex] = True
                result.append(current_vertex)
            # Neighbours are marked when enqueued, so each is added once.
            for item in self.adjancy_list[current_vertex]:
                if visited.get(item) is None:
                    visited[item] = True
                    result.append(item)
                    queue.append(item)
        return result
# Demo: build a small undirected graph and print its three traversals.
demo_graph = Graphs()
for label in ("A", "B", "C", "D", "E", "F"):
    demo_graph.addVertex(label)
# Edge insertion order matters for traversal order, so it matches the
# original exactly.
for u, v in (("A", "B"), ("A", "C"), ("B", "D"), ("C", "E"),
             ("D", "E"), ("D", "F"), ("E", "F")):
    demo_graph.addEdge(u, v)
# demo_graph.printGraph()
# demo_graph.removeVertex("A")
# demo_graph.printGraph()
print(demo_graph.dfs("A"))
print(demo_graph.dfsIterative("A"))
print(demo_graph.bfs("A"))
|
# Print the 0..10 multiplication table for a number typed by the user.
tab = int(input('Please type a number to see your table'))
# Build the table rows with a loop instead of eleven separate variables;
# the printed text is byte-for-byte the same as before.
rows = ''.join(' \n x {}={}'.format(k, tab * k) for k in range(11))
print(' The table of {} is{}'.format(tab, rows))
from pyspark import SparkContext, SparkConf
import time
import sys

# Word-count benchmark: counts the words of each input file with Spark and
# appends the measured execution time to a per-run record file.
sc = SparkContext(appName="word_counter")
# argv[1] = number of worker machines, argv[2] = cores per machine,
# argv[3:] = files to evaluate.
files_to_evaluate = sys.argv[3:]
print("Put the number of workers in the cluster")
machines = sys.argv[1]
print("Put the number of cores in each machines")
cores = sys.argv[2]
# BUG FIX: j was reset to 1 at the top of every iteration, so each record
# file was named "..._1"; initialise the run counter once, before the loop.
j = 1
for i in files_to_evaluate:
    # Read the file and split every line into words.
    lineas = sc.textFile("/home/Frequency_words_Spark_vs_Sequential_Python/" + i ,minPartitions=2).flatMap(lambda line: line.split(" "))
    start_time = time.time()
    print("---------------------------Time counter has started---------------------------")
    # Map-reduce word count: (word, 1) pairs summed per key.
    contarPalabras = lineas.map(lambda word: (word, 1)).reduceByKey(lambda v1,v2:v1 +v2)
    counted_list = contarPalabras.collect()
    print(counted_list)
    execution_time = str(time.time()-start_time)
    # BUG FIX: the record file handle was never closed; `with` closes it.
    with open('record_' +'cores_' + cores + '_machines_' + machines + '_' + str(j) ,'a') as file:
        file.write(execution_time +'\n')
    print(f"the total execution time was: {(time.time()-start_time)}")
    j += 1
    time.sleep(15)
|
#Given 2 ints, a and b, return True if one if them is 10 or if their sum is 10.
def makes10(a, b):
    """Return True when a is 10, b is 10, or a + b equals 10."""
    # The comparison chain already evaluates to a bool, so return it
    # directly instead of branching to literal True/False.
    return a == 10 or b == 10 or a + b == 10
|
# Reading the e960401 file for basic manipulation
import nltk
from nltk.corpus import PlaintextCorpusReader
corpus_root = '../../../Corpus'
# Treat every .txt file under corpus_root as part of the corpus.
excelsior = PlaintextCorpusReader(corpus_root, '.*\.txt')
print("Available articles ", excelsior.fileids())
article_name = 'e960401.txt'
article = excelsior.words(fileids=article_name)
# Lower-case every token so vocabulary counts are case-insensitive.
article_lower = [w.lower() for w in article]
print(article_name, " has ", len(article_lower), " tokens.")
vocabulary = sorted(set(article_lower))
print(vocabulary)
print(article_name, " has a vocabulary length of ", len(vocabulary), ".")
text = nltk.Text(article_lower)
# text.concordance('empresa')
# Build a bag of context words around every occurrence of 'empresa'.
bag = []
for cl in text.concordance_list('empresa'):
    # Assumes each concordance entry is (left_context, match, right_context)
    # -- TODO confirm against the nltk version in use.
    # NOTE(review): cl[2][-4:] takes the LAST four words of the right
    # context; verify that the first four ([:4]) were not intended.
    left = list(cl[0][-4:])
    right = list(cl[2][-4:])
    bag += left
    bag += right
print("The bag of words of 'empresa' is: ", bag)
print("Words similar to 'empresa': ", text.similar('empresa'))
|
import matplotlib.pyplot as pl
import numpy as np
from matplotlib.colors import LogNorm
import itertools as it
class CorrPlot(object):
    """
    Visualize the data and the models.
    """
    def __init__(self, log=False):
        """Create the figure/axes; log=True puts both axes on a log scale."""
        self.fig, self.ax = pl.subplots()
        self.xlim = None
        self.ylim = None
        self.log = log
        if log:
            self.ax.set_xscale('log')
            self.ax.set_yscale('log')

    def plot_data(self, x, y, perc=99., nbins=200, xlim=None, ylim=None):
        """
        Show data as a 2D histogram.

        The central `perc` percent of each axis defines the default limits;
        explicit xlim/ylim override that. Returns (counts, xbins, ybins).
        """
        if xlim is None:
            self.xlim = np.percentile(x, [(100.-perc)/2., (100.+perc)/2.])
        else:
            self.xlim = xlim
        if ylim is None:
            self.ylim = np.percentile(y, [(100.-perc)/2., (100.+perc)/2.])
        else:
            self.ylim = ylim
        # Log-spaced bins on log axes, linear otherwise.
        if self.log:
            xbins, ybins = [np.logspace(
                np.log10(lim[0]),
                np.log10(lim[1]),
                nbins) for lim in [self.xlim, self.ylim]]
        else:
            xbins, ybins = [np.linspace(
                lim[0],
                lim[1],
                nbins) for lim in [self.xlim, self.ylim]]
        # histogram2d takes (y, x) so counts are row=y, col=x as pcolormesh expects.
        counts, _, _ = np.histogram2d(y, x, bins=(ybins, xbins))
        im = self.ax.pcolormesh(
            xbins, ybins,
            counts,
            norm=LogNorm(),
            cmap='YlGnBu_r',
            label=r'$\rm Data$')
        self.ax.set_xlabel(r'$N_{\rm HI}$')
        self.ax.set_ylabel(r'$I$')
        pl.colorbar(im, ax=self.ax)
        return counts, xbins, ybins

    def get_xvals(self):
        """Return 100 x-values spanning xlim (log-spaced on log axes)."""
        if self.log:
            xvals = np.logspace(
                np.log10(self.xlim[0]),
                np.log10(self.xlim[1]),
                100)
        else:
            xvals = np.linspace(self.xlim[0], self.xlim[1], 100)
        return xvals

    def plot_fit(self, theta, Z, label=None):
        """
        Plot a single fit: the line y = tan(theta) * x + Z.
        """
        xvals = self.get_xvals()
        yvals = np.tan(theta) * xvals + Z
        self.ax.plot(xvals, yvals, label=label)
        return 0

    def plot_mc_samples(self, chainr, nlines=50, label=None):
        """
        Plot a series of MCMC samples as faint black lines.
        """
        xvals = self.get_xvals()
        # BUG FIX: itertools.izip does not exist on Python 3; the builtin
        # zip() behaves the same here (stops at the shorter iterable).
        # Only the first sampled line receives the legend label.
        sliter = zip(
            np.random.permutation(chainr)[:nlines],
            it.chain([label], it.repeat(None)))
        _ = [pl.plot(
            xvals,
            np.tan(theta)*xvals+Z,
            alpha=0.15,
            color='k',
            label=l) for (theta, Z, _, _), l in sliter]
        return 0
|
#!/usr/bin/python3
""" module containts unittests for class State """
import unittest
import json
from models.base_model import BaseModel
from models.state import State
class testState(unittest.TestCase):
    """ unittests for State """
    def setUp(self):
        """ Sets up the class: a fresh State instance per test. """
        self.state = State()
    def tearDown(self):
        """ Test for tear down: discard the per-test instance. """
        del self.state
    def test_attribute(self):
        """ Test for saved attributes: a new State has an empty name. """
        s1 = State()
        self.assertEqual(s1.name, "")
if __name__ == "__main__":
    # BUG FIX: the old code instantiated the TestCase (testState()), which
    # runs no tests; unittest.main() discovers and runs them.
    unittest.main()
|
import xmpp
import redis
from threading import Timer
user = 'k'
password = 'k'
station_no = '1'
def get_local_data():
try:
r = redis.Redis('localhost', port = 6379, db = 0)
except Exception, (e):
print str(e)
macs = r.hkeys('station_'+station_no)
for i in macs:
msg[i] = r.lrange(i+"_"+station_no,0,-1)
resp[user] = {}
return json.dumps(msg)
def send_msg(msg,recipients):
    # Broadcast a chat message to every recipient.
    # NOTE(review): msgString is computed but never used -- the body actually
    # sent is the `msg` argument; confirm whether the local station data was
    # meant to be the payload.
    msgString = get_local_data()
    for to in recipients:
        message = xmpp.Message(to,msg)
        message.setAttr('type', 'chat')
        connection.send(message)
        print "sent message to "+to
# Connect to the local XMPP server, fetch the roster (minus ourselves) and
# schedule a broadcast 3 seconds from now.
connection = xmpp.Client('localhost',debug=[])
connection.connect(server=('localhost',5222))
connection.auth(user,password,'Online')
connection.sendInitPresence()
roster = connection.getRoster()
recipients = roster.getItems()
recipients.remove(user+'@localhost')
# BUG FIX: Timer(3.0, send_msg(recipients)) called send_msg immediately
# (and with the wrong arity) instead of after the delay; pass the callable
# and its arguments separately. The message body passed here is empty --
# see the note on send_msg about which payload was intended.
t = Timer(3.0, send_msg, args=('', recipients))
t.start()
import argparse
import glob
import os
import re
import wandb
def parse_log_line(line):
    """Extract (loss, step) from one training-log line.

    Assumes the third space-separated token is the loss (possibly followed
    by a comma) and that the step value appears two tokens after the
    literal word "step".
    """
    tokens = re.split(" ", line)
    # The loss token may carry a trailing comma -- strip it before parsing.
    loss_value = float(tokens[2].replace(",", ""))
    step_value = int(tokens[tokens.index("step") + 2])
    return loss_value, step_value
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Glob pattern selecting the log files to upload.
    parser.add_argument("--log_pattern", "-l", required=True, type=str)
    parser.add_argument("--wandb_project", "-p", type=str, default="splinter")
    args = parser.parse_args()
    log_files = glob.glob(args.log_pattern)
    for file_path in log_files:
        print("*" * 20)
        print(f"Processing {file_path}")
        print("*" * 20)
        # One wandb run per log file, named after the file (extension stripped).
        run_name = os.path.basename(file_path).replace(".log", "").replace(".txt", "")
        run = wandb.init(project=args.wandb_project, name=run_name)
        with open(file_path, "r") as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip()
            # Only INFO lines that mention both a loss and a step are logged.
            if line.startswith("INFO") and "loss" in line and "step" in line:
                loss, step = parse_log_line(line)
                run.log(data={"loss": loss}, step=step)
        run.finish()
# Generated by Django 2.0.6 on 2018-08-24 20:12
from django.db import migrations
class Migration(migrations.Migration):
    """Rename three Purchase fields to clearer snake_case names."""

    dependencies = [
        ('checkout', '0001_initial'),
    ]
    operations = [
        # phone -> phone_number
        migrations.RenameField(
            model_name='purchase',
            old_name='phone',
            new_name='phone_number',
        ),
        # street_address1 -> street_address_1
        migrations.RenameField(
            model_name='purchase',
            old_name='street_address1',
            new_name='street_address_1',
        ),
        # street_address2 -> street_address_2
        migrations.RenameField(
            model_name='purchase',
            old_name='street_address2',
            new_name='street_address_2',
        ),
    ]
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by 'bens3' on 2013-06-21.
Copyright (c) 2013 'bens3'. All rights reserved.
"""
import os
import unittest
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import raises
from ke2mongo import config
from ke2mongo.lib.ckan import call_action
import psycopg2
from ke2mongo.bulk import main as bulk_run
from ke2mongo import config
import random
import shutil
import tempfile
class TestMongo(unittest.TestCase):
    """Integration test for the bulk import: after running, the newest export
    batch must remain in export_dir and older batches must be archived."""
    export_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'bulk')
    archive_dir = '/vagrant/tmp/ke2mongo'
    # Filled by setup_class: one list of file names per batch, oldest first.
    files = []
    @classmethod
    def setup_class(cls):
        # Remove and recreate export dir
        try:
            shutil.rmtree(cls.export_dir)
        except OSError:
            pass
        os.mkdir(cls.export_dir)
        try:
            os.mkdir(cls.archive_dir)
        except OSError:
            pass
        # Create dummy test files
        file_names = [
            'eaudit.deleted-export',
            'ecatalogue.export',
            'emultimedia.export',
            'etaxonomy.export'
        ]
        # Create empty files, one batch per random 64-bit "date" suffix;
        # sorting means the last entry of cls.files is the newest batch.
        dates = []
        for i in range(5):
            dates.append(random.getrandbits(64))
        dates.sort()
        for date in dates:
            # Create a list of files and the batch they're part of
            files = []
            for file_name in file_names:
                file_name += '.%s' % date
                f = os.path.join(cls.export_dir, file_name)
                files.append(file_name)
                # Create the file in the data bulk directory
                open(f, 'a').close()
            cls.files.append(files)
    def test_order(self):
        # Point the importer at the temporary directories created above.
        config.set('keemu', 'export_dir', self.export_dir)
        config.set('keemu', 'archive_dir', self.archive_dir)
        print self.files
        # Run the bulk import
        bulk_run()
        # The newest batch must still exist in the export directory...
        existing_files = self.files.pop()
        for existing_file in existing_files:
            f = os.path.join(self.export_dir, existing_file)
            assert os.path.isfile(f), 'File %s does not exist' % f
        # ...and every older batch must have been moved to the archive.
        for deleted_files in self.files:
            for f in deleted_files:
                export_file = os.path.join(self.export_dir, f)
                archive_file = os.path.join(self.archive_dir, f)
                # Make sure the export file has been deleted
                assert not os.path.isfile(export_file), 'File %s should have been deleted' % export_file
                # And the archive file has been created
                assert os.path.isfile(archive_file), 'Archive file %s has not been created' % archive_file
|
"""
This module lets you practice the ACCUMULATOR pattern
in its simplest classic forms:
SUMMING: total = total + number
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Muqing Zheng. September 2015.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
def main():
    """Run every TEST function defined in this module, in order."""
    for test in (test_sum_cosines, test_sum_square_roots):
        test()
def test_sum_cosines():
    """Tests the sum_cosines function (three cases, printed as
    expected/actual pairs exactly like the other TEST functions)."""
    print()
    print('--------------------------------------------------')
    print('Testing the sum_cosines function:')
    print('--------------------------------------------------')
    # (argument, expected result) pairs.
    cases = [
        (2, 1.1241554693209974),
        (3, 0.134162972720552),
        (4, -0.5194806481430599),
    ]
    for argument, expected in cases:
        actual = sum_cosines(argument)
        print('Expected and actual are:', expected, actual)
def sum_cosines(n):
    """
    Returns the sum of the cosines of the integers 0, 1, 2, ... n,
    for the given n.

    For example, sum_cosines(3) returns
    cos(0) + cos(1) + cos(2) + cos(3), which is about 0.13416.

    Precondition: n is a non-negative integer.
    """
    import math
    # Accumulate the terms in ascending order (same order, same result
    # as the explicit loop).
    return sum(math.cos(k) for k in range(n + 1))
def test_sum_square_roots():
    """Tests the sum_square_roots function (three cases, printed as
    expected/actual pairs exactly like the other TEST functions)."""
    print()
    print('--------------------------------------------------')
    print('Testing the sum_square_roots function:')
    print('--------------------------------------------------')
    # (argument, expected result) pairs.
    cases = [
        (2, 3.414213562373095),
        (3, 5.863703305156273),
        (4, 8.692130429902463),
    ]
    for argument, expected in cases:
        actual = sum_square_roots(argument)
        print('Expected and actual are:', expected, actual)
def sum_square_roots(n):
    """
    Returns the sum of the square roots of the integers
    2, 4, 6, 8, ... 2n for the given n.

    So if n is 7, the last term of the sum
    is the square root of 14 (not 7).

    For example, sum_square_roots(5) returns
    sqrt(2) + sqrt(4) + sqrt(6) + sqrt(8) + sqrt(10),
    which is about 11.854408.

    Precondition: n is a non-negative integer.
    """
    import math
    # k runs 0..n, so 2*k yields the even terms 0, 2, ..., 2n in the same
    # order as the original step-2 range (sqrt(0) contributes nothing).
    return sum(math.sqrt(2 * k) for k in range(n + 1))
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# (Runs at import time; this course template deliberately has no
# __main__ guard.)
# ----------------------------------------------------------------------
main()
|
# -*- coding: utf-8 -*-
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from matplotlib.image import imsave
from matplotlib.colors import ListedColormap
from ..fmtxt import Image
from ._colors import ColorBar
class BrainMixin(object):
    "Eelbrain additions to the PySurfer Brain class"
    # Mixin that adds Eelbrain functionality to the pysurfer Brain class,
    # defined in a separate module so that documentation can be built without
    # importing Mayavi
    def __init__(self, unit):
        # Double-underscore name is mangled to _BrainMixin__unit, so it
        # cannot collide with attributes of the pysurfer Brain class.
        self.__unit = unit
    def image(self, name, format='png', alt=None):
        """Create an FMText Image from a screenshot
        Parameters
        ----------
        name : str
            Name for the file (without extension; default is 'image').
        format : str
            File format (default 'png').
        alt : None | str
            Alternate text, placeholder in case the image can not be found
            (HTML `alt` tag).
        """
        # RGBA screenshot; the second argument requests it (pysurfer API).
        im = self.screenshot('rgba', True)
        return Image.from_array(im, name, format, alt)
    def plot_colorbar(self, label=True, label_position=None, label_rotation=None,
                      clipmin=None, clipmax=None, orientation='horizontal',
                      *args, **kwargs):
        """Plot a colorbar corresponding to the displayed data
        Parameters
        ----------
        label : None | str
            Label for the x-axis (default is based on the data).
        label_position : 'left' | 'right' | 'top' | 'bottom'
            Position of the axis label. Valid values depend on orientation.
        label_rotation : scalar
            Angle of the label in degrees (For horizontal colorbars, the default is
            0; for vertical colorbars, the default is 0 for labels of 3 characters
            and shorter, and 90 for longer labels).
        clipmin : scalar
            Clip the color-bar below this value.
        clipmax : scalar
            Clip the color-bar above this value.
        orientation : 'horizontal' | 'vertical'
            Orientation of the bar (default is horizontal).
        Returns
        -------
        colorbar : plot.ColorBar
            ColorBar plot object.
        """
        # Pick the data dict for one hemisphere; 'lh' doubles for the
        # both/split displays.
        if self._hemi in ('both', 'split', 'lh'):
            data = self.data_dict['lh']
        elif self._hemi == 'rh':
            data = self.data_dict[self._hemi]
        else:
            raise RuntimeError("Brain._hemi=%s" % repr(self._hemi))
        # pysurfer stores the colortable as 0-255 ints; normalize to 0-1.
        cmap = ListedColormap(data['orig_ctable'] / 255., "Brain Colormap")
        return ColorBar(cmap, data['fmin'], data['fmax'], label, label_position,
                        label_rotation, clipmin, clipmax, orientation,
                        self.__unit, (), *args, **kwargs)
    def save_image(self, filename, transparent=True):
        """Save current image to disk
        Parameters
        ----------
        filename: str
            Destination for the image file (format is inferred from extension).
        transparent : bool
            Whether to make background transparent (default True).
        """
        if transparent:
            mode = 'rgba'
        else:
            mode = 'rgb'
        im = self.screenshot(mode)
        imsave(filename, im)
|
import socket
import _thread
from stepper import Stepper_28BYJ
import ssd1306
import machine
import time
#pins0 = [25,26,2,14] Conflict with I2C
#pins1 = [16,5,4,0]
pins0 = [25,26,12,13]
pins1 = [16,0,2,14]
def process_command(data,motors):
    # Placeholder: command handling currently lives inline in main().
    pass
def display_thread(motors):
    # Background thread: refresh the OLED once a second with both motor
    # positions (I2C on pins 4/5, 128x64 SSD1306).
    i2c = machine.I2C(scl=machine.Pin(4), sda=machine.Pin(5))
    oled = ssd1306.SSD1306_I2C(128, 64, i2c)
    while True:
        oled.fill(0)
        oled.text('P0= '+str(motors[0].get_position()), 0, 0)
        oled.text('P1= ' + str(motors[1].get_position()), 0, 10)
        oled.show()
        time.sleep_ms(1000)
def main():
    """Serve a tiny line-based TCP protocol (port 80) that drives the two
    stepper motors.

    Commands (space separated, CRLF terminated):
      GOTO <motor> <pos>      move a motor to an absolute position
      POSITION <motor>        reply with the motor's current position
      ZERO <motor> [value]    set the motor's zero reference
      SPEED <motor> <period>  set the step timer period (>= min_period)
      READY                   reply 1 when all motors are idle, else 0
    """
    min_period = 2  # fastest allowed stepper timer period
    m1 = Stepper_28BYJ(pins0, None, 5)
    m2 = Stepper_28BYJ(pins1, None, 5)
    motors = [m1, m2]
    _thread.start_new_thread(display_thread, (motors,))
    addr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]
    s = socket.socket()
    s.bind(addr)
    s.listen(1)
    while True:
        cl, addr = s.accept()
        while True:
            cl_file = cl.makefile('rwb', 0)
            data = cl_file.readline()
            if len(data) == 0:
                # Peer closed the connection.
                cl_file.close()
                cl.close()
                break
            # Cap the request size before parsing.
            if len(data) > 100:
                data = data[:100]
            # Drop the trailing CRLF, decode, split into tokens.
            data = data[:-2].decode('latin').split(' ')
            cmd = data[0]
            try:
                if cmd == 'GOTO' and len(data) >= 3:
                    motor_no = int(data[1])
                    # BUG FIX: the old check used ">", so motor_no equal to
                    # len(motors) slipped through and raised IndexError on
                    # the list access; ">=" rejects it cleanly. (Same fix
                    # applied in every branch below.)
                    if motor_no >= len(motors):
                        raise ValueError
                    pos = int(data[2])
                    motors[motor_no].goto(pos)
                elif cmd == 'POSITION' and len(data) >= 2:
                    motor_no = int(data[1])
                    if motor_no >= len(motors):
                        raise ValueError
                    cl_file.write(str(motors[motor_no].get_position()).encode('latin')+b'\r\n')
                elif cmd == 'ZERO' and len(data) >= 2:
                    motor_no = int(data[1])
                    if motor_no >= len(motors):
                        raise ValueError
                    # Optional third token: explicit zero position.
                    if len(data) >= 3:
                        zero = int(data[2])
                    else:
                        zero = None
                    motors[motor_no].set_zero(zero)
                elif cmd == 'SPEED' and len(data) >= 3:
                    motor_no = int(data[1])
                    if motor_no >= len(motors):
                        raise ValueError
                    timer_period = int(data[2])
                    if timer_period < min_period:
                        raise ValueError
                    motors[motor_no].set_speed(timer_period)
                elif cmd == 'READY':
                    ready = all([m.ready() for m in motors])
                    ready = str(int(ready)).encode('latin')
                    cl_file.write(ready + b'\r\n')
            except ValueError:
                # Malformed argument or out-of-range motor index: ignore the
                # command and keep the connection alive.
                continue
#!/usr/bin/env python
#coding:utf8
from flask import Blueprint
analysis = Blueprint('analysis',__name__)
from analysis import views
|
import django_filters
from django.db import models
from invoices.models import Invoice
class InvoiceFilter(django_filters.FilterSet):
    """Filter set for Invoice list endpoints.

    Declared filters use case-insensitive exact matching on the related
    patient's first name, last name and insurance company; the Meta.fields
    dict adds range filters on amount and creation_date.
    """
    patient_id__first_name = django_filters.CharFilter(lookup_expr='iexact')
    patient_id__last_name = django_filters.CharFilter(lookup_expr='iexact')
    patient_id__insurance = django_filters.CharFilter(lookup_expr='iexact', label='Patient insurance company')
    class Meta:
        model = Invoice
        # Empty lists defer to the explicitly declared filters above.
        fields = {
            'patient_id' : ['exact'],
            'patient_id__first_name' : [],
            'patient_id__last_name' : [],
            'amount' : ['gt', 'lt'],
            'creation_date' : ['gt', 'lt', 'exact'],
            'patient_id__insurance' : ['exact'],
        }
from pathlib import Path
import sys, time
import torch
import torch.optim as optim
import numpy as np
import json
from utils.clevr import load_vocab, ClevrDataLoaderNumpy, ClevrDataLoaderH5
from tbd.module_net_binary import TbDNet
import h5py
from tbd.module_net import load_tbd_net
from utils.generate_programs import load_program_generator, generate_programs
# Vocabulary mapping question/program/answer tokens to indices.
vocab = load_vocab(Path('data/vocab.json'))
model_path = Path('./models')
program_generator_checkpoint = 'program_generator.pt'
tbd_net_checkpoint = './models/clevr-reg.pt'
# tbd_net = load_tbd_net(tbd_net_checkpoint, vocab, feature_dim=(512, 28, 28))
# tbd_net_checkpoint = './binary_example-13.pt'
tbd_net = load_tbd_net(tbd_net_checkpoint, vocab, feature_dim=(1024, 14, 14))
program_generator = load_program_generator(model_path / program_generator_checkpoint)
BATCH_SIZE = 64
# CLEVR validation-set loader configuration (questions + image features).
val_loader_kwargs = {
    'question_h5': Path('/mnt/fileserver/shared/datasets/CLEVR_v1/data/val_questions_query_ending.h5'),
    # 'feature_h5': Path('/mnt/fileserver/shared/datasets/CLEVR_v1/data/val_features_28x28.h5'),
    'feature_h5': Path('/mnt/fileserver/shared/datasets/CLEVR_v1/data/val_features.h5'),
    'batch_size': BATCH_SIZE,
    'num_workers': 1,
    'shuffle': False
}
# generate_programs('/mnt/fileserver/shared/datasets/CLEVR_v1/data/test_questions_query_ending.h5', program_generator, 'data/test_set', BATCH_SIZE)
# quit()
loader = ClevrDataLoaderH5(**val_loader_kwargs)
dataset_size = loader.dataset_size
NBATCHES = int(dataset_size / BATCH_SIZE)
# Answer-index -> answer-word mapping used to match ground-truth answers
# against module output-type descriptions.
answers_str = {23: 'metal', 27: 'rubber', 22: 'large', 29: 'sphere', 28: 'small', 19: 'cylinder', 17: 'cube',
               30:'yellow', 26:'red', 25: 'purple', 21:'green', 20:'gray', 18:'cyan',16:'brown',15:'blue'}
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def map_ans_binary(answers, out_types):
    """Map answers from the format Justin Johnson et al. use to our own
    multi-hot format.

    Builds a flat float tensor with one slot per (question, output type)
    pair, set to 1 where the ground-truth answer word appears in that
    output type's description.
    """
    # Total number of slots across all questions (was an explicit loop).
    length = sum(len(types) for types in out_types)
    ans_tensor = torch.FloatTensor(length).zero_()
    k = 0
    for i in range(len(out_types)):
        for j in range(len(out_types[i])):
            # answers_str translates the answer index into its word.
            if answers_str[answers[i].item()] in out_types[i][j]:
                ans_tensor[k] = 1
            k += 1
    return ans_tensor
# Inference only -- no gradients needed.
torch.set_grad_enabled(False)
num_correct, num_samples, num_relevant = 0, 0, 0
ib = 0
for batch in loader:
    _, _, feats, answers, programs = batch
    feats = feats.to(device)
    programs = programs.to(device)
    # Binary forward pass: one output attention map per candidate answer type.
    outs, out_types = tbd_net.forward_binary(feats, programs)
    mapped_ans = map_ans_binary(answers, out_types)
    preds = torch.FloatTensor(mapped_ans.shape[0]).zero_()
    k = 0
    for f in outs:
        p = []
        for i in range(f.shape[0]):
            p.append(float(torch.sum(f[i]).to('cpu')))
        # Pick the candidate with the largest total activation as the
        # predicted answer for this question.
        imax = np.argmax(np.array(p))
        preds[k + imax] = 1.
        k += f.shape[0]
    # outs = torch.sigmoid(outs)
    # preds = (outs.to('cpu') > 0.5).type(torch.FloatTensor)
    # preds = preds.view(outs.shape[0])
    # Element-wise AND of prediction and ground truth -> correct hits.
    acc = mapped_ans * preds
    batch_num_correct = float(acc.sum())
    num_correct += batch_num_correct
    batch_num_samples = float(preds.size(0))
    num_samples += batch_num_samples
    batch_num_relevant = float(mapped_ans.sum())
    num_relevant += batch_num_relevant
    ib += 1
    sys.stdout.write('\r \rBatch: {}/{} Recall: {:.4f}/{:.4f} - {:.4f}'.format(ib, NBATCHES,
                     batch_num_correct,
                     batch_num_relevant,
                     batch_num_correct / batch_num_relevant))
print('\nTotal accuracy: {} / {} ({}%) Recall: {}'.format(num_correct, num_samples, 100 * num_correct / num_samples,
      num_correct / num_relevant))
|
from django.shortcuts import render
# Create your views here.
from rest_framework import viewsets, permissions
from Notes.models import Term, ClassNote
from Notes.serializers import TermSerializer, CourseSerializer
class TermViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the requesting user's Terms."""
    serializer_class = TermSerializer
    # BUG FIX: the trailing comma was missing, so permission_classes was the
    # bare class instead of a one-element tuple -- DRF iterates this setting
    # and fails on a non-iterable.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def get_queryset(self):
        """Return only the authenticated user's terms; empty when anonymous."""
        active_user = self.request.user
        if active_user.is_authenticated:
            queryset = Term.objects.filter(user=active_user)
        else:
            queryset = Term.objects.none()
        return queryset
class CourseViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for the requesting user's ClassNotes (courses)."""
    serializer_class = CourseSerializer
    # BUG FIX: missing trailing comma -- permission_classes must be a tuple,
    # not the bare permission class (DRF iterates this setting).
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def get_queryset(self):
        """Return only the authenticated user's notes; empty when anonymous."""
        active_user = self.request.user
        if active_user.is_authenticated:
            queryset = ClassNote.objects.filter(user=active_user)
        else:
            queryset = ClassNote.objects.none()
        return queryset
|
import time
import sys
from colored import fg, attr
from PyQt5 import QtWidgets, QtGui
from base import Ui_MainWindow
from Logic import SteganoMode, BlatantMode
class Logger:
    """Coloured console logger: timestamped, level-tagged messages."""

    @staticmethod
    def _emit(color, tag, msg):
        # Shared formatter: bold coloured "[timestamp] (tag) --> " prefix,
        # then the message in the plain variant of the same colour.
        # NOTE(review): '%H:%M.%S' (dot between minutes and seconds) is kept
        # from the original -- confirm whether ':' was intended.
        stamp = time.strftime('%Y-%m-%d %H:%M.%S', time.localtime())
        print(f"{fg(color) + attr(1)}[{stamp}] ({tag}) --> {attr(0) + fg(color)}{msg}{fg(0)}")

    @staticmethod
    def info(msg):
        Logger._emit(225, "Info", msg)

    @staticmethod
    def debug(msg):
        Logger._emit(40, "Debug", msg)

    @staticmethod
    def warning(msg):
        Logger._emit(220, "Warning!", msg)

    @staticmethod
    def error(msg):
        Logger._emit(9, "!Error!", msg)
class Ui(QtWidgets.QMainWindow, Ui_MainWindow):
    """Steganos main window.

    Wires the Qt-Designer form (Ui_MainWindow) to the BlatantMode /
    SteganoMode logic: gathers user input, runs encrypt/decrypt and
    reports results or failures through modal dialogs.
    """
    def __init__(self):
        super(Ui, self).__init__()
        self.setupUi(self)
        Logger.info("Loaded GUI blueprint")
        self.setWindowIcon(QtGui.QIcon("key.png"))
        self.show()
        # Resolve the widgets declared in the .ui blueprint by object name.
        self.encrypt = self.findChild(QtWidgets.QPushButton, "encrypt")
        self.decrypt = self.findChild(QtWidgets.QPushButton, "decrypt")
        self.text = self.findChild(QtWidgets.QLineEdit, "text")
        self.key = self.findChild(QtWidgets.QLineEdit, "key")
        self.image = self.findChild(QtWidgets.QLineEdit, "image")
        self.radius_constant = self.findChild(QtWidgets.QSpinBox, "radius_constant_spinbox")
        self.avg_radius_constant = self.findChild(QtWidgets.QSpinBox, "avg_radius_constant_spinbox")
        self.max_rejects_constant = self.findChild(QtWidgets.QSpinBox, "max_rejects_constant_spinbox")
        self.blatant_radiobtn = self.findChild(QtWidgets.QRadioButton, "mode_button_1")
        self.steganos_radiobtn = self.findChild(QtWidgets.QRadioButton, "mode_button_2")
        Logger.info("Referenced useful widgets")
        # Connect buttons and menu actions to their handlers.
        self.encrypt.clicked.connect(self.encryptRoutine)
        self.decrypt.clicked.connect(self.decryptRoutine)
        self.findChild(QtWidgets.QAction, "actionHelp").triggered.connect(self.help)
        self.findChild(QtWidgets.QAction, "actionContact").triggered.connect(self.contact)
        Logger.info("Connected widget events")
    def contact(self):
        """Show the author's contact details in an information dialog."""
        self.showDialog(QtWidgets.QMessageBox.Information,
                        "Steganos - Contacts",
                        "Website: www.matteoleggio.it\n"
                        "Instagram: @zent3600\n"
                        "Reddit: u/ZenT3600\n"
                        "Telegram: @ZenT3600")
    def help(self):
        """Show usage help describing the two modes and the tuning constants."""
        self.showDialog(QtWidgets.QMessageBox.Information,
                        "Steganos - Help",
                        "Steganos\n"
                        "Hidden in plain sight\n"
                        "\n"
                        "----------\n"
                        "\n"
                        "Modes:\n"
                        "-----\n"
                        "Blatant --> Colored pixels on black background\n"
                        "Steganos --> Actual hidden image\n"
                        "\n\n"
                        "Settings (Only touch if experienced):\n"
                        "-----\n"
                        "Radius Constant --> The safe radius between every encrypted pixel\n"
                        "Avg. Radius Constant --> The radius to get the estimated average color of a pixel from\n"
                        "Max Rejects --> The max number of times the program can be rejected before giving up")
    def showDialog(self, icon, title, body):
        """Pop a modal message box with the given icon, title and body text."""
        msg = QtWidgets.QMessageBox()
        msg.setIcon(icon)
        msg.setText(body)
        msg.setWindowTitle(title)
        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
        msg.exec_()
    def encryptRoutine(self):
        """Validate the form and hide ``self.text`` in the chosen image.

        Any exception raised by the crypto layer is surfaced as a
        critical dialog instead of crashing the GUI.
        """
        Logger.info("Entered encrypting routine")
        try:
            Logger.debug(f"Image: {self.image.text()}\n"
                         f"\t\t\tKey: {self.key.text()}\n"
                         f"\t\t\tText: {self.text.text()}")
            Logger.debug(f"Image Type: {type(self.image.text())}\n"
                         f"\t\t\tKey Type: {type(self.key.text())}\n"
                         f"\t\t\tText Type: {type(self.text.text())}")
            Logger.debug(f"Settings: {self.max_rejects_constant.value()}\n"
                         f"\t\t\t{self.radius_constant.value()}\n"
                         f"\t\t\t{self.avg_radius_constant.value()}")
            if self.text.text():
                if self.blatant_radiobtn.isChecked():
                    Logger.info("Blatant mode selected")
                    c = BlatantMode.Steganos()
                else:
                    Logger.info("Stegano mode selected")
                    # Stegano mode requires a key and sane radius settings.
                    if not self.key.text():
                        Logger.warning("No key given")
                        self.showDialog(QtWidgets.QMessageBox.Warning,
                                        "Error!",
                                        f"No key given")
                        return
                    if self.avg_radius_constant.value() > self.radius_constant.value():
                        self.showDialog(QtWidgets.QMessageBox.Warning,
                                        "Error!",
                                        f"Avg. Radius Constant can't be higher than Radius Constant")
                        return
                    c = SteganoMode.Steganos(str(self.image.text()), str(self.key.text()),
                                             int(self.max_rejects_constant.value()),
                                             int(self.radius_constant.value()),
                                             int(self.avg_radius_constant.value()))
                file = c.cypherRoutine(self.text.text())
                Logger.debug(f"Generated file: {file}")
                self.showDialog(QtWidgets.QMessageBox.Information, "Success!", f"New Hidden Image: {file}")
            else:
                Logger.warning("No text given")
                self.showDialog(QtWidgets.QMessageBox.Warning,
                                "Error!",
                                f"No text given")
        except Exception as e:
            self.showDialog(QtWidgets.QMessageBox.Critical, "Critical!", f"Error: {e}")
    def decryptRoutine(self):
        """Validate the form and recover the hidden text from the chosen image.

        Any exception raised by the crypto layer is surfaced as a
        critical dialog instead of crashing the GUI.
        """
        Logger.info("Entered decrypting routine")
        try:
            Logger.debug(f"Image: {self.image.text()}\n"
                         f"\t\t\tKey: {self.key.text()}\n"
                         f"\t\t\tText: {self.text.text()}")
            Logger.debug(f"Image Type: {type(self.image.text())}\n"
                         f"\t\t\tKey Type: {type(self.key.text())}\n"
                         f"\t\t\tText Type: {type(self.text.text())}")
            Logger.debug(f"Settings: {self.max_rejects_constant.value()}\n"
                         f"\t\t\t{self.radius_constant.value()}\n"
                         f"\t\t\t{self.avg_radius_constant.value()}")
            if self.blatant_radiobtn.isChecked():
                Logger.info("Blatant mode selected")
                if not self.image.text():
                    Logger.warning("No image given")
                    self.showDialog(QtWidgets.QMessageBox.Warning,
                                    "Error!",
                                    f"No image given")
                    return
                c = BlatantMode.Steganos(self.image.text())
            else:
                Logger.info("Stegano mode selected")
                if not self.key.text():
                    Logger.warning("No key given")
                    self.showDialog(QtWidgets.QMessageBox.Warning,
                                    "Error!",
                                    f"No key given")
                    return
                if self.avg_radius_constant.value() > self.radius_constant.value():
                    self.showDialog(QtWidgets.QMessageBox.Warning,
                                    "Error!",
                                    f"Avg. Radius Constant can't be higher than Radius Constant")
                    return
                c = SteganoMode.Steganos(str(self.image.text()), str(self.key.text()),
                                         int(self.max_rejects_constant.value()),
                                         int(self.radius_constant.value()),
                                         int(self.avg_radius_constant.value()))
            text = c.decipherRoutine()
            Logger.debug(f"Hidden text: {text}")
            self.showDialog(QtWidgets.QMessageBox.Information, "Success!", f"Text: {text}")
        except Exception as e:
            self.showDialog(QtWidgets.QMessageBox.Critical, "Critical!", f"Error: {e}")
if __name__ == '__main__':
    # Start the Qt application and hand control to its event loop;
    # exit with the loop's return code when the window closes.
    app = QtWidgets.QApplication(sys.argv)
    window = Ui()
    sys.exit(app.exec_())
|
#!/usr/bin/env
# -*- coding: utf-8 -*-
__author__ = 'Vmture'
from selenium import webdriver
import re
import csv
import time
import os
import codecs
from create_folders import create_folders
from download_apk import download_apk
# Load the Xiaomi app-store page URL from the local config file.
# NOTE(review): eval() on file contents executes arbitrary code if
# urls.json is not trusted; json.load would be safer -- confirm format.
with open('./urls.json', 'r') as urls:
    xiaomi_url = eval(urls.read())['xiaomi_url']
xiaomi_tmp = '/tmp/tmp_xiaomi.txt'  # scratch file holding the rendered page source
def get_page_source():
    """Render the store page with headless PhantomJS and cache its HTML.

    Writes the UTF-8 encoded page source to ``xiaomi_tmp`` for the
    other parsing helpers to consume.
    """
    driver = webdriver.PhantomJS(executable_path='./phantomjs')
    driver.get(xiaomi_url)
    page_source = driver.page_source
    driver.quit()
    with open(xiaomi_tmp, 'wb') as tmp:
        tmp.write(page_source.encode('utf8'))
### Assemble the CSV row describing the current app version.
# def get_base_message(page_source):
def get_base_message(new_upgrate):
    """Build one CSV row from the cached page source.

    :param new_upgrate: list of "what's new" strings (may be empty)
    :return: [query timestamp, size, version, update date, package,
              appid, joined changelog] -- the changelog column is only
              appended when ``new_upgrate`` is non-empty.
    """
    new_contents = ''
    # base_message = re.compile(r'<ul class=" cf">(.+)</ul><div class="weight-font float-div">').findall(page_source)[0]
    with open(xiaomi_tmp, 'r') as f:
        F = f.read()
    # The metadata list sits in a single <ul class=" cf"> element.
    base_message = re.compile(r'<ul class=" cf">(.+)</ul><div class="weight-font float-div">').findall(F)[0]
    base_message_a = base_message.split('><')
    base_messages = [str(time.ctime())]
    # Every second fragment carries a value (label fragments are skipped).
    for message_num in range(len(base_message_a))[1::2]:
        base_messages.append(re.compile(r'>(.+)<').findall(base_message_a[message_num])[0])
    if len(new_upgrate) != 0:
        for new_content in new_upgrate:
            new_contents += new_content+'; '
        base_messages.append(new_contents)
    return base_messages
#### Extract the "what's new" changelog from the cached page source.
def get_update_data():
    """Return the list of changelog lines for the current release.

    Locates the two '</p><h3 class="special-h3' marker lines in the
    cached HTML and collects the non-ASCII (Chinese) text between them.
    Python 2 only (uses ``xrange`` and byte-range regexes).
    """
    numbers = []
    new_upgrate = []
    with open(xiaomi_tmp, 'r') as f:
        F = f.readlines()
    for data in F:
        if '</p><h3 class="special-h3' in data:
            numbers.append(F.index(data))
    #### Collect the changelog text between the two markers.
    for number in xrange(numbers[0], numbers[1]+1):
        # [\x80-\xff]+ matches runs of non-ASCII bytes (the Chinese text).
        if re.compile(r'[\x80-\xff]+').findall(F[number]):
            if number == numbers[0]:
                new_upgrate.append(re.compile(r'[\x80-\xff]+').findall(F[number])[-1])
            else:
                new_upgrate.append(re.compile(r'[\x80-\xff]+').findall(F[number])[0])
    return new_upgrate
def create_csv(xiaomi_path):
    """Create xiaomi.csv with a UTF-8 BOM and header row if it is missing.

    :param xiaomi_path: directory that holds (or will hold) xiaomi.csv
    """
    path = os.listdir(xiaomi_path)
    # Header columns (Chinese): query time, size, version, update date,
    # package name, appid, what's new.
    message_kinds = ['查询时间', '软件大小', '版本号', '更新时间', '包名', 'appid', '新版特性']
    if 'xiaomi.csv' not in path:
        with open(xiaomi_path+'/xiaomi.csv', 'wb') as F:
            # BOM lets Excel detect the UTF-8 encoding.
            F.write(codecs.BOM_UTF8)
            csv_writer = csv.writer(F, dialect='excel')
            csv_writer.writerow(message_kinds)
def insert_data(base_messages, xiaomi_path):
    """Append a new CSV row when the version number changed.

    Rewrites the whole file (old rows + new row) so the UTF-8 BOM stays
    at the start.

    :param base_messages: row produced by :func:`get_base_message`
    :param xiaomi_path: directory containing xiaomi.csv
    :return: APK file name derived from package name and version
             (only assigned when the version differs -- NOTE(review):
             returns None/undefined behavior otherwise; confirm callers).
    """
    old_datas = list(csv.reader(open(xiaomi_path+'/xiaomi.csv', 'r')))
    level_number = base_messages[2]
    apk_name = base_messages[4]+'_'+level_number+'.apk'
    if old_datas[-1][2] != base_messages[2]:
        # Compare the last recorded version number with the freshly
        # scraped one; only append when they differ.
        with open(xiaomi_path+'/xiaomi.csv', 'wb') as F:
            F.write(codecs.BOM_UTF8)
            csv_writer = csv.writer(F, dialect='excel')
            for contents in old_datas:
                csv_writer.writerow(contents)
            csv_writer.writerow(base_messages)
        return apk_name
def get_download_url():
    """Extract the APK download URL from the cached page source.

    Scans the PhantomJS page dump for the line holding the '直接下载'
    ("direct download") anchor and pulls its href.

    :return: absolute download URL, or None when no download link exists.
    """
    with open(xiaomi_tmp, 'r') as tmp:
        tmps = tmp.readlines()
    for line in tmps:
        if '直接下载' not in line:
            continue
        # The anchor of interest is one of the '><'-separated fragments.
        # BUG FIX: the original reused the name `line` for the inner loop
        # and referenced `datas_b` after the loops, which raises NameError
        # whenever the marker never occurs; we now return the first match
        # explicitly and fall through to None otherwise.
        for fragment in line.split('><'):
            if '直接下载' in fragment:
                hrefs = re.compile(r'a href="(.+)" class="download"').findall(fragment)
                if hrefs:
                    # hrefs on the page are relative to the store root.
                    return 'http://app.mi.com' + hrefs[0]
    return None
def run():
    """Run one full Xiaomi scrape cycle and return the CSV path.

    Renders the store page, records version metadata into xiaomi.csv,
    downloads the APK and removes the scratch file.  Any failure falls
    back to a human-readable error string instead of raising.

    :return: path to the updated CSV, or an error message on failure.
    """
    article_path = create_folders()
    xiaomi_path = article_path+r'/小米'
    try:
        get_page_source()
        create_csv(xiaomi_path)
        new = get_update_data()
        messages = get_base_message(new)
        apk_name = insert_data(messages, xiaomi_path)
        download_url = get_download_url()
        download_apk(xiaomi_path, apk_name, download_url)
        os.remove(xiaomi_tmp)
        file_path = xiaomi_path+'/xiaomi.csv'
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    except Exception:
        file_path = '可能网络存在问题,请确定网络稳定的情况下再试.'
    return file_path
# Script entry point: run one scrape cycle when executed directly.
if __name__ == '__main__':
    run()
# Simple bio-data demo: store personal details in module-level
# variables, then print them as a labelled form.
firstname = "Olayinka"
lastname = "Idumu"
Age = "28 year"
Address = "No 8, Aduralere quaters of ijoka road Akure"
State_of_Origin = " Ose L.G"
Year_of_birth = "1990"
Nationality = "Nigeria"
Marital_status = "Single"
Religion = "Christainity"
# BUG FIX: printed labels were inconsistent/misspelled ("Nationionality:",
# and "State of Origin"/"Marital Status" lacked a colon); the stored
# values above are unchanged.
print("Biodata Form")
print("Firstname:", firstname)
print("Lastname:", lastname)
print("Age:", Age)
print("Address:", Address)
print("State of Origin:", State_of_Origin)
print("Year of birth:", Year_of_birth)
print("Nationality:", Nationality)
print("Religion:", Religion)
print("Marital Status:", Marital_status)
|
# Advent-of-Code-2018 day 2 style script: a box-ID checksum plus a
# brute-force search for the two IDs differing in exactly one position.
# Reads puzzle lines from stdin / argv files via fileinput.
from fileinput import input
inpt = [i for i in input()]
# For each line: the SET of per-letter occurrence counts (a set, so a
# line with several doubled letters still contributes the count 2 once).
lst = [{len([x for x in i if x == j]) for j in set(i)} for i in inpt]
flatlist = [item for x in lst for item in x]
# Checksum = (#lines with a letter exactly twice) * (#lines with a
# letter exactly three times).
print(len([x for x in flatlist if x == 2]) * len([x for x in flatlist if x == 3]))
# Print every ordered pair of lines differing in exactly one position
# (each matching pair appears twice, once per ordering).
for first in inpt:
    for snd in inpt:
        differences = 0
        for (i,l) in enumerate(first):
            if snd[i] != l:
                differences += 1
        if differences == 1:
            print(first + snd)
from django.contrib.auth.models import User
from django.db import transaction
@transaction.atomic
def register_user(username: str, password: str) -> User:
    """Create and persist a new ``User`` with a properly hashed password.

    Runs inside a single database transaction.

    :param username: login name for the new account
    :param password: plain-text password; stored only as a hash
    :return: the saved ``User`` instance
    """
    new_account = User(username=username)
    new_account.set_password(password)
    new_account.save()
    return new_account
|
import random as rm
import math as m
import copy
import os, sys
directory = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(directory, "code"))
sys.path.append(os.path.join(directory, "code", "classes"))
sys.path.append(os.path.join(directory, "code", "algoritmes"))
from datastructuur import data
from helper import helper
from score import score
from pancakeSort import pancakeSort
from survivalOfFittest import populationBased
from strictDonkey import simulatedAnnealing
from BranchandBound import branchAndBound
from steepestDescendValleyAbseiler import steepestDescendValleyAbseiler
from tester import tester
from visualizations import visualize
# Top-level interactive menu: ask which algorithm / visualisation to run.
# Each numbered branch further down handles one of the choices below.
algorithm = input("\n WELCOME!\n\n"
    + "|--------------------------------------------------------------------"
    + "-------------------|\n"
    + "\n| When you want to run the Pancake Sorting algorithm type in: 1"
    + " |"
    + "\n\n| When you want to run the Survival of the Fittest algorithm "
    + " type in: 2 |"
    + "\n\n| When you want to run the Strict Donkey Hillclimber algorithm"
    + " type in: 3 |"
    + "\n\n| When you want to run the Steepest Descend Valley Abseiler"
    + " algorithm, type in: 4 |"
    + "\n\n| When you want to run the Branch and Bound algorithm,"
    + " type in: 5 |\n"
    + "| !(WARNING CAN TAKE ALL NIGHT...)! "
    + " |"
    + "\n\n| When you want a combined visualization of the results,"
    + " type in: 6 |\n\n"
    + "|--------------------------------------------------------------------"
    + "-------------------|\n"
    + "\n\nType in the algorithm that you want to run: ")
''' PANCAKESORT '''
if algorithm == '1':
    # Secondary menu: run once on the real data or on a random test set.
    default = input("|-------------------------------------------------------"
        + "---------------------------------|\n"
        + "\n| If you want to run the algorithm with default"
        + "parameters, type 'd' |\n\n"
        + "| For running the algorithm on a test-set with 100 random "
        + "genomes of length 25, type 't' |\n\n"
        + "|-------------------------------------------------------"
        + "---------------------------------|\n\n"
        + "\n\nType in how you want to run the algorithm: ")
    if default == 'd':
        # Single run on the mel/mir genome data.
        print(pancakeSort(data.mel, data.mir))
    elif default == 't':
        tester.pancakeTester([*range(1,26)])
''' POPULATION BASED '''
if algorithm == '2':
    # Secondary menu for the population-based (genetic) algorithm.
    default = input("|-------------------------------------------------------"
        + "---------------------------------|\n"
        + "\n| If you want to run the algorithm once with the default"
        + " parameters, type 'd' |\n\n"
        + "| For custom parameters, type 'c' "
        + " |\n\n"
        + "| For running the algorithm (with default parameters)"
        + " on a test-set with 100 random |\n"
        + "| genomes of length 25, type 't' "
        + " |\n\n"
        + "| For running the algorithm 100 times"
        + " with (default parameters)"
        + ", type 'dt' |\n\n"
        + "| For running the algorithm on a test-set"
        + " with visualisation, type 'tv' |\n\n"
        + "| For running the algorithm on the Mel set 100 times"
        + " with different parameters and |\n"
        + "| a visualisation, type 'dv' "
        + " |\n\n"
        + "|-------------------------------------------------------"
        + "---------------------------------|\n\n"
        + "\n\nType in how you want to run the algorithm: ")
    if default == 'd':
        # Default: population of 300 on the mel/mir data.
        print(populationBased(300, data.mel, data.mir))
    elif default == 'c':
        # Custom run: choose one of four preset population sizes.
        popSize = input("|----------------------------------------------------"
            + "------------------------------------|\n"
            + "\n| Define the population size you would like the "
            + "algorithm to use. |\n\n"
            + "| For a population size of 50, type 's' "
            + " |\n\n"
            + "| For a population size of 150, type 'm' "
            + " |\n\n"
            + "| For a population size of 300, type 'l' "
            + " |\n\n"
            + "| For a population size of 500, type 'xl' "
            + " |\n\n"
            + "|----------------------------------------------------"
            + "------------------------------------|\n\n"
            + "Type in which population-size you would like the"
            + " algorithm to use: ")
        if popSize == 's':
            print(populationBased(50, data.mel, data.mir))
        elif popSize == 'm':
            print(populationBased(150, data.mel, data.mir))
        elif popSize == 'l':
            print(populationBased(300, data.mel, data.mir))
        elif popSize == 'xl':
            print(populationBased(500, data.mel, data.mir))
    if default == 't':
        tester.populationTester(150, [*range(1,26)])
    if default == 'dt':
        tester.populationTester(150, data.mel)
        #visualize.populationVisualizer()
    # else:
    #     print("Error, unknown input")
    if default == 'tv':
        visualize.populationVisualizer([*range(1,26)])
    if default == 'dv':
        visualize.populationVisualizer(data.mel)
''' SIMULATED ANNEALING '''
if algorithm == '3':
    # Secondary menu for the simulated-annealing ("Strict Donkey") climber.
    default = input("|-------------------------------------------------------"
        + "---------------------------------|\n"
        + "\n| If you want to run the algorithm once with the default"
        + " parameters, type 'd' |\n\n"
        + "| For custom parameters, type 'c' "
        + " |\n\n"
        + "| For running the algorithm (with default parameters)"
        + " on a test-set with 100 random |\n"
        + "| genomes of length 25, type 't' "
        + " |\n\n"
        + "| For running the algorithm 100 times"
        + " with (default parameters)"
        + ", type 'dt' |\n\n"
        + "| For running the algorithm on a test-set"
        + " with visualisation, type 'tv' |\n\n"
        + "| For running the algorithm on the Mel set 100 times"
        + " with different parameters and |\n"
        + "| a visualisation, type 'dv' "
        + " |\n\n"
        + "|-------------------------------------------------------"
        + "---------------------------------|\n\n"
        + "\n\nType in how you want to run the algorithm: ")
    if default == 'd':
        # Default: 1000-iteration fail interval with scoreNeighbours.
        print(simulatedAnnealing(data.mel, data.mir, 1000,
                                 score.scoreNeighbours))
    elif default == 'c':
        # Custom run: first pick a score function, then a fail interval.
        scoreF = input("|-------------------------------------------------"
            + "---------------------------------------|\n"
            + "\n| Define the score-function you would like to use."
            + " |\n\n"
            + "| For scoreNeighbours, type: 1 "
            + " |\n\n"
            + "| For scoreNeighboursModifier, type: 2 "
            + " |\n\n"
            + "|-------------------------------------------------"
            + "---------------------------------------|\n\n"
            + " Type in which score-function you would like the"
            + " algorithm to use: ")
        if scoreF == '1':
            failV = input("|-------------------------------------------------"
                + "---------------------------------------|\n"
                + "\n| Define the interval with which the algorithm"
                + " allows lesser mutations to pass. |\n\n"
                + "| For an interval of 1000, type: s "
                + " |\n\n"
                + "| For an interval of 10.000, type: m "
                + " |\n\n"
                + "| For an interval of 100.000, type l "
                + " |\n\n"
                + "|-------------------------------------------------"
                + "---------------------------------------|\n\n"
                + "Type in which interval-size you would like the"
                + " algorithm to use: ")
            if failV == 's':
                print(simulatedAnnealing(data.mel, data.mir, 1000,
                                         score.scoreNeighbours))
            elif failV == 'm':
                print(simulatedAnnealing(data.mel, data.mir, 10000,
                                         score.scoreNeighbours))
            elif failV == 'l':
                print(simulatedAnnealing(data.mel, data.mir, 100000,
                                         score.scoreNeighbours))
            else:
                print("Error, unknown input")
        elif scoreF == '2':
            failV = input("|-------------------------------------------------"
                + "---------------------------------------|\n"
                + "\n| Define the interval with which the algorithm"
                + " allows lesser mutations to pass. |\n\n"
                + "| For an interval of 1000, type: s "
                + " |\n\n"
                + "| For an interval of 10.000, type: m "
                + " |\n\n"
                + "| For an interval of 100.000, type l "
                + " |\n\n"
                + "|-------------------------------------------------"
                + "---------------------------------------|\n\n"
                + "Type in which interval-size you would like the"
                + " algorithm to use: ")
            if failV == 's':
                print(simulatedAnnealing(data.mel, data.mir, 1000,
                                         score.scoreNeighboursModifier))
            elif failV == 'm':
                print(simulatedAnnealing(data.mel, data.mir, 10000,
                                         score.scoreNeighboursModifier))
            elif failV == 'l':
                print(simulatedAnnealing(data.mel, data.mir, 100000,
                                         score.scoreNeighboursModifier))
            else:
                print("Error, unknown input")
        else:
            print("Error, unknown input")
    elif default == 't':
        tester.simulatedTester(1000, [*range(1,26)])
    elif default == 'dt':
        tester.simulatedTester(1000, data.mel)
    elif default == 'tv':
        visualize.simulatedVisualizer([*range(1,26)])
    elif default == 'dv':
        visualize.simulatedVisualizer(data.mel)
''' STEEPEST DESCEND VALLEY ABSEILER '''
if algorithm == '4':
    visualize.SDVAVisualizer(data.mir, data.mel)
''' BRANCH AND BOUND '''
if algorithm == '5':
    # Exhaustive search -- can run for many hours (see the menu warning).
    print(branchAndBound(2, len(data.mel), data.mir, data.mel))
''' VISUALIZATION OF ALL ALGORITHMS COMBINED '''
if algorithm == '6':
    visualize.combinedVisualizer(data.mel, data.mir)
|
# Count the number of decimal digits in an integer read from the user,
# using repeated integer division by 10.
num = int(input("Enter a number: "))
temp = num  # remember the original value for the final message
summation = 0  # digit counter
while num > 0:
    # BUG FIX: the original also computed `i = num % 10` but never used it.
    summation = summation+1
    num = num//10
# BUG FIX: the loop never runs for 0, which is still one digit wide.
# NOTE(review): negative inputs are still reported as 0 digits -- confirm
# whether they should be supported (e.g. via abs()).
if temp == 0:
    summation = 1
print("No of digits in",temp,"are",summation)
|
import numpy as np
import cv2
import os
# Haar cascades shipped with OpenCV (absolute paths for a Raspberry Pi
# install).  NOTE(review): face_cascade is loaded but never used below.
face_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.0.0/data/haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('/home/pi/opencv-3.0.0/data/haarcascades/haarcascade_eye.xml')
#the zero signifies which camera
cap = cv2.VideoCapture(0)
while 1:
    # Grab a frame and detect eyes on its grayscale version.
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    eyes = eye_cascade.detectMultiScale(gray)
    for(ex,ey,ew,eh) in eyes:
        cv2.rectangle(img,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        # Regions of interest restricted to the detected eye box.
        roi_gray2 = gray[ey:ey+eh, ex:ex+ew]
        roi_color2 = img[ey:ey+eh, ex:ex+ew]
        # Look for circular features (iris/pupil) inside the eye region.
        circles = cv2.HoughCircles(roi_gray2,cv2.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)
        try:
            # HoughCircles returns None when nothing is found, which makes
            # the subscript below raise -- hence the try/except.
            for i in circles[0,:]:
                #draw the outer circle
                cv2.circle(roi_color2,(i[0],i[1]),i[2],(255,255,255),2)
                print("drawing circle")
                #draw the center of the circle
                cv2.circle(roi_color2,(i[0],i[1]),2,(255,255,255),3)
        except Exception as e:
            # Python 2 print statement.
            print e
    cv2.imshow('img', img)
    # Exit on ESC (key code 27).
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
from boto import ec2
class EC2Launcher():
    """Launches EC2 instances (on-demand or spot) from an AMI via boto."""

    def __init__(self, image_id, n_instances=1, price=0.01, user_data_file="",
                 region='aws-east-1', instance_type='m3.large',
                 is_spot_request=True):
        """Connect to the region and record the launch parameters.

        :param image_id: AMI to launch
        :param n_instances: how many instances to start
        :param price: max spot bid in USD/hour (spot requests only)
        :param user_data_file: optional path to a cloud-init/user-data script
        :param region: EC2 region name
        :param instance_type: EC2 instance type
        :param is_spot_request: True for a spot request, False for on-demand
        """
        self.image_id = image_id
        self.n_instances = n_instances
        self.price = price
        # BUG FIX: instance_type was accepted but silently dropped.
        self.instance_type = instance_type
        self.is_spot_request = is_spot_request
        self.conn = ec2.connect_to_region(region)
        if user_data_file:
            with open(user_data_file, 'r') as f:
                self.user_data = f.read()
        else:
            self.user_data = None

    def launch(self):
        """Start the configured instances.

        BUG FIX: the original referenced an undefined name ``conn``
        (instead of ``self.conn``) and had the branches swapped --
        it called ``run_instances`` (on-demand, which takes no ``price``
        argument) for spot requests, and ``request_spot_instances`` for
        on-demand launches.
        """
        if self.is_spot_request:
            self.conn.request_spot_instances(price=self.price,
                                             image_id=self.image_id,
                                             count=self.n_instances,
                                             instance_type=self.instance_type,
                                             user_data=self.user_data)
        else:
            self.conn.run_instances(image_id=self.image_id,
                                    min_count=self.n_instances,
                                    max_count=self.n_instances,
                                    instance_type=self.instance_type,
                                    user_data=self.user_data)

    def terminate(self):
        """Tear down launched instances."""
        pass # TODO
if __name__ == '__main__':
    # Smoke-test marker (Python 2 print statement).
    print 'Testing'
# Below: a quick way to build a guitar-neck overview using sharps (#)
# or flats (b).  The 12-note cycle is tripled so a 25-fret slice can
# start from any note.
sharps = 'A A# B C C# D D# E F F# G G#'.split() * 3
flats = 'A Bb B C Db D Eb E F Gb G Ab'.split() * 3
def string_builder(notes_type, *args):
    """Build one 25-note string (open note + 24 frets) per argument.

    :param notes_type: note sequence long enough to slice 25 entries
        starting from the first occurrence of any requested note.
    :param args: open-string note names, one per string.
    :return: list with one 25-note list per argument.
    """
    return [
        notes_type[notes_type.index(open_note):notes_type.index(open_note) + 25]
        for open_note in args
    ]
# neck = string_builder(sharps, 'E', 'A', 'D', 'G', 'B', 'E')
# for string in neck:
# print(string)
def string_sets():
    """Return named string subsets of standard tuning for drop voicings.

    BUG FIX: the original computed these lists and discarded them all
    (the function returned None); they are now returned so callers can
    actually use them.  ``None`` marks a muted/unused string.

    :return: dict mapping a voicing name to its 6-entry string list.
    """
    standard = ['E', 'A', 'D', 'G', 'B', 'E']
    return {
        'standard': standard,
        'drop2_top': [standard[0], standard[1], standard[2], standard[3], None, None],
        'drop2_middle': [None, standard[1], standard[2], standard[3], standard[4], None],
        'drop2_bottom': [None, None, standard[2], standard[3], standard[4], standard[5]],
        'drop3_top': [standard[0], None, standard[2], standard[3], standard[4], None],
        'drop3_bottom': [None, standard[1], None, standard[3], standard[4], standard[5]],
        'drop_2and4_top': [standard[0], standard[1], None, standard[3], standard[4], None],
        'drop_2and4_bottom': [None, standard[1], standard[2], None, standard[4], standard[5]],
    }
# for i in strings_builder(flats):
# print(i)
|
from mattermostdriver import Driver
from coffeebot import config, utils
def main():
    """Run one Coffee Buddies cycle.

    Authenticates against the Mattermost server, fetches the channel's
    members, records them and their pairings in the database, then
    direct-messages each resulting pair.
    """
    print("Creating Mattermost Driver...")
    driver_options = {
        'url': config.URL,
        'login_id': config.USERNAME,
        'password': config.PASSWORD,
        'port': config.PORT
    }
    driver = Driver(driver_options)
    print("Authenticating...")
    driver.login()
    # Sanity check that the authenticated session is usable.
    driver.users.get_user('me')
    print("Successfully authenticated.")
    print("Retrieving Coffee Buddies participants...")
    team_name = config.TEAM_NAME
    channel_name = config.CHANNEL_NAME
    members = utils.get_channel_members(driver, team_name, channel_name)
    print("Successfully retrieved Coffee Buddies participants.")
    print("Preparing participants database...")
    utils.create_users(members)
    utils.create_pairs(members)
    print("Succesfully prepared participants database.")
    print("Pairing Coffee Buddies participants...")
    pairs = utils.get_pairs(members)
    print("Successfully paired Coffee Buddies participants.")
    print("Messaging paired Coffee Buddies participants...")
    utils.message_pairs(driver, pairs)
    print("Successfully messaged paired Coffee Buddies participants.")
# Run one full pairing cycle when executed as a script.
if __name__ == '__main__':
    main()
|
# This is a complete example that computes histogram for each region of a volume defined by a segment.
# This script requires installation of SegmentEditorExtraEffects extension, as it uses the Split volume effect,
# which is provided by this extension.
import os
import vtk, qt, ctk, slicer
import logging
from SegmentEditorEffects import *
import vtkSegmentationCorePython as vtkSegmentationCore
import sitkUtils
import SimpleITK as sitk
class SegmentEditorEffect(AbstractScriptedSegmentEditorEffect):
  """Segment Editor effect that creates one volume per segment, cropped to
  the segment's effective extent, with optional constant-value padding."""

  def __init__(self, scriptedEffect):
    scriptedEffect.name = 'Split volume'
    scriptedEffect.perSegment = True # this effect operates on a single selected segment
    AbstractScriptedSegmentEditorEffect.__init__(self, scriptedEffect)
    # Effect-specific members
    self.buttonToOperationNameMap = {}

  def clone(self):
    # It should not be necessary to modify this method
    import qSlicerSegmentationsEditorEffectsPythonQt as effects
    clonedEffect = effects.qSlicerSegmentEditorScriptedEffect(None)
    clonedEffect.setPythonSource(__file__.replace('\\','/'))
    return clonedEffect

  def icon(self):
    # It should not be necessary to modify this method
    iconPath = os.path.join(os.path.dirname(__file__), 'SegmentEditorEffect.png')
    if os.path.exists(iconPath):
      return qt.QIcon(iconPath)
    return qt.QIcon()

  def helpText(self):
    """Return the HTML help shown in the effect's panel."""
    return """<html>Create a volume node for each segment, cropped to the segment extent. Cropping is applied to the master volume by default. Optionally, padding can be added to the output volumes.<p>
</html>"""

  def setupOptionsFrame(self):
    """Build the options panel: input volume selector, pad size, fill value
    and the Apply button."""
    # Input volume selector; "(Master volume)" is the None placeholder.
    self.inputVolumeSelector = slicer.qMRMLNodeComboBox()
    self.inputVolumeSelector.nodeTypes = ["vtkMRMLScalarVolumeNode"]
    self.inputVolumeSelector.selectNodeUponCreation = True
    self.inputVolumeSelector.addEnabled = True
    self.inputVolumeSelector.removeEnabled = True
    self.inputVolumeSelector.noneEnabled = True
    self.inputVolumeSelector.noneDisplay = "(Master volume)"
    self.inputVolumeSelector.showHidden = False
    self.inputVolumeSelector.setMRMLScene(slicer.mrmlScene)
    self.inputVolumeSelector.setToolTip("Volume to split. Default is current master volume node.")
    self.inputVolumeSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.onInputVolumeChanged)
    inputLayout = qt.QHBoxLayout()
    inputLayout.addWidget(self.inputVolumeSelector)
    self.scriptedEffect.addLabeledOptionsWidget("Input Volume: ", inputLayout)
    # Pad size (voxels added on each side of every axis).
    self.pad = qt.QSpinBox()
    self.pad.setToolTip("Choose the number of voxels used to pad the image in each dimension")
    self.pad.minimum = 0
    self.pad.maximum = 1000
    self.padLabel = qt.QLabel("Pad voxels: ")
    padValueLayout = qt.QFormLayout()
    padValueLayout.addRow(self.padLabel, self.pad)
    self.scriptedEffect.addOptionsWidget(padValueLayout)
    # Intensity used to fill the padded region; range covers int16..uint16.
    self.fillValue = qt.QSpinBox()
    self.fillValue.setToolTip("Choose the voxel intensity that will be used to pad the output volumes.")
    self.fillValue.minimum = -32768
    self.fillValue.maximum = 65535
    self.fillValue.value=0
    self.fillValueLabel = qt.QLabel("Fill value: ")
    fillValueLayout = qt.QFormLayout()
    fillValueLayout.addRow(self.fillValueLabel, self.fillValue)
    self.scriptedEffect.addOptionsWidget(fillValueLayout)
    # Apply button
    self.applyButton = qt.QPushButton("Apply")
    self.applyButton.objectName = self.__class__.__name__ + 'Apply'
    self.applyButton.setToolTip("Generate volumes for each segment, cropped to the segment extent. No undo operation available once applied.")
    self.scriptedEffect.addOptionsWidget(self.applyButton)
    self.applyButton.connect('clicked()', self.onApply)

  def createCursor(self, widget):
    # Turn off effect-specific cursor for this effect
    return slicer.util.mainWindow().cursor

  def updateGUIFromMRML(self):
    # NOTE(review): these locals are currently unused; kept as a hook for
    # future GUI synchronization -- confirm before removing entirely.
    inputVolume = self.inputVolumeSelector.currentNode()
    if inputVolume is None:
      inputVolume = self.scriptedEffect.parameterSetNode().GetMasterVolumeNode()
    masterVolume = self.scriptedEffect.parameterSetNode().GetMasterVolumeNode()

  def getInputVolume(self):
    """Return the selected input volume, falling back to the master volume."""
    inputVolume = self.inputVolumeSelector.currentNode()
    if inputVolume is None:
      inputVolume = self.scriptedEffect.parameterSetNode().GetMasterVolumeNode()
    return inputVolume

  def onInputVolumeChanged(self):
    self.updateGUIFromMRML()

  def onApply(self):
    """Create one cropped (and optionally padded) volume per segment."""
    inputVolume = self.getInputVolume()
    # BUG FIX: removed a dead read of the selected segment ID here -- the
    # loop below overwrites it for every segment anyway.
    segmentationNode = self.scriptedEffect.parameterSetNode().GetSegmentationNode()
    volumesLogic = slicer.modules.volumes.logic()
    scene = inputVolume.GetScene()
    # Symmetric padding: grow each axis by pad voxels on both sides.
    padExtent = [-self.pad.value, self.pad.value, -self.pad.value, self.pad.value, -self.pad.value, self.pad.value]
    # Iterate over segments
    for segmentIndex in range(segmentationNode.GetSegmentation().GetNumberOfSegments()):
      segmentID = segmentationNode.GetSegmentation().GetNthSegmentID(segmentIndex)
      segmentIDs = vtk.vtkStringArray()
      segmentIDs.InsertNextValue(segmentID)
      # Create volume for output
      outputVolumeName = inputVolume.GetName() + '_' + segmentID
      outputVolume = volumesLogic.CloneVolumeGeneric(scene, inputVolume, outputVolumeName, False)
      # Crop segment: blank everything outside the segment with 0.
      slicer.app.setOverrideCursor(qt.Qt.WaitCursor)
      import SegmentEditorMaskVolumeLib
      SegmentEditorMaskVolumeLib.SegmentEditorEffect.maskVolumeWithSegment(self,segmentationNode, segmentID, "FILL_OUTSIDE", [0], inputVolume, outputVolume)
      # BUG FIX: removed leftover debug code that printed the RAS/IJK
      # transforms of a hard-coded point for every segment.
      # Calculate the effective (non-background) extent of the masked image.
      cropThreshold = 0
      img = slicer.modules.segmentations.logic().CreateOrientedImageDataFromVolumeNode(outputVolume)
      img.UnRegister(None)
      extent=[0,0,0,0,0,0]
      vtkSegmentationCore.vtkOrientedImageDataResample.CalculateEffectiveExtent(img, extent, cropThreshold)
      # Crop to the effective extent, then pad it by padExtent.
      cropFilter = vtk.vtkImageConstantPad()
      cropFilter.SetInputData(outputVolume.GetImageData())
      cropFilter.SetConstant(self.fillValue.value)
      cropFilter.SetOutputWholeExtent(extent)
      cropFilter.Update()
      padFilter = vtk.vtkImageConstantPad()
      padFilter.SetInputData(cropFilter.GetOutput())
      padFilter.SetConstant(self.fillValue.value)
      for i in range(len(extent)):
        extent[i]=extent[i]+padExtent[i]
      padFilter.SetOutputWholeExtent(extent)
      padFilter.Update()
      outputVolume.SetAndObserveImageData(padFilter.GetOutput())
      qt.QApplication.restoreOverrideCursor()
|
from game import *
from actor import *
#from object import *
class Object(Entity):
    """Game object built on Entity; keeps its own constructor args in ``vars``."""
    #Constructor
    # Number of constructor arguments consumed by this subclass itself.
    nb_cnstrctArg = 1
    def __init__(self, args): #name,pos,dim,health
        # NOTE(review): ``super().nb_cnstrctArg`` reads the attribute from
        # the parent class -- assumes Entity defines nb_cnstrctArg; confirm.
        # The slices below overlap when Entity.nb_cnstrctArg differs from
        # len(args) - Entity's share: the parent receives all but the last
        # N entries, while self.vars keeps everything from index N onward.
        # Verify this split against Entity's constructor contract.
        super().__init__(args[:-super().nb_cnstrctArg])
        self.vars = args[super().nb_cnstrctArg:]
from __future__ import annotations
import enum
__all__ = (
"Sentinel",
"Undefined",
"sentinel",
"undefined",
)
class Sentinel(enum.Enum):
    """
    A special type to represent a special value to indicate closing/shutdown of queues.
    """
    # Single member; the canonical instance is exposed as the module-level
    # ``sentinel`` and should be compared with ``is``.
    token = 0
class Undefined(enum.Enum):
    """
    A special type to represent an undefined value.
    """
    # Single member; the canonical instance is exposed as the module-level
    # ``undefined`` and should be compared with ``is``.
    token = 0
# Canonical singleton instances; enum members are unique, so callers can
# safely compare against these with ``is``.
sentinel = Sentinel.token
undefined = Undefined.token
|
# Freeze-and-fine-tune transfer learning
# Training network based on the VGG19 model
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam,SGD
import numpy as np
from keras.applications import ResNet50,VGG19
import matplotlib.pyplot as plt
# tf.test.gpu_device_name()
model_name = "FrozenAndFinetuning" # module name, used for plot titles and output file names
train_epochs0 = 3 # number of epochs for the frozen (feature-extraction) phase
train_epochs1 = 7 # number of epochs for the fine-tuning phase
def show_history_mse2(history0,history1):
    """Plot the MSE curves of both training phases as one figure.

    Concatenates the train/validation loss histories of the frozen phase
    (``history0``) and the fine-tuning phase (``history1``), saves the
    figure to drive/app/<model_name>_mse.jpg, shows it, and echoes the
    raw loss lists to stdout.
    """
    train_curve = history0.history['loss'] + history1.history['loss']
    val_curve = history0.history['val_loss'] + history1.history['val_loss']
    plt.plot(train_curve)
    plt.plot(val_curve)
    plt.title(model_name+' mse')
    plt.ylabel('loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.savefig('drive/app/'+model_name+'_mse.jpg',dpi=200)
    plt.show()
    print(train_curve)
    print(val_curve)
X = np.load('drive/app/X_data.npy')
Y = np.load('drive/app/Y_data.npy')
# Bring X onto the same order of magnitude as Y.
X = X / 25
print("read the data")
# Slice into train/test partitions at sample 5000.
x_train = X[:5000]
y_train = Y[:5000]
x_test = X[5000:]
y_test = Y[5000:]
print("train data and test data")
#resnet = ResNet50(include_top=False, weights='imagenet', input_shape=(220, 220, 3), pooling='avg')
resnet=VGG19(include_top=False, weights='imagenet', input_shape=(220, 220, 3), pooling='avg')
model = Sequential()
model.add(resnet)
model.add(Dense(1,name="aaa"))
# 冻结------------------------------------------
print("Frozen!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# 设置ResNet50不可训练
model.layers[0].trainable = False
# print(resnet.summary())
print(model.summary())
print("compile")
model.compile(loss='mean_squared_error', optimizer=Adam())
print("fit")
Hist = model.fit(x_train, y_train, epochs=train_epochs0, batch_size=64, validation_data=(x_test, y_test))
model.save_weights('drive/app/weight.h5')
print(Hist.history)
# 微调---------------------------------------------
print("Finetuning!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# 设置ResNet50可训练
model2=Sequential()
model2.add(resnet)
model2.add(Dense(1,name="aaa"))
model2.load_weights('drive/app/weight.h5',by_name=True)
for layer in model2.layers:
layer.trainable = True
print(model2.summary())
print("compile")
model2.compile(loss='mean_squared_error', optimizer=Adam(lr=0.0002))
print("fit")
Hist2 = model2.fit(x_train, y_train, epochs=train_epochs1, batch_size=64, validation_data=(x_test, y_test))
print(Hist2.history)
# 输出图像---------------------------------------------
show_history_mse2(Hist,Hist2)
# show_history_ce(Hist)
# loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
# print(loss_and_metrics)
model2.save('drive/app/'+model_name+'_model.h5')
del X
del Y
del x_train
del y_train
del x_test
del y_test
|
import cv2 as cv
img = cv.imread('irene.jpg', cv.IMREAD_UNCHANGED)
cv.namedWindow('img') # create the window
cv.moveWindow('img', 0, 0) # move the displayed window to the top-left corner
cv.imshow('img', img) # creates an 'img' window if none exists; otherwise shows img in it
cv.waitKey()
cv.moveWindow('img', 400, 400) # after one key press, move the window
cv.waitKey()
cv.destroyAllWindows()
# Difference between WINDOW_NORMAL and WINDOW_AUTOSIZE
cv.namedWindow('img-autosize', cv.WINDOW_AUTOSIZE) # resize not possible
cv.namedWindow('img-normal', cv.WINDOW_NORMAL) # resize possible
cv.resizeWindow('img-autosize', 700, 700) # once created, an AUTOSIZE window cannot be resized
cv.resizeWindow('img-normal', 700, 700)
cv.imshow('img-autosize', img)
cv.imshow('img-normal', img)
cv.waitKey()
cv.destroyAllWindows()
|
from flask import Flask, render_template, request, redirect, session, flash, jsonify
from mysqlconnection import connectToMySQL
import re
import base64
import os
#from datetime import datetime
from flask_bcrypt import Bcrypt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
from yubico_client.py3 import b
from werkzeug.utils import secure_filename
# Basic shape check for email addresses submitted in forms.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
DATABASE = 'dojo_messages'  # MySQL schema used by all queries below
app = Flask(__name__)
# NOTE(review): hard-coded secret key; should come from config/env in production.
app.secret_key = "Blahzay Blahzay"
bcrypt = Bcrypt(app)  # password-hashing helper
UPLOAD_FOLDER = 'static/images'  # avatar upload destination
ALLOWED_EXTENSIONS = {"jpeg", "png", "jpg"}  # permitted avatar file types
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def index():
    """Landing page with the login and registration forms."""
    return render_template("index.html")
@app.route('/register', methods=['POST'])
def on_register():
    """Validate the registration form, create the user, derive and store a
    per-user Fernet key, then log the new user in via the session."""
    is_valid = True
    if len(request.form['fname']) < 2:
        is_valid = False
        flash("Please enter a first name of at least 2 characters")
    if len(request.form['lname']) < 2:
        is_valid = False
        flash("Please enter a last name of at least 2 characters")
    if len(request.form['email']) < 1:
        is_valid = False
        flash("Email cannot be blank", 'email')
    elif not EMAIL_REGEX.match(request.form['email']):
        is_valid = False
        flash("Invalid email address!", 'email')
    else:
        # Well-formed email: reject duplicates.
        db = connectToMySQL(DATABASE)
        data = {
            "em": request.form['email']
        }
        result = db.query_db("SELECT * FROM users WHERE email = %(em)s",data)
        if len(result) > 0:
            flash("This email address is already registered.")
            is_valid = False
    if len(request.form['pass']) < 8:
        is_valid = False
        flash("Please enter a password of at least 8 characters")
    if request.form['pass'] != request.form['cpass']:
        is_valid = False
        flash("Passwords do not match")
    if is_valid:
        # include some logic to validate user input before adding them to the database!
        # create the hash
        pw_hash = bcrypt.generate_password_hash(request.form['pass'])
        # print(pw_hash)
        # prints something like b'$2b$12$sqjyok5RQccl9S6eFLhEPuaRaJCcH3Esl2RWLm/cimMIEnhnLb7iC'
        # be sure you set up your database so it can store password hashes this long (63 characters)
        email_provided = request.form['email'] # This is input in the form of a string
        email = email_provided.encode() # Convert to type bytes
        # NOTE(review): the salt is random per registration and never stored;
        # only the derived Fernet key below is persisted, so that key row must
        # never be lost or the user's messages become undecryptable.
        salt = b(os.urandom(30)) # CHANGE THIS - recommend using a key from os.urandom(16), must be of type bytes
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100000,
            backend=default_backend()
        )
        # urlsafe-base64 32-byte key: the exact format Fernet expects.
        ukey = base64.urlsafe_b64encode(kdf.derive(email))
        print(ukey)
        mysql = connectToMySQL(DATABASE)
        query = """INSERT INTO users
        (first_name, last_name, email, password, created_at, updated_at)
        VALUES (%(fn)s, %(ln)s, %(em)s, %(pw)s, NOW(), NOW());"""
        data = {
            "fn": request.form["fname"],
            "ln": request.form["lname"],
            "em": request.form["email"],
            "pw": pw_hash
        }
        # query_db returns the new row id; storing it in session logs the user in.
        session['user_id'] = mysql.query_db(query, data)
        # add user to database
        # display success message
        # flash("User successfully added")
        mysql = connectToMySQL(DATABASE)
        # `keys` is schema-qualified because KEYS is a reserved word in MySQL.
        query = "INSERT INTO dojo_messages.keys (user_id, user_key) VALUES (%(u_id)s, %(key)s);"
        data = {
            "u_id": session['user_id'],
            "key": ukey
        }
        mysql.query_db(query, data)
        return redirect('/dashboard')
    return redirect('/')
@app.route("/login", methods=["POST"])
def on_login():
is_valid = True
if len(request.form['email']) < 1:
is_valid = False
flash("Email cannot be blank")
elif not EMAIL_REGEX.match(request.form['email']): # test whether a field matches the pattern
is_valid = False
flash("Invalid email address!", 'email')
if is_valid:
mysql = connectToMySQL(DATABASE)
query = "SELECT user_id, email, password FROM users WHERE email = %(em)s"
data = {
"em": request.form['email']
}
user_data = mysql.query_db(query, data)
if user_data:
user = user_data[0]
if bcrypt.check_password_hash(user_data[0]['password'], request.form['pass']):
session['user_id'] = user['user_id']
# verify password
# print(user_data)
return redirect("/dashboard")
else:
flash("Email/Password combo is invalid")
return redirect("/")
else:
flash("Please register first")
return redirect("/")
else:
return redirect("/")
@app.route('/logout')
def on_logout():
    """Wipe all session data and return to the landing page."""
    session.clear()
    return redirect("/")
@app.route('/dashboard')
def on_messages_dashboard():
    """Main feed: the user's profile header plus whispers, with messages
    decrypted only for mutual follows (and the user themself)."""
    if 'user_id' not in session:
        return redirect('/')
    # Profile header data for the logged-in user.
    mysql = connectToMySQL(DATABASE)
    query = "SELECT first_name, last_name, avatar, bio FROM users WHERE user_id = %(u_id)s"
    data = {
        'u_id': session['user_id']
    }
    user_data = mysql.query_db(query, data)
    if user_data:
        user_data = user_data[0]
    # All whispers with like counts (message text still encrypted here).
    mysql = connectToMySQL(DATABASE)
    query = """SELECT *,
    COUNT(message_like_id) AS likes
    FROM messages
    JOIN users ON messages.author_id = users.user_id
    LEFT JOIN user_likes
    ON messages.message_id = user_likes.message_like_id
    GROUP BY messages.message_id ORDER BY messages.message_id DESC"""
    whispers = mysql.query_db(query, data)
    # Who the session user follows...
    mysql = connectToMySQL(DATABASE)
    query = "SELECT followed_id FROM followers WHERE follower_id = %(u_id)s"
    data = {
        'u_id': session['user_id']
    }
    followed_users = mysql.query_db(query, data)
    followed_ids = [data['followed_id'] for data in followed_users]
    print(followed_ids)
    # ...and who follows the session user.
    mysql = connectToMySQL(DATABASE)
    query = "SELECT follower_id FROM followers WHERE followed_id = %(u_id)s"
    data = {
        'u_id': session['user_id']
    }
    follower_users = mysql.query_db(query, data)
    follower_ids = [data['follower_id'] for data in follower_users]
    print(follower_ids)
    mysql = connectToMySQL(DATABASE)
    query = "SELECT users.user_id, users.first_name, users.last_name FROM users WHERE users.user_id != %(u_id)s"
    data = {
        'u_id': session['user_id']
    }
    users = mysql.query_db(query, data)
    mysql = connectToMySQL(DATABASE)
    query = "SELECT user_key FROM dojo_messages.keys WHERE user_id = %(u_id)s"
    data = {
        'u_id': session['user_id']
    }
    key_data = mysql.query_db(query, data)
    if key_data:
        key_data = key_data[0]
    # mysql = connectToMySQL(DATABASE)
    # query = """SELECT messages.author_id, messages.message_id, messages.message, dojo_messages.keys.user_key, users.first_name, users.last_name, users.user_id
    # FROM messages
    # JOIN dojo_messages.keys
    # ON messages.author_id = dojo_messages.keys.user_id
    # JOIN users ON messages.author_id = users.user_id
    # LEFT JOIN user_likes
    # ON messages.message_id = user_likes.message_like_id"""
    # # ORDER BY messages.message_id DESC"""
    # dec_whispers = mysql.query_db(query, data)
    # Mutual-follow set: only whispers from these authors get decrypted.
    following_followed = [session['user_id']]
    if follower_ids:
        for j in follower_ids:
            if j in followed_ids:
                is_okay = True
                following_followed.append(j)
                print(True)
            else:
                # NOTE(review): this dec_whispers assignment is always
                # overwritten by the query below -- dead code.
                is_okay = False
                dec_whispers = []
                print(False)
    else:
        is_okay = False
        dec_whispers = []
        print(False)
    print(f"Annie are you okay? {is_okay}")
    # if is_okay:
    # for i in following_followed:
    print(f"following_followed iteration: {following_followed}")
    mysql = connectToMySQL(DATABASE)
    query = """SELECT messages.author_id, messages.message_id, messages.message, dojo_messages.keys.user_key, users.first_name, users.last_name, users.user_id
    FROM messages
    JOIN dojo_messages.keys
    ON messages.author_id = dojo_messages.keys.user_id
    JOIN users ON messages.author_id = users.user_id
    LEFT JOIN user_likes
    ON messages.message_id = user_likes.message_like_id"""
    # WHERE users.user_id = %(u_id)s OR users.user_id = %(s_id)s"""
    # ORDER BY messages.message_id DESC"""
    # data = {
    # 'u_id': following_followed,
    # 's_id': session['user_id']
    # }
    dec_whispers = mysql.query_db(query, data)
    # print(dec_whispers)
    # Decrypt in place with each author's stored key, mutual follows only.
    for k in dec_whispers:
        # print(following_followed)
        # print(k['author_id'])
        # print(type(k['author_id']))
        if k['author_id'] in following_followed:
            print(k)
            # print(dec_whispers)
            # print(k['user_key'])
            key = (k['user_key'])
            f = Fernet(key)
            k['message'] = f.decrypt(b(k['message']), ttl=None)
            k['message'] = k['message'].decode("utf-8")
    # NOTE(review): duplicate of the key lookup above -- redundant query.
    mysql = connectToMySQL(DATABASE)
    query = "SELECT user_key FROM dojo_messages.keys WHERE user_id = %(u_id)s"
    data = {
        'u_id': session['user_id']
    }
    key_data = mysql.query_db(query, data)
    if key_data:
        key_data = key_data[0]
    return render_template("dashboard.html", user_data=user_data, whispers=whispers, key_data=key_data, dec_whispers=dec_whispers, followed_ids=followed_ids)
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/upload_img', methods=['GET', 'POST'])
def upload_img():
    """Handle avatar upload: save the file and record its name on the user."""
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('Please select a file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename strips path tricks before we write to disk.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            mysql = connectToMySQL(DATABASE)
            query = "UPDATE users SET avatar = %(av)s WHERE user_id = %(u_id)s"
            data = {
                "av": filename,
                "u_id": session['user_id']
            }
            mysql.query_db(query, data)
            return redirect(request.url)
    # GET requests (and disallowed file types) land back on the dashboard.
    return redirect('/dashboard')
@app.route('/update_bio')
def update_bio():
    """Show the bio-editing page pre-filled with the user's current bio."""
    mysql = connectToMySQL(DATABASE)
    bio = mysql.query_db(
        "SELECT bio FROM users WHERE user_id = %(u_id)s",
        {"u_id": session['user_id']})
    return render_template("update_bio.html", bio=bio)
@app.route('/edit_bio', methods=['POST'])
def edit_bio():
    """Persist the posted bio text for the logged-in user, then go home."""
    payload = {
        "bio": request.form['bio'],
        "u_id": session['user_id']
    }
    mysql = connectToMySQL(DATABASE)
    mysql.query_db(
        "UPDATE users SET bio = %(bio)s WHERE user_id = %(u_id)s", payload)
    return redirect('/dashboard')
@app.route("/dashboard/update_bio", methods=["POST"])
def update_bio_api():
mysql = connectToMySQL(DATABASE)
query = "UPDATE users SET bio = %(bio)s WHERE user_id = %(u_id)s"
data = {
"bio": request.form['bio'],
"u_id": session['user_id']
}
mysql.query_db(query, data)
return jsonify({"success": "Bio has been successfully updated!"})
@app.route('/write_whisper', methods=['POST'])
def on_add_whisper():
    """Encrypt the posted whisper with the author's Fernet key and store it."""
    if 'user_id' not in session:
        return redirect("/")
    is_valid = True
    if len(request.form['a_whisper']) < 5:
        is_valid = False
        flash("Whisper must be at least 5 characters.")
    if is_valid:
        # Fetch the author's stored Fernet key.
        mysql = connectToMySQL(DATABASE)
        query = "SELECT user_key FROM dojo_messages.keys WHERE user_id = %(u_id)s"
        data = {
            'u_id': session['user_id']
        }
        key_data = mysql.query_db(query, data)
        if key_data:
            key_data = key_data[0]
        # NOTE(review): if no key row exists, key_data is still a list here and
        # the next line raises -- confirm every user gets a key at registration.
        key = b(key_data['user_key'])
        crypt_message = request.form['a_whisper'].encode()
        f = Fernet(key)
        encrypted_message = f.encrypt(crypt_message)
        # print(f"this is an {encrypted_message} message!!!!!")
        mysql = connectToMySQL(DATABASE)
        query = "INSERT INTO messages (message, author_id, created_at, updated_at) VALUES (%(msg)s, %(a_id)s, NOW(), NOW())"
        data = {
            'msg': encrypted_message,
            'a_id': session['user_id']
        }
        mysql.query_db(query, data)
    return redirect('/dashboard')
@app.route("/delete_whisper/<message_id>")
def on_delete(message_id):
mysql = connectToMySQL(DATABASE)
query = "DELETE FROM messages WHERE message_id = %(m_id)s AND author_id = %(u_id)s"
data = {
'm_id': message_id,
'u_id': session['user_id']
}
mysql.query_db(query, data)
return redirect("/dashboard")
@app.route("/ninjas")
def users_to_follow():
mysql = connectToMySQL(DATABASE)
query = "SELECT followed_id FROM followers WHERE follower_id = %(u_id)s"
data = {
'u_id': session['user_id']
}
followed_users = mysql.query_db(query, data)
followed_ids = [data['followed_id'] for data in followed_users]
mysql = connectToMySQL(DATABASE)
query = "SELECT users.user_id, users.first_name, users.last_name, users.avatar FROM users WHERE users.user_id != %(u_id)s"
data = {
'u_id': session['user_id']
}
users = mysql.query_db(query, data)
# print(users)
return render_template("/follow.html", users=users, followed_ids=followed_ids)
@app.route("/follow/<user_id>")
def follow_this_user_dashboard(user_id):
mysql = connectToMySQL(DATABASE)
query = "INSERT INTO followers (follower_id, followed_id) VALUES (%(folwr)s, %(folwd)s)"
data = {
'folwr': session['user_id'],
'folwd': user_id
}
mysql.query_db(query, data)
return redirect("/dashboard")
@app.route("/unfollow/<f_id>")
def on_unfollow_dashboard(f_id):
mysql = connectToMySQL(DATABASE)
query = "DELETE FROM followers WHERE follower_id = %(u_id)s AND followed_id = %(f_id)s"
data = {
'u_id': session['user_id'],
'f_id': f_id
}
mysql.query_db(query, data)
return redirect("/dashboard")
@app.route("/follow_user/<user_id>")
def follow_this_user(user_id):
mysql = connectToMySQL(DATABASE)
query = "INSERT INTO followers (follower_id, followed_id) VALUES (%(folwr)s, %(folwd)s)"
data = {
'folwr': session['user_id'],
'folwd': user_id
}
mysql.query_db(query, data)
return redirect("/ninjas")
@app.route("/unfollow_user/<f_id>")
def on_unfollow(f_id):
mysql = connectToMySQL(DATABASE)
query = "DELETE FROM followers WHERE follower_id = %(u_id)s AND followed_id = %(f_id)s"
data = {
'u_id': session['user_id'],
'f_id': f_id
}
mysql.query_db(query, data)
return redirect("/ninjas")
@app.route("/user_profile/<user_id>")
def user_profile(user_id):
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM users WHERE user_id = %(u_id)s;"
data = {
'u_id': user_id
}
user_data = mysql.query_db(query, data)
mysql = connectToMySQL(DATABASE)
query = """SELECT *,
COUNT(message_like_id) AS likes
FROM messages
JOIN users ON messages.author_id = users.user_id
LEFT JOIN user_likes
ON messages.message_id = user_likes.message_like_id
GROUP BY messages.message_id ORDER BY messages.message_id DESC"""
whispers = mysql.query_db(query, data)
mysql = connectToMySQL(DATABASE)
query = "SELECT followed_id FROM followers WHERE follower_id = %(u_id)s"
data = {
'u_id': session['user_id']
}
followed_users = mysql.query_db(query, data)
followed_ids = [data['followed_id'] for data in followed_users]
print(followed_ids)
mysql = connectToMySQL(DATABASE)
query = "SELECT follower_id FROM followers WHERE followed_id = %(u_id)s"
data = {
'u_id': session['user_id']
}
follower_users = mysql.query_db(query, data)
follower_ids = [data['follower_id'] for data in follower_users]
print(follower_ids)
mysql = connectToMySQL(DATABASE)
query = "SELECT users.user_id, users.first_name, users.last_name, users.avatar FROM users WHERE users.user_id != %(u_id)s"
data = {
'u_id': session['user_id']
}
users = mysql.query_db(query, data)
mysql = connectToMySQL(DATABASE)
query = "SELECT user_key FROM f8x0a94mtjmenwxa.keys WHERE user_id = %(u_id)s"
data = {
'u_id': session['user_id']
}
key_data = mysql.query_db(query, data)
if key_data:
key_data = key_data[0]
# mysql = connectToMySQL(DATABASE)
# query = """SELECT messages.author_id, messages.message_id, messages.message, f8x0a94mtjmenwxa.keys.user_key, users.first_name, users.last_name, users.user_id
# FROM messages
# JOIN f8x0a94mtjmenwxa.keys
# ON messages.author_id = f8x0a94mtjmenwxa.keys.user_id
# JOIN users ON messages.author_id = users.user_id
# LEFT JOIN user_likes
# ON messages.message_id = user_likes.message_like_id"""
# # ORDER BY messages.message_id DESC"""
# dec_whispers = mysql.query_db(query, data)
following_followed = [session['user_id']]
if follower_ids:
for j in follower_ids:
if j in followed_ids:
is_okay = True
following_followed.append(j)
print(True)
else:
is_okay = False
dec_whispers = []
print(False)
else:
is_okay = False
dec_whispers = []
print(False)
print(f"Annie are you okay? {is_okay}")
# if is_okay:
# for i in following_followed:
print(f"following_followed iteration: {following_followed}")
mysql = connectToMySQL(DATABASE)
query = """SELECT messages.author_id, messages.message_id, messages.message, f8x0a94mtjmenwxa.keys.user_key, users.first_name, users.last_name, users.user_id
FROM messages
JOIN f8x0a94mtjmenwxa.keys
ON messages.author_id = f8x0a94mtjmenwxa.keys.user_id
JOIN users ON messages.author_id = users.user_id
LEFT JOIN user_likes
ON messages.message_id = user_likes.message_like_id"""
# WHERE users.user_id = %(u_id)s OR users.user_id = %(s_id)s"""
# ORDER BY messages.message_id DESC"""
# data = {
# 'u_id': following_followed,
# 's_id': session['user_id']
# }
dec_whispers = mysql.query_db(query, data)
# print(dec_whispers)
for k in dec_whispers:
# print(following_followed)
# print(k['author_id'])
# print(type(k['author_id']))
if k['author_id'] in following_followed:
print(k)
# print(dec_whispers)
# print(k['user_key'])
key = (k['user_key'])
f = Fernet(key)
k['message'] = f.decrypt(b(k['message']), ttl=None)
k['message'] = k['message'].decode("utf-8")
mysql = connectToMySQL(DATABASE)
query = "SELECT user_key FROM f8x0a94mtjmenwxa.keys WHERE user_id = %(u_id)s"
data = {
'u_id': session['user_id']
}
key_data = mysql.query_db(query, data)
if key_data:
key_data = key_data[0]
return render_template("profile.html", user_data = user_data[0], whispers=whispers, key_data=key_data, dec_whispers=dec_whispers, users = users, followed_ids=followed_ids, follower_ids = follower_ids)
@app.route('/write_whisper_profile', methods=['POST'])
def on_add_whisper_profile():
    """Like on_add_whisper, but posted from a profile page: encrypt the
    whisper with the author's key, store it, and return to the referrer."""
    if 'user_id' not in session:
        return redirect("/")
    is_valid = True
    if len(request.form['a_whisper']) < 5:
        is_valid = False
        flash("Whisper must be at least 5 characters.")
    if is_valid:
        # Fetch the author's stored Fernet key.
        mysql = connectToMySQL(DATABASE)
        query = "SELECT user_key FROM dojo_messages.keys WHERE user_id = %(u_id)s"
        data = {
            'u_id': session['user_id']
        }
        key_data = mysql.query_db(query, data)
        if key_data:
            key_data = key_data[0]
        # NOTE(review): raises if no key row exists -- confirm every user
        # gets a key at registration.
        key = b(key_data['user_key'])
        crypt_message = request.form['a_whisper'].encode()
        f = Fernet(key)
        encrypted_message = f.encrypt(crypt_message)
        # print(f"this is an {encrypted_message} message!!!!!")
        mysql = connectToMySQL(DATABASE)
        query = "INSERT INTO messages (message, author_id, created_at, updated_at) VALUES (%(msg)s, %(a_id)s, NOW(), NOW())"
        data = {
            'msg': encrypted_message,
            'a_id': session['user_id']
        }
        mysql.query_db(query, data)
    return redirect(request.referrer)
@app.route("/contact_us")
def contact_us():
return render_template("/contact.html")
if __name__ == "__main__":
app.run(debug=True)
|
from random import random
class City(object):
    """A 2-D city location for TSP-style problems."""

    def __init__(self, x=0, y=0, create_random=False):
        """Create a city at (x, y); random integer coords in [0, 200] if
        *create_random* is true (overrides x/y)."""
        self.x = x
        self.y = y
        if create_random:
            # int(round(...)) keeps the original integer-grid behavior.
            self.x = int(round(random() * 200))
            self.y = int(round(random() * 200))

    def distance_to(self, city):
        """Return the Euclidean distance to *city*."""
        # math.hypot replaces the hand-rolled abs/sqrt pipeline; the abs
        # calls were redundant since the differences are squared anyway.
        import math  # kept local, as in the original, so module imports are unchanged
        return math.hypot(self.x - city.x, self.y - city.y)

    def __str__(self):
        return "(%d, %d)" % (self.x, self.y)
import numpy as np
import random
class Bandit:
    """Abstract base class for multi-armed bandit problems."""

    def __init__(self, num_arms, name='bandit'):
        """Store the arm count and a human-readable name.

        Args:
            num_arms: number of arms for the bandit problem
            name: custom name for the bandit
        """
        self.num_arms = num_arms
        self.name = name

    def get_num_arms(self):
        """Return the number of arms."""
        return self.num_arms

    def get_name(self):
        """Return the bandit's custom name."""
        return self.name

    def pull(self, arm):
        """Pull *arm* and return its reward; subclasses must implement.

        Args:
            arm: arm index
        """
        raise NotImplementedError

    def get_expected_reward_optimal_arm(self):
        """Expected reward of the best arm (used for regret);
        subclasses must implement."""
        raise NotImplementedError

    def get_expected_reward_arm(self, arm):
        """Expected reward of *arm* (used for regret);
        subclasses must implement.

        Args:
            arm: arm index
        """
        raise NotImplementedError
class SBRDBandit(Bandit):
    """Bandit with a scaled binomial reward distribution (SBRD) per arm."""

    def __init__(self, arm_params, name='bandit'):
        """Validate and store per-arm (reward, probability) tuples.

        Args:
            arm_params: list of tuples [(r_a, p_a)] where arm_params[a]
                gives (r, p) for arm a: r is the [0,1] reward and p is
                the probability of paying it out.
        """
        # Sanity-check every arm before accepting the configuration.
        for idx, (r, p) in enumerate(arm_params):
            if r < 0. or r > 1.:
                raise ValueError('Invalid r param ({0}) for arm {1}'.format(r, idx))
            if p < 0. or p > 1.:
                raise ValueError('Invalid p param ({0}) for arm {1}'.format(p, idx))
        super().__init__(len(arm_params), name)
        self.arm_params = arm_params
        # Optimal arm maximizes expected reward r*p.
        self.optimal_arm = np.argmax([r * p for (r, p) in arm_params])

    def pull(self, arm):
        """Return *arm*'s reward r with probability p, otherwise 0."""
        reward, prob = self.arm_params[arm]
        if random.uniform(0, 1) <= prob:
            return reward
        return 0

    def get_expected_reward_optimal_arm(self):
        """Expected reward of the optimal arm (for regret)."""
        return self.get_expected_reward_arm(self.optimal_arm)

    def get_expected_reward_arm(self, arm):
        """Expected reward r*p of *arm* (for regret)."""
        reward, prob = self.arm_params[arm]
        return reward * prob
|
import pygame
class GameView:
    """Thin wrapper around the pygame display surface for the game's drawing."""

    def __init__(self, _window_width, _window_height):
        """Create the main display surface at the given pixel size."""
        self.display_surf = pygame.display.set_mode((_window_width, _window_height))

    def update_display(self):
        """Flush all pending drawing to the screen."""
        pygame.display.update()

    def draw_background(self, _color):
        """Fill the whole surface with a solid color."""
        self.display_surf.fill(_color)

    def draw_grid(self, _color, _width, _height, _cell_size):
        """Draw grid lines every _cell_size pixels over a _width x _height area."""
        for col_x in range(0, _width, _cell_size):  # vertical lines
            pygame.draw.line(self.display_surf, _color, (col_x, 0), (col_x, _height))
        for row_y in range(0, _height, _cell_size):  # horizontal lines
            pygame.draw.line(self.display_surf, _color, (0, row_y), (_width, row_y))

    def draw_cell(self, _cell_position, _cell_size, _color):
        """Fill one grid cell; _cell_position is in cell units, not pixels."""
        px = _cell_position.getX() * _cell_size
        py = _cell_position.getY() * _cell_size
        pygame.draw.rect(self.display_surf, _color,
                         pygame.Rect(px, py, _cell_size, _cell_size))

    def draw_list_of_cells(self, _cell_positions, _cell_size, _color):
        """Fill every cell in _cell_positions."""
        for cell_pos in _cell_positions:
            self.draw_cell(cell_pos, _cell_size, _color)

    def draw_text(self, _text, _color, _font, _pos, _pos_type):
        """Render _text and anchor it at _pos by the given anchor type."""
        rendered = _font.render(_text, True, _color)
        bounds = rendered.get_rect()
        anchor = (_pos.getX(), _pos.getY())
        if _pos_type == "midtop":
            bounds.midtop = anchor
        elif _pos_type == "topleft":
            bounds.topleft = anchor
        elif _pos_type == "center":
            bounds.center = anchor
        else:
            raise ValueError('Unexpected position type: ' + _pos_type)
        self.display_surf.blit(rendered, bounds)
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-09-17 13:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the exam app: question bank (ExamLibItem),
    papers (Paper), a user's sitting (ExamResult), and each answered
    question within a sitting (ExamItem)."""

    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # One answered question inside a user's exam sitting.
        migrations.CreateModel(
            name='ExamItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.TextField(blank=True, default=b'', max_length=500)),
                ('score_result', models.PositiveIntegerField(blank=True, default=1)),
            ],
        ),
        # Question-bank entry: either a multiple-choice or free-answer question.
        migrations.CreateModel(
            name='ExamLibItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField(max_length=500)),
                ('content', models.TextField(blank=True, max_length=500, null=True)),
                ('type', models.CharField(choices=[(b'choice', b'Choice'), (b'answer', b'Answer')], max_length=45)),
                ('a', models.TextField(blank=True, max_length=500)),
                ('b', models.TextField(blank=True, max_length=500)),
                ('c', models.TextField(blank=True, max_length=500)),
                ('d', models.TextField(blank=True, max_length=500)),
                ('score', models.PositiveIntegerField()),
                ('ref_answer', models.TextField(max_length=500, verbose_name=b'ref_answer')),
                ('category', models.CharField(choices=[(b'ip', b'IP'), (b'linux', b'Linux'), (b'lte', b'LTE'), (b'python', b'Python'), (b'robot', b'Robot'), (b'test', b'Test'), (b'log', b'LOG')], max_length=45)),
                ('source', models.CharField(blank=True, max_length=120, null=True)),
                ('contributor', models.CharField(blank=True, max_length=45, null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        # A user's overall result for one paper.
        migrations.CreateModel(
            name='ExamResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.IntegerField(blank=True, default=0, null=True)),
            ],
        ),
        # A paper: a named, typed set of question-bank items.
        migrations.CreateModel(
            name='Paper',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=120, verbose_name=b'Paper Name')),
                ('type', models.CharField(choices=[(b'a', b'A'), (b'b', b'B'), (b'c', b'C'), (b'd', b'D')], max_length=45, verbose_name=b'Paper Type')),
                ('total_score', models.IntegerField()),
                ('examlibitem', models.ManyToManyField(blank=True, to='exam.ExamLibItem', verbose_name=b'Exam Library Item')),
            ],
        ),
        # Foreign keys are added after all models exist.
        migrations.AddField(
            model_name='examresult',
            name='paper',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Paper'),
        ),
        migrations.AddField(
            model_name='examresult',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='examitem',
            name='exam_result',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='exam.ExamResult'),
        ),
        migrations.AddField(
            model_name='examitem',
            name='examlibitem',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.ExamLibItem'),
        ),
        migrations.AddField(
            model_name='examitem',
            name='paper',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Paper'),
        ),
        migrations.AddField(
            model_name='examitem',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from PIL import Image
def img_show(img):
    """Display *img* (array-like) in the default image viewer.

    Converts to uint8 first so PIL accepts float/other-dtype arrays.
    """
    # Fix: `np` was used but never imported in this module (only PIL is),
    # which raised NameError at call time; import it locally to keep the
    # module-level imports untouched.
    import numpy as np
    pil_img = Image.fromarray(np.uint8(img))
    pil_img.show()
from elements.code_segment import CodeSegment
from elements.datatype_elements import StringElement, NumberElement, ArrayElement
from utils.commands import *
from utils.util import flatten, is_num, is_variable_like
class Line(CodeSegment):
    def __init__(self, string: str):
        """Line

        Represents a single line of code; *string* is parsed into a
        command element which this Line adopts as its child.

        :param string: code line, String
        """
        self.command = parse_line(string)
        self.parent = None
        self.command.set_parent(self)

    def write_out(self, sqf=False):
        """Write out

        Return the rendered line, terminated with a semicolon and
        newline unless the command opens a block.

        :return: String
        """
        rendered = self.command.write_sqf() if sqf else self.command.write_out()
        if self.is_pre_block():
            return self.indent() + rendered
        return self.indent() + rendered + ";\n"

    def is_pre_block(self):
        """True when the command starts a block (no trailing semicolon)."""
        return isinstance(self.command, PRE_BLOCK_COMMANDS)
def parse_line(string: str) -> GenericElement:
    """Parse a code line into a single command element.

    Pipeline: tokenize the raw text, convert literal/variable tokens to
    elements, nest by parentheses, then build command objects; the first
    flattened command is the line's root.

    :param string: str
    :return: Command
    """
    tokens = divide_into_segments(string)
    elements = get_literal_elements(tokens)
    nested = get_hierarchy(elements)
    return flatten(get_commands(nested))[0]
def get_hierarchy(segment_list: list, opening: str= "(",
                  closing: str= ")", ignore_equals: bool=False) -> list:
    """Get hierarchy

    Method goes through the list given as a parameter and parses it to a
    multidimensional list based on ``opening`` and ``closing`` cells
    found in it.

    Example: ["a", "(", "b", ")", "c"] would return ["a", ["b"], "c"].

    :param segment_list: One dimensional list
    :param opening: token that opens a nesting level (default "(")
    :param closing: token that closes a nesting level (default ")")
    :param ignore_equals: when False, an assignment token makes the whole
        right-hand side one nested sub-list
    :return: Multidimensional list
    """
    element_list = []
    index = 0
    in_sublist = 0  # bracket depth already handled by a recursive call
    while index < len(segment_list):
        current = segment_list[index]
        if current in SET_COMMANDS and not ignore_equals:
            # =, -= and += are special
            element_list.append(current)
            element_list.append(get_hierarchy(segment_list[index+1:], opening, closing, ignore_equals))
            index = len(segment_list)  # everything to the right was consumed
        elif in_sublist == 0:
            if current == opening:
                # Recurse for the bracketed region; the branches below then
                # skip over it by counting matched brackets.
                in_sublist += 1
                returned_list = get_hierarchy(segment_list[index+1:], opening, closing, ignore_equals)
                element_list.append(returned_list)
            elif current == closing:
                # Closing token of the caller's level: hand results back.
                return element_list
            else:
                element_list.append(current)
        elif current == opening:
            in_sublist += 1
        elif current == closing:
            in_sublist -= 1
        else:
            pass  # token inside an already-recursed region: skip it
        index += 1
    return element_list
def get_commands(hl: list):
    """Convert a hierarchical token list into command objects.

    Walks *hl* (the output of get_hierarchy) and replaces command
    keywords with instantiated command objects; plain elements pass
    through unchanged.

    :param hl: hierarchical (nested) list of tokens/elements
    :return: nested list of command objects and elements
    """
    # TODO: Maths: order of computation
    i = 0
    command_list = []
    while i < len(hl):
        if type(hl[i]) is list:
            command_list.append(get_commands(hl[i]))
        elif type(hl[i]) is str:
            if hl[i] in COMMANDS.keys():
                # Prefix command: consume the next element as its parameters.
                command_list.append(create_command(hl[i], get_commands(hl[i+1])))
                i += 1
            elif hl[i] in TWO_SIDED_COMMANDS.keys():
                # Infix command: left operand is the last parsed element,
                # right operand is whatever follows.
                asd = hl[i]
                if isinstance(hl[i+1], GenericElement):
                    right = hl[i+1]
                elif type(hl[i+1]) is list:
                    right = get_commands(hl[i+1])
                else:
                    # The rest of the list forms the right-hand side;
                    # stop scanning after it.
                    right = get_commands(hl[i+1:])
                    i = len(hl)
                command_list.append(
                    create_math_command(asd,
                                        command_list.pop(),
                                        right))
                i += 1
            elif hl[i] in NO_PARAM_COMMANDS.keys():
                command_list.append(
                    NO_PARAM_COMMANDS[hl[i]]()
                )
                i += 1
            elif hl[i] in ARRAY_COMMANDS:
                # Array command: operates on the previously parsed element.
                command_list.append(
                    create_command(hl[i],command_list.pop()))
            else:
                command_list.append(hl[i])
        else:
            command_list.append(hl[i])
        i += 1
    return command_list
def divide_into_segments(string: str) -> list:
    """Divide string to segments

    Splits *string* into logical code tokens, scanning left to right:
    numbers (with dots), alphanumeric words, runs of operator characters,
    and single punctuation characters; whitespace and commas separate
    tokens and are dropped.

    Example: '"asd"+(random(1,5) == var123)'
    yields: ", asd, ", +, (, random, (, 1, 5, ), ==, var123, )

    :param string: str
    :return: list
    """
    # TODO: use regular expressions
    op_chars = ["=", "!", "<", ">", "|", "&", "+", "-"]
    tokens = []
    mode = None        # kind of token currently being scanned
    token_start = -1   # index where the current token began
    token_end = False  # set to the index just past the current token
    for pos in range(len(string)):
        ch = string[pos]
        if mode == "int":
            if not ch.isdigit() and ch != ".":
                token_end = pos
        if mode == "str":
            if not ch.isalnum():
                token_end = pos
        if mode == "operator":
            if ch not in op_chars:
                token_end = pos
        elif mode == "pass":
            # single-character punctuation token: always ends here
            token_end = pos
        if token_end:
            tokens.append(string[token_start:token_end])
            token_end = False
            mode = None
        if mode is None:
            # start a new token at the current character
            token_start = pos
            if ch.isspace() or ch == ",":
                token_start = -1
            elif ch.isdigit():
                mode = "int"
            elif ch.isalpha() or ch in [".", "_"]:
                mode = "str"
            elif ch in op_chars:
                mode = "operator"
            else:
                mode = "pass"
    if mode is not None:
        # flush the trailing token
        tokens.append(string[token_start:])
    return tokens
def get_literal_elements(segment_list: list) -> list:
    """Form literal elements out of raw segments.

    - String literals are limited by " (' doesn't work)
    - Number literals are segments only containing digits;
      decimals are not yet supported.
    - Variables are alphanumeric segments that don't match
      any supported command.

    :param segment_list: list
    :return: list
    """
    # Pass 1: collapse segments between double quotes into StringElements.
    with_strings = []
    buffered = []
    inside_quotes = False
    for seg in segment_list:
        if seg == '"':
            if inside_quotes:
                inside_quotes = False
                with_strings.append(StringElement(' '.join(buffered)))
                buffered = []
            else:
                inside_quotes = True
        elif inside_quotes:
            buffered.append(seg)
        else:
            with_strings.append(seg)
    # Pass 2: turn plain string segments into number/variable elements.
    typed = []
    for seg in with_strings:
        if type(seg) is str and is_num(seg):
            typed.append(NumberElement(seg))
        elif (type(seg) is str and is_variable_like(seg)
                and seg not in ALL_COMMANDS.keys()):
            typed.append(VariableElement(seg))
        else:
            typed.append(seg)
    # Pass 3: resolve [ ... ] groupings into array elements and flatten.
    hierarchy = get_hierarchy(typed, "[", "]", True)
    return flatten(get_array_elements(hierarchy))
def get_array_elements(l: list, level: int=0) -> list:
    """Recursively replace nested lists with ArrayElement wrappers.

    At level 0 the (possibly rewritten) list is returned as-is; any
    deeper list is wrapped into a single ArrayElement.
    """
    collected = []
    for item in l:
        if type(item) is list:
            collected.append(get_array_elements(item, level + 1)[0])
        else:
            collected.append(item)
    if level == 0:
        return collected
    wrapper = ArrayElement()
    wrapper.add_element_list(collected)
    return [wrapper]
def create_command(command: str, params):
    """Instantiate the command class registered under *command*.

    Returns None (after logging) when the constructor rejects the args.
    """
    arguments = flatten(params)
    try:
        return ALL_COMMANDS[command](*arguments)
    except TypeError:
        print("### ERROR WHILE:")
        print(f"creating command: {command}({arguments})")
def create_math_command(command: str, left, right):
    """Instantiate the two-sided (infix) command registered under *command*.

    :param command: operator token, key into TWO_SIDED_COMMANDS
    :param left: left-hand operand (element/command or list of them)
    :param right: right-hand operand (element/command or list of them)
    :return: command instance, or None (after logging) on failure
    """
    args = flatten([left, right])
    try:
        return TWO_SIDED_COMMANDS[command](*args)
    except TypeError:
        print("### ERROR WHILE:")
        # BUG FIX: do not index args[0]/args[1] here — flatten() can yield
        # fewer than two items, and the resulting IndexError would mask
        # the original TypeError being reported.
        print("creating math command: {} with operands {}".format(
            str(command), str(args)))
        print("all args: ", str(args))
if __name__ == "__main__":
    # For testing purposes: parse a few representative expressions and
    # print the generated SQF for manual inspection.
    test_lines = [
        'hint("asd")',
        'random(random(1, 2),random(4,5))',
        '1 + 2 + 3 + 4',
        '"asd" + "kek"',
        'random(((1))+(2),(((3))+(2)))',
        'random((1),((2)))'
    ]
    for line in test_lines:
        a = parse_line(line)
        print(a.write_sqf())
|
from datetime import datetime
# English weekday names indexed by datetime.weekday() (Monday == 0).
WEEKDAYS = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday')


def day(date):
    """Return the English weekday name for a date string in YYYYMMDD form."""
    parsed = datetime.strptime(date, '%Y%m%d')
    return WEEKDAYS[parsed.weekday()]
|
import nltk
from nltk.tokenize import sent_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
class TextTokenization:
    """Helpers for splitting raw text into sentences and word tokens."""

    def sentenceTokenize(self, rawText):
        """Split *rawText* into a list of sentences."""
        return sent_tokenize(rawText)

    def wordTokenize(self, sentence):
        """Split *sentence* into lowercased word/punctuation tokens."""
        # BUG FIX: str.lower() returns a new string; the original call
        # discarded the result, so tokens kept their original case.
        sentence = sentence.lower()
        # BUG FIX: the class [a-zA-z] also matched the ASCII range between
        # 'Z' and 'a' ([, \, ], ^, _, `); use an explicit a-zA-Z class.
        tokenizer = RegexpTokenizer(r"'?([a-zA-Z'-]+|[\.\!\?\,])'?")
        tokens = tokenizer.tokenize(sentence)
        # filteredWords = [w for w in tokens if not w in stopwords.words('english')]
        return tokens  # filteredWords
|
# Demonstrates the basic file API: write, readline, read(n), read, append.
# BUG FIX: the original reopened 'commands.txt' for reading while the write
# handle was still open, so buffered data might not have reached disk and
# the handles were never all closed; context managers fix both problems.
with open('commands.txt', 'w') as file:
    file.write('This is file.write.')

with open('commands.txt', 'r') as file:
    print('this is file.readline() : ', file.readline())
    print('this is file.read(10)', file.read(10))
    print('this is file.read()', file.read())

with open('commands.txt', 'a+') as file:
    file.write('this is file.write() in a+ mode')

with open('commands.txt', 'r') as file:
    print(file.read())
import time
from django.test.client import Client
import oauth2 as oauth
from piston.models import Consumer as ConsumerModel
from test_utils import RequestFactory
class TestClient(object):
    """Minimal 2-legged OAuth client for exercising the subscribe endpoint."""

    def __init__(self):
        ConsumerModel.objects.create(name='test', key='test',
                                     secret='test', status='accepted')
        self.url = 'http://testserver/subscriptions/subscribe/'
        self.consumer = oauth.Consumer(key='test', secret='test')
        self.signature_method = oauth.SignatureMethod_HMAC_SHA1()
        # we're only using 2-legged auth, no need for tokens
        self.token = None
        self.params = {
            'oauth_version': '1.0',
            'oauth_nonce': oauth.generate_nonce(),
            'oauth_timestamp': int(time.time()),
        }
        self.c = Client()
        self.rf = RequestFactory()

    def tearDown(self):
        """Remove the consumer created in __init__."""
        ConsumerModel.objects.filter(key='test').delete()

    def headers(self, method='POST', **kwargs):
        """Return signed OAuth headers for a request carrying *kwargs* params."""
        request_params = dict(self.params, **kwargs)
        oauth_request = oauth.Request(method=method, url=self.url,
                                      parameters=request_params)
        oauth_request.sign_request(self.signature_method, self.consumer,
                                   self.token)
        return oauth_request.to_header()

    def subscribe(self, **kwargs):
        """POST a signed subscribe request through the Django test client."""
        return self.c.post(self.url, kwargs, **self.headers(**kwargs))

    def subscribe_request(self, **kwargs):
        """Build (without sending) a signed subscribe POST via RequestFactory."""
        return self.rf.post(self.url, kwargs, **self.headers(**kwargs))

    def read(self, **kwargs):
        """GET the subscribe endpoint with signed headers."""
        return self.c.get(self.url, kwargs,
                          **self.headers(method="GET", **kwargs))
|
import tensorflow as tf
from tensorflow import keras as k
from util_tf import instance_norm
def MFNet():
    """Build the two-input fusion network as a Keras functional Model."""
    left = k.Input(shape=(None, None, 1))
    right = k.Input(shape=(None, None, 1))
    # Stack both single-channel inputs along the channel axis.
    stacked = k.layers.concatenate([left, right], axis=3)
    backbone = mf_module(name='mf_module')
    fused = tf.nn.tanh(backbone(stacked))
    return k.Model(inputs=[left, right], outputs=fused, name='MFNet')
class mf_module(k.Model):
    """Encoder-decoder fusion module.

    Three stride-2 encoder stages followed by three upsampling decoder
    stages; every convolution uses VALID padding with an explicit
    symmetric zero-pad applied beforehand in call().
    """

    def __init__(self, name):
        super(mf_module, self).__init__(name=name)
        self.initializer = k.initializers.TruncatedNormal(stddev=0.02)
        self.regularizer = tf.keras.regularizers.l1(l=0.0005)
        # Encoder. All Conv2D layers share initializer/regularizer and
        # VALID padding, so they are built via the _conv factory below.
        self.conv1 = self._conv(32, (7, 7), (1, 1), use_bias=True,
                                input_shape=(None, None, 2))
        self.conv2 = self._conv(64, (4, 4), (2, 2))
        self.norm2 = instance_norm()
        self.conv2_2 = self._conv(64, (3, 3), (1, 1))
        self.norm2_2 = instance_norm()
        self.conv3 = self._conv(128, (4, 4), (2, 2))
        self.norm3 = instance_norm()
        self.conv3_2 = self._conv(128, (3, 3), (1, 1))
        self.norm3_2 = instance_norm()
        self.conv4 = self._conv(256, (4, 4), (2, 2))
        self.norm4 = instance_norm()
        self.conv4_2 = self._conv(256, (3, 3), (1, 1))
        self.norm4_2 = instance_norm()
        # Decoder
        self.upsample5 = k.layers.UpSampling2D()
        self.conv5 = self._conv(128, (3, 3), (1, 1))
        self.norm5 = instance_norm()
        self.conv5_2 = self._conv(128, (3, 3), (1, 1))
        self.norm5_2 = instance_norm()
        self.upsample6 = k.layers.UpSampling2D()
        self.conv6 = self._conv(64, (3, 3), (1, 1))
        self.norm6 = instance_norm()
        self.conv6_2 = self._conv(64, (3, 3), (1, 1))
        self.norm6_2 = instance_norm()
        self.upsample7 = k.layers.UpSampling2D()
        self.conv7 = self._conv(32, (3, 3), (1, 1))
        self.norm7 = instance_norm()
        self.conv8 = self._conv(1, (7, 7), (1, 1), use_bias=True)

    def _conv(self, filters, kernel_size, strides, use_bias=False, **kwargs):
        """Factory for the module's identically-configured Conv2D layers."""
        return k.layers.Conv2D(filters, kernel_size, strides=strides,
                               padding='valid', use_bias=use_bias,
                               kernel_initializer=self.initializer,
                               kernel_regularizer=self.regularizer,
                               **kwargs)

    @staticmethod
    def _pad(x, margin):
        """Zero-pad height and width symmetrically by *margin* pixels."""
        return tf.pad(x, [[0, 0], [margin, margin], [margin, margin], [0, 0]],
                      'CONSTANT')

    def call(self, inputs, training=False):
        # Encoder
        x = tf.nn.relu(self.conv1(self._pad(inputs, 3)))
        x = tf.nn.relu(self.norm2(self.conv2(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm2_2(self.conv2_2(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm3(self.conv3(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm3_2(self.conv3_2(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm4(self.conv4(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm4_2(self.conv4_2(self._pad(x, 1)), training=training))
        # Decoder
        x = self.upsample5(x)
        x = tf.nn.relu(self.norm5(self.conv5(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm5_2(self.conv5_2(self._pad(x, 1)), training=training))
        x = self.upsample6(x)
        x = tf.nn.relu(self.norm6(self.conv6(self._pad(x, 1)), training=training))
        x = tf.nn.relu(self.norm6_2(self.conv6_2(self._pad(x, 1)), training=training))
        x = self.upsample7(x)
        x = tf.nn.relu(self.norm7(self.conv7(self._pad(x, 1)), training=training))
        # No norm/relu on the final projection back to one channel.
        return self.conv8(self._pad(x, 3))
|
'''# First Method
import pygame
pygame.mixer.init()
pygame.mixer.music.load('song.mp3')
pygame.mixer.music.play()
while pygame.mixercontinue.music.get_busy() == True:
#Second Method
import os
os.system('vlc song.mp3 &')'''
# (translated from Hindi) okay, we'll do everything this way, alright?
# test on mac
import subprocess
import os
# print os.path.exists("C:/Users/Dhruv/Desktop/Motivation/RiseShine.mp4")
# p = subprocess.Popen(["/Applications/VLC.app/Contents/MacOS/VLC","./veg_girl.mp4"])
# Launch VLC (macOS app-bundle path) to play the local video file.
os.system('/Applications/VLC.app/Contents/MacOS/VLC veg_girl.mp4')
from telegram import ReplyKeyboardMarkup
from telegram.ext import Updater, CommandHandler, ConversationHandler, MessageHandler, Filters
from bs4 import BeautifulSoup as BS
from telegram.ext import CallbackContext
import requests
# Region-selection reply keyboard: two region buttons per row; the button
# captions are matched verbatim by the ConversationHandler regexes below.
keyboard=ReplyKeyboardMarkup([
    ['Andijon','Buxoro'],
    ["Farg'ona","Jizzax"],
    ['Namangan','Navoiy'],
    ['Qashqadaryo','Surxondaryo'],
    ['Sirdaryo','Samarqand'],
    ["Qoraqalpog'iston","Xorazm"],
    ['Toshkent viloyati','Toshkent']
],resize_keyboard=True)
def start(update, context):
    """Greet the user and show the region-selection keyboard; enter state 1."""
    sender = update.message.from_user
    greeting = (
        'Assalomu Aleykum <b>{}</b> 🤝. uzbweather_bot ga xush kelibsiz !!! \n \n'
        'Sizga foydamiz tegganidan xursandmiz😊. Iltimos hududni tanlang:'
    ).format(sender.full_name)
    update.message.reply_html(greeting, reply_markup=keyboard)
    return 1
def andijon(update, context: CallbackContext):
    """Reply with today's min/max temperature for Andijan."""
    # BUG FIX: removed leftover debug print() that logged every selection
    # to stdout.
    r = requests.get('https://sinoptik.ua/погода-андижан')
    html = BS(r.content, 'html.parser')
    # First .min/.max divs on the page hold today's forecast.
    t_min = html.findAll("div", {"class": "min"})[0].text
    t_max = html.findAll("div", {"class": "max"})[0].text
    update.message.reply_text(
        "Andijon belgilandi. Bugun Andijonda ob-havo 🌦👇:" + '\n' + "Eng past daraja⬇️ :" + t_min + ',' + '\n' + "Eng yuqori daraja⬆️ :" + t_max,
        reply_markup=keyboard)
def buxoro(update, context):
    """Reply with today's min/max temperature for Bukhara."""
    page = requests.get('https://sinoptik.ua/погода-бухара')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Buxoro belgilandi. Bugun Buxoroda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def fargona(update, context):
    """Reply with today's min/max temperature for Fergana."""
    page = requests.get('https://sinoptik.ua/погода-фергана')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Farg'ona belgilandi. Bugun Farg'onada ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def jizzax(update, context):
    """Reply with today's min/max temperature for Jizzakh."""
    page = requests.get('https://sinoptik.ua/погода-джизак')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Jizzax belgilandi. Bugun Jizzaxda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def namangan(update, context):
    """Reply with today's min/max temperature for Namangan."""
    page = requests.get('https://sinoptik.ua/погода-наманган')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Namangan belgilandi. Bugun Namanganda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def navoiy(update, context):
    """Reply with today's min/max temperature for Navoi."""
    page = requests.get('https://sinoptik.ua/погода-навои')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Navoiy belgilandi. Bugun Navoiyda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def qashqadaryo(update, context):
    """Reply with today's min/max temperature for Qashqadaryo (Karshi)."""
    page = requests.get('https://sinoptik.ua/погода-карши')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Qashqadaryo belgilandi. Bugun Qashqadaryoda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def qoraqalpogiston(update, context):
    """Reply with today's min/max temperature for Karakalpakstan (Nukus)."""
    page = requests.get('https://sinoptik.ua/погода-нукус')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Qoraqalpog'iston belgilandi. Bugun Qoraqalpog'istonda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def samarqand(update, context):
    """Reply with today's min/max temperature for Samarkand."""
    page = requests.get('https://sinoptik.ua/погода-самарканд')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Samarqand belgilandi. Bugun Samarqandda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def sirdaryo(update, context):
    """Reply with today's min/max temperature for Sirdaryo."""
    page = requests.get('https://sinoptik.ua/погода-сырдарья')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Sirdaryo belgilandi. Bugun Sirdaryoda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def surxondaryo(update, context):
    """Reply with today's min/max temperature for Surxondaryo (Termez)."""
    page = requests.get('https://sinoptik.ua/погода-термез')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Surxondaryo belgilandi. Bugun Surxondaryoda ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def toshkent_viloyati(update, context):
    """Reply with today's min/max temperature for Tashkent region (Kibray)."""
    page = requests.get('https://sinoptik.ua/погода-кибрай')
    soup = BS(page.content, 'html.parser')
    low = soup.findAll("div", {"class": "min"})[0].text
    high = soup.findAll("div", {"class": "max"})[0].text
    text = ("Toshkent viloyati belgilandi. Bugun Toshkent viloyatida ob-havo 🌦👇:\n"
            "Eng past daraja⬇️ :" + low + ",\n"
            "Eng yuqori daraja⬆️ :" + high)
    update.message.reply_text(text, reply_markup=keyboard)
def toshkent(update, context):
    """Reply with today's min/max temperature for Tashkent city."""
    r = requests.get('https://sinoptik.ua/погода-ташкент')
    html = BS(r.content, 'html.parser')
    t_min = html.findAll("div", {"class": "min"})[0].text
    t_max = html.findAll("div", {"class": "max"})[0].text
    # BUG FIX: the reply said "Buxoro"/"Buxoroda" (copy-pasted from
    # buxoro()); it now names Toshkent, the region actually selected.
    update.message.reply_text(
        "Toshkent belgilandi. Bugun Toshkentda ob-havo 🌦👇:" + '\n' + "Eng past daraja⬇️ :" + t_min + ',' + '\n' + "Eng yuqori daraja⬆️ :" + t_max,
        reply_markup=keyboard)
def xorazm(update, context):
    """Reply with today's min/max temperature for Khorezm (Urgench)."""
    r = requests.get('https://sinoptik.ua/погода-ургенч')
    html = BS(r.content, 'html.parser')
    t_min = html.findAll("div", {"class": "min"})[0].text
    t_max = html.findAll("div", {"class": "max"})[0].text
    # BUG FIX: removed leftover debug print(t_max).
    update.message.reply_text(
        "Xorazm belgilandi. Bugun Xorazmda ob-havo 🌦👇:" + '\n' + "Eng past daraja⬇️ :" + t_min + ',' + '\n' + "Eng yuqori daraja⬆️ :" + t_max,
        reply_markup=keyboard)
def help_command(update, context):
    """Point the user at the maintainer's Telegram handle for help."""
    help_text = "Yordam uchun @Dilshod2708 ga murojaat qiling ."
    update.message.reply_text(help_text)
def admin(update, context):
    """Send the admin's contact handle."""
    # BUG FIX: the handle was rendered as '@@Dilshod2708' (doubled @).
    update.message.reply_text("Admin👨🏻💻 bilan bog'lanish - @Dilshod2708 ")
# def main():
#######################################################################################
# NOTE(review): the bot token is hard-coded in source — it should be moved
# to an environment variable or config file and the leaked token revoked.
updater = Updater(token='2067264061:AAHGTALp8LIfwHIGnDNh61ye19-nIPOsitk',use_context=True)
# Conversation: /start shows the keyboard, state 1 dispatches one handler
# per region button (matched verbatim against the button captions).
conv_handler = ConversationHandler(
    entry_points=[CommandHandler('start', start)],
    states={
        1: [
            MessageHandler(Filters.regex('^(Andijon)$'), andijon),
            MessageHandler(Filters.regex('^(Buxoro)$'), buxoro),
            MessageHandler(Filters.regex("^(Farg'ona)$"), fargona),
            MessageHandler(Filters.regex('^(Jizzax)$'), jizzax),
            MessageHandler(Filters.regex('^(Namangan)$'), namangan),
            MessageHandler(Filters.regex('^(Navoiy)$'), navoiy),
            MessageHandler(Filters.regex('^(Qashqadaryo)$'), qashqadaryo),
            MessageHandler(Filters.regex("^(Qoraqalpog'iston)$"), qoraqalpogiston),
            MessageHandler(Filters.regex('^(Samarqand)$'), samarqand),
            MessageHandler(Filters.regex('^(Sirdaryo)$'), sirdaryo),
            MessageHandler(Filters.regex('^(Surxondaryo)$'), surxondaryo),
            MessageHandler(Filters.regex('^(Toshkent viloyati)$'), toshkent_viloyati),
            MessageHandler(Filters.regex('^(Toshkent)$'), toshkent),
            MessageHandler(Filters.regex('^(Xorazm)$'), xorazm),
        ]
    },
    # Any other text restarts the conversation.
    fallbacks=[MessageHandler(Filters.text, start)]
)
updater.dispatcher.add_handler(conv_handler)
#######################################################################################
# NOTE(review): this 'start' CommandHandler duplicates the conversation
# entry point registered above — confirm it is intentional.
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.dispatcher.add_handler(CommandHandler('help', help_command))
updater.dispatcher.add_handler(CommandHandler('admin', admin))
updater.start_polling()
# updater.idle()
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.cluster import AffinityPropagation
from matplotlib import pyplot as plt
from sklearn import metrics
from sklearn.cluster import spectral_clustering
from sklearn.cluster import MeanShift, estimate_bandwidth
# Feature columns of the glass dataset (refractive index + element contents).
features = ['RI', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Ba', 'Fe']
def plot_pca(df, data):
    """Project *data* onto its first two principal components and save a
    scatter plot, coloured by glass 'Type', to pca.png."""
    scaled = StandardScaler().fit_transform(data)
    pca = PCA(n_components=2)
    components = pca.fit_transform(scaled)
    components_df = pd.DataFrame(
        data=components,
        columns=['principal component 1', 'principal component 2'])
    plot_df = pd.concat([components_df, df[['Type']]], axis=1)
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlabel('Principal Component 1', fontsize=15)
    ax.set_ylabel('Principal Component 2', fontsize=15)
    ax.set_title('2 component PCA', fontsize=20)
    glass_types = [1, 2, 3, 4, 5, 6, 7]
    palette = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
    for glass_type, colour in zip(glass_types, palette):
        mask = plot_df['Type'] == glass_type
        ax.scatter(plot_df.loc[mask, 'principal component 1'],
                   plot_df.loc[mask, 'principal component 2'],
                   c=colour, s=50)
    ax.legend(glass_types)
    ax.grid()
    plt.savefig("pca.png")
def preprocessing(df, data):
    """Standardize *data*, then reduce it with whitened PCA keeping 95%
    of the variance; prints the per-component variance ratios."""
    standardized = StandardScaler().fit_transform(data)
    pca = PCA(0.95, whiten=True)
    reduced = pca.fit_transform(standardized)
    print("pca variance: ", pca.explained_variance_ratio_)
    return reduced
def kmeans(x, labels_true):
    """Fit k-means (k=7) on *x* and print standard clustering metrics
    against the ground-truth labels."""
    clusterer = KMeans(init='k-means++', n_clusters=7, n_init=50,
                       max_iter=1000, n_jobs=4)
    clusterer.fit(x)
    print("--------K-Means----------")
    labels = clusterer.labels_
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(x, labels, metric='sqeuclidean'))
def affinity_prop(x, labels_true):
    """Fit AffinityPropagation on *x* and print standard clustering
    metrics against the ground-truth labels."""
    model = AffinityPropagation(max_iter=1000).fit(x)
    labels = model.labels_
    n_clusters_ = len(np.unique(labels))
    print("--------Affinity_Propagation----------")
    print('Estimated number of clusters: %d' % n_clusters_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(x, labels, metric='sqeuclidean'))
def mean_shift(x, labels_true):
    """Fit MeanShift on *x* and print standard clustering metrics
    against the ground-truth labels."""
    bandwidth = estimate_bandwidth(x)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
    ms.fit(x)
    labels = ms.labels_
    labels_unique = np.unique(labels)
    n_clusters_ = len(labels_unique)
    # BUG FIX: header said "Affinity_Propagation" (copy-pasted).
    print("--------Mean_Shift----------")
    print('Estimated number of clusters: %d' % n_clusters_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    # BUG FIX: label said "0 Coefficient" instead of "Silhouette Coefficient".
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(x, labels, metric='sqeuclidean'))
DATASET_PATH = './glass_data_labeled.csv'
df = pd.read_csv(DATASET_PATH)
# Separating out the features
x = df.loc[:, features].values
# Separating out the type
y = df.loc[:, ['Type']].values
y = y.reshape(y.shape[0])  # flatten (n, 1) column into a 1-D label array
# plot_pca(df, x)
x = preprocessing(df, x)
# Run and report each clustering algorithm on the PCA-reduced features.
kmeans(x, y)
affinity_prop(x, y)
mean_shift(x, y)
|
# Read an angle (degrees) and print its sine, cosine and tangent.
import math
angulo = float(input('Digite o ângulo que você deseja: '))
seno = math.sin(math.radians(angulo))
cosseno = math.cos(math.radians(angulo))
tangente = math.tan(math.radians(angulo))
# BUG FIX: the output said "tem p SENO" (typo) instead of "tem o SENO";
# same for COSSENO and TANGENTE. Also removed the dead commented-out
# duplicate implementation that used from-imports.
print('O ângulo de {} tem o SENO de {:.2f}'.format(angulo, seno))
print('O ângulo de {} tem o COSSENO de {:.2f}'.format(angulo, cosseno))
print('O ângulo de {} tem o TANGENTE de {:.2f}'.format(angulo, tangente))
import PIL.Image as Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
class CIFARDataset(Dataset):
    """Array-backed CIFAR dataset applying train or test transforms."""

    def __init__(self, images, labels, training=True):
        self.images = images
        self.labels = labels
        self.transform = (self._train_transform() if training
                          else self._test_transform())

    def __getitem__(self, index):
        sample = self.transform(Image.fromarray(self.images[index]))
        return sample, self.labels[index]

    def _train_transform(self):
        """Random crop + horizontal flip, then tensor conversion and
        per-channel normalization."""
        return transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                 std=[0.267, 0.256, 0.276]),
        ])

    def _test_transform(self):
        """Tensor conversion and per-channel normalization only."""
        return transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.507, 0.487, 0.441],
                                 std=[0.267, 0.256, 0.276]),
        ])

    def __len__(self):
        return len(self.images)
class BYOLCIFARDataset(CIFARDataset):
    """CIFAR dataset yielding one clean view plus two independently
    augmented views per sample (BYOL-style)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.train_aug = self._train_transform()
        self.test_aug = self._test_transform()

    def __getitem__(self, index):
        img = Image.fromarray(self.images[index])
        clean = self.test_aug(img.copy())
        view_one = self.train_aug(img.copy())
        view_two = self.train_aug(img.copy())
        return clean, view_one, view_two, self.labels[index]
from django.contrib import admin
# Register your models here.
from .models import Found, ProvinceFound
@admin.register(Found)
class FoundAdmin(admin.ModelAdmin):
    """Admin list view for Found: searchable by name/company, filterable
    by category and grant year."""
    list_display = ('found_name', 'manager', 'money', 'company', 'category', 'grant_year')
    list_filter = ('category', 'grant_year')
    search_fields = ('found_name', 'company')
    list_per_page = 100
@admin.register(ProvinceFound)
class ProvinceFoundAdmin(admin.ModelAdmin):
    """Admin list view for ProvinceFound."""
    # NOTE(review): 'category' (list_filter) and 'company' (search_fields)
    # look copy-pasted from FoundAdmin — confirm ProvinceFound actually
    # declares these fields, otherwise the admin page will error.
    list_display = ('found_name', 'manager', 'money', 'province', 'grant_year')
    list_filter = ('category', 'grant_year', 'province')
    search_fields = ('found_name', 'company')
    list_per_page = 100
|
import unittest
from katas.kyu_6.string_shortener_shrink import shorten
class ShortenTestCase(unittest.TestCase):
    """Unit tests for string_shortener_shrink.shorten."""

    def test_equal_1(self):
        # Default '...' separator placed inside the truncated string.
        self.assertEqual(shorten(
            'The quick brown fox jumps over the lazy dog', 27
        ), 'The quick br...the lazy dog')

    def test_equal_2(self):
        # Custom separator string.
        self.assertEqual(shorten(
            'The quick brown fox jumps over the lazy dog', 27, '----'
        ), 'The quick b----the lazy dog')

    def test_equal_3(self):
        # Target length equals the input length: no separator inserted.
        self.assertEqual(shorten('hello world', 5, '....'), 'hello')

    def test_equal_4(self):
        # Separator fills almost the whole budget.
        self.assertEqual(shorten('hello world', 6, '....'), 'h....d')

    def test_equal_5(self):
        # Input shorter than the limit is returned unchanged.
        self.assertEqual(shorten('hello', 7), 'hello')
|
import timeit
from pdf2image import convert_from_path
from print import log
def convert_to_jpg(pdf_filename, export_path) -> list:
    """Render every page of *pdf_filename* at 500 DPI and save JPEGs
    under *export_path*; return the list of saved image filenames."""
    log('Starting conversion to jpg...')
    start = timeit.default_timer()
    pages = convert_from_path(pdf_filename, 500)
    elapsed = timeit.default_timer() - start
    log('Successfully converted %d pages in %d seconds.' % (len(pages), elapsed))
    log('Saving to %s/...' % export_path)
    image_filenames = []
    for index, page in enumerate(pages):
        filename = '%s/p%d.jpg' % (export_path, index)
        image_filenames.append(filename)
        page.save(filename, 'JPEG')
    log('Saved all images')
    return image_filenames
|
from itertools import cycle
from random import randint
from common.fields import JsonField
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models import F, Q, Sum, FloatField
from django.utils.translation import gettext_lazy as _
from starwars.enums import (
SKILL_DEPENDANCIES, SPECIES, SPECIES_ABILITIES, ITEM_TYPES, ITEM_WEAPON, ITEM_ARMOR, RANGE_BANDS, ITEM_SKILLS,
EFFECT_ATTRIBUTE_MODIFIER, ATTRIBUTE_MAX_HEALTH, ATTRIBUTE_MAX_STRAIN, ATTRIBUTE_DEFENSE, ATTRIBUTE_SOAK_VALUE,
DICT_STATS, DICT_SKILLS, STAT_BRAWN, STAT_WILLPOWER, EFFECT_TYPES, DICE_TYPES, ATTRIBUTES, CHARACTER_TYPES,
STAT_PRESENCE, COOL, CHARACTER_TYPE_PC, DICE_LIGHT_FORCE, DICE_DARK_FORCE, VIGILANCE, DICE_TRIUMPH, DICE_ADVANTAGE,
EFFECT_DURATIONS, ACTIVATION_TYPE_PASSIVE, ACTIVATION_TYPES, CHARACTER_TYPE_NEMESIS, DIFFICULTY_SIMPLE,
DICE_TYPE_DIFFICULTY, DICE_TYPE_CHALLENGE, EFFECT_DICE_POOL_MODIFIER, EFFECT_DURATION_FIGHT,
EFFECT_DURATION_DIRECT, EFFECT_HEALTH_MODIFIER, EFFECT_STRAIN_MODIFIER, EFFECT_DURATION_TARGET_TURN,
EFFECT_DURATION_SOURCE_TURN, RANGE_ENGAGED, CHARACTER_TYPE_MINION)
from starwars.utils import roll_dice
class Player(AbstractUser):
    """Django auth user extended with an optional display nickname."""
    nickname = models.CharField(max_length=100, blank=True, verbose_name=_("surnom"))

    def __str__(self):
        # Prefer the nickname, then the first name, then the login username.
        return self.nickname or self.first_name or self.username

    class Meta:
        verbose_name = _("joueur")
        verbose_name_plural = _("joueurs")
class NamedModel(models.Model):
    """Abstract base adding a name and free-form description to a model."""
    name = models.CharField(max_length=50, verbose_name=_("nom"))
    description = models.TextField(blank=True, verbose_name=_("description"))

    def __str__(self):
        return self.name

    class Meta:
        abstract = True
class Campaign(NamedModel):
    """A game campaign: holds the destiny token pools and drives combat
    turn order for its characters."""
    destiny_usage_percentage = models.PositiveSmallIntegerField(
        default=10, verbose_name=_("pourcentage d'utilisation des jetons de destinée"))
    light_tokens = models.PositiveSmallIntegerField(default=0, verbose_name=_("light token"))
    dark_tokens = models.PositiveSmallIntegerField(default=0, verbose_name=_("dark token"))

    def _use_light_token(self):
        # Spending a light token flips it to the dark pool (and vice versa).
        self.light_tokens -= 1
        self.dark_tokens += 1
        self.save()

    def _use_dark_token(self):
        # Spending a dark token flips it to the light pool.
        self.dark_tokens -= 1
        self.light_tokens += 1
        self.save()

    def set_destiny_tokens(self):
        """
        Roll a force die for each playable characters, light/dark results stands for destiny tokens
        :return: None
        """
        forces_dice_result = roll_dice(force=self.characters.filter(type=CHARACTER_TYPE_PC).count())
        self.light_tokens = forces_dice_result.get(DICE_LIGHT_FORCE)
        self.dark_tokens = forces_dice_result.get(DICE_DARK_FORCE)

    def get_destiny_upgrade(self, character, opposite=False):
        """
        Rand 1-100, if the result is under the destiny_usage_percentage, return True
        :return: bool
        """
        # PCs draw from the light pool (NPCs from the dark one); *opposite*
        # inverts that choice.
        use_ligth_token = (character.type == CHARACTER_TYPE_PC and not opposite) or (
            character.type != CHARACTER_TYPE_PC and opposite)
        # No token available in the relevant pool: no upgrade possible.
        if (use_ligth_token and not self.light_tokens) or (not use_ligth_token and not self.dark_tokens):
            return False
        rand = randint(1, 100)
        if rand <= self.destiny_usage_percentage:
            if use_ligth_token:
                self._use_light_token()
            else:
                self._use_dark_token()
            return True
        return False

    # Combat
    def set_initiative(self, characters_awareness):
        """
        Set the initiative for every characters involved in a fight
        :param characters_awareness: dict (id_character: awareness(bool)
        :return:
        """
        first_active_character = None
        for id_character, awareness in characters_awareness.items():
            character = self.characters.get(pk=id_character)
            # Aware characters roll Cool, surprised ones roll Vigilance.
            skill_to_test = COOL if awareness else VIGILANCE
            dice_result = roll_dice(**character.get_skill_dice(skill_to_test))
            # Successes weigh 10, triumphs 5, advantages 1.
            initiative = dice_result.get('remaining_success', 0) * 10 \
                + dice_result.get(DICE_TRIUMPH, 0) * 5 \
                + dice_result.get(DICE_ADVANTAGE, 0)
            character.initiative = initiative
            character.is_fighting = True
            if not first_active_character or initiative > first_active_character.initiative:
                first_active_character = character
            character.save()
        # NOTE(review): first_active_character is None when
        # characters_awareness is empty — this would raise AttributeError.
        first_active_character.is_active = True
        first_active_character.save()

    def next_turn(self):
        """
        End the actual character's turn and start the next character's turn
        :return:
        """
        fighting_characters = self.characters.filter(is_fighting=True)
        active_character_turn_ended = False
        next_character = None
        # cycle() wraps around so the highest-initiative character follows
        # the lowest one.
        # NOTE(review): if no fighting character has is_active=True this
        # loop never breaks — confirm an active character always exists here.
        for character in cycle(fighting_characters.order_by('-initiative')):
            if character.is_active:
                character.end_combat_turn()
                active_character_turn_ended = True
                continue
            if not active_character_turn_ended:
                continue
            character.refresh_from_db()
            character.start_combat_turn()
            next_character = character
            break
        return next_character

    def end_fight(self):
        """End combat for every fighter and purge fight-scoped effects."""
        for character in self.characters.filter(is_fighting=True):
            character.end_fight()
        # Remove Effect with fight duration
        CharacterEffect.objects.filter(
            Q(Q(source_character__campaign_id=self.id) | Q(target__campaign_id=self.id)),
            duration_type=EFFECT_DURATION_FIGHT).delete()

    class Meta:
        verbose_name = _("campagne")
        verbose_name_plural = _("campagnes")
class Statistics(models.Model):
    """Abstract mixin holding every characteristic, force and skill rank of a character."""
    # Characteristics
    brawn = models.PositiveSmallIntegerField(default=0, verbose_name=_("vigueur"))
    agility = models.PositiveSmallIntegerField(default=0, verbose_name=_("agilité"))
    intellect = models.PositiveSmallIntegerField(default=0, verbose_name=_("intelligence"))
    cunning = models.PositiveSmallIntegerField(default=0, verbose_name=_("ruse"))
    willpower = models.PositiveSmallIntegerField(default=0, verbose_name=_("volonté"))
    presence = models.PositiveSmallIntegerField(default=0, verbose_name=_("présence"))
    # Force
    force = models.PositiveSmallIntegerField(default=0, verbose_name=_("force"))
    morality = models.PositiveSmallIntegerField(default=50, verbose_name=_("moralité"))
    conflit = models.PositiveSmallIntegerField(default=0, verbose_name=_("conflit"))
    # Skills
    # General skills
    astrogation = models.PositiveSmallIntegerField(default=0, verbose_name=_("astrogation"))
    athletics = models.PositiveSmallIntegerField(default=0, verbose_name=_("athlétisme"))
    charm = models.PositiveSmallIntegerField(default=0, verbose_name=_("charme"))
    coercion = models.PositiveSmallIntegerField(default=0, verbose_name=_("coercition"))
    computers = models.PositiveSmallIntegerField(default=0, verbose_name=_("informatique"))
    cool = models.PositiveSmallIntegerField(default=0, verbose_name=_("calme"))
    coordination = models.PositiveSmallIntegerField(default=0, verbose_name=_("coordination"))
    deception = models.PositiveSmallIntegerField(default=0, verbose_name=_("tromperie"))
    discipline = models.PositiveSmallIntegerField(default=0, verbose_name=_("sang froid"))
    leadership = models.PositiveSmallIntegerField(default=0, verbose_name=_("commandement"))
    mechanics = models.PositiveSmallIntegerField(default=0, verbose_name=_("mécanique"))
    medecine = models.PositiveSmallIntegerField(default=0, verbose_name=_("médecine"))
    negociation = models.PositiveSmallIntegerField(default=0, verbose_name=_("négociation"))
    perception = models.PositiveSmallIntegerField(default=0, verbose_name=_("perception"))
    piloting = models.PositiveSmallIntegerField(default=0, verbose_name=_("pilotage"))
    resilience = models.PositiveSmallIntegerField(default=0, verbose_name=_("résistance"))
    skulduggery = models.PositiveSmallIntegerField(default=0, verbose_name=_("magouilles"))
    stealth = models.PositiveSmallIntegerField(default=0, verbose_name=_("discretion"))
    streetwise = models.PositiveSmallIntegerField(default=0, verbose_name=_("système D"))
    survival = models.PositiveSmallIntegerField(default=0, verbose_name=_("survie"))
    vigilance = models.PositiveSmallIntegerField(default=0, verbose_name=_("vigilance"))
    # Combat skills
    brawl = models.PositiveSmallIntegerField(default=0, verbose_name=_("pugilat"))
    gunnery = models.PositiveSmallIntegerField(default=0, verbose_name=_("artillerie"))
    lightsaber = models.PositiveSmallIntegerField(default=0, verbose_name=_("sabre laser"))
    melee = models.PositiveSmallIntegerField(default=0, verbose_name=_("corps à corps"))
    ranged_heavy = models.PositiveSmallIntegerField(default=0, verbose_name=_("distance (armes lourdes)"))
    ranged_light = models.PositiveSmallIntegerField(default=0, verbose_name=_("distance (armes légères)"))
    # Knowledge skills
    core_world = models.PositiveSmallIntegerField(default=0, verbose_name=_("mondes du noyau"))
    education = models.PositiveSmallIntegerField(default=0, verbose_name=_("education"))
    lore = models.PositiveSmallIntegerField(default=0, verbose_name=_("culture"))
    outer_rim = models.PositiveSmallIntegerField(default=0, verbose_name=_("bordure exterieure"))
    underworld = models.PositiveSmallIntegerField(default=0, verbose_name=_("pègre"))
    xenology = models.PositiveSmallIntegerField(default=0, verbose_name=_("xénologie"))

    class Meta:
        abstract = True
class Character(NamedModel, Statistics):
    """A character (PC or NPC): characteristics, combat state, inventory and skill tests."""
    campaign = models.ForeignKey(
        'Campaign', blank=True, null=True, on_delete=models.SET_NULL,
        related_name='characters', verbose_name=_("campagne"))
    player = models.ForeignKey(
        'Player', blank=True, null=True, on_delete=models.SET_NULL,
        related_name='characters', verbose_name=_("joueur"))
    type = models.CharField(max_length=10, choices=CHARACTER_TYPES, verbose_name=_("type"))
    species = models.CharField(max_length=20, choices=SPECIES, verbose_name=_("espèce"))
    # Combat
    initiative = models.PositiveSmallIntegerField(default=0, verbose_name=_("initiative"))
    is_active = models.BooleanField(default=False, verbose_name=_("personnage actif ?"))
    is_fighting = models.BooleanField(default=False, verbose_name=_("en combat ?"))
    actual_health = models.PositiveSmallIntegerField(default=0, verbose_name=_("santé actuelle"))
    actual_strain = models.PositiveSmallIntegerField(default=0, verbose_name=_("stress actuel"))
    critical_wounds = models.PositiveSmallIntegerField(default=0, verbose_name=_("blessures critiques"))
    # Position State - Direct combat dice modifiers
    # Aiming => + 1 fortune dice on the character's ranged attack
    aiming = models.BooleanField(default=False, verbose_name=_("visée"))
    # Undercover => +1 misfortune dice on ranged attack targeting the character
    undercover = models.BooleanField(default=False, verbose_name=_("sous couverture"))
    # Guarded stance => +1 misfortune dice on character's attack and melee attack targeting the character
    guarded_stance = models.BooleanField(default=False, verbose_name=_("en garde"))
    # Dropped prone +1 misfortune dice on ranged attack and +1 fortune dice on melee attack targeting the character
    dropped_prone = models.BooleanField(default=False, verbose_name=_("au sol"))
    # Experience / Misc
    actual_experience = models.PositiveSmallIntegerField(default=0, verbose_name=_("experience actuelle"))
    total_experience = models.PositiveIntegerField(default=0, verbose_name=_("experience totale"))
    money = models.PositiveSmallIntegerField(default=0, verbose_name=_("crédits"))
    # NPC specific
    max_health_value = models.PositiveSmallIntegerField(default=5, verbose_name=_("Points de vie (NPC)"))
    minion_quantity = models.PositiveSmallIntegerField(default=1, verbose_name=_("nombre de personnages (Sbires)"))
    special_skills_list = JsonField(blank=True, null=True, verbose_name=_("compétences spéciales (carrière, sbires, etc..)"))

    def _get_attribute_modifier(self, attribute_name):
        """
        Get the total value of the effect modifiers applied to one attribute.
        :param attribute_name: name of the modified attribute (stat, skill, defense, ...)
        :return: summed modifier value (0 when no effect applies)
        """
        stat_modifiers = self.applied_effects.filter(effect__type=EFFECT_ATTRIBUTE_MODIFIER, effect__attribute=attribute_name)
        return stat_modifiers.aggregate(Sum('modifier_value')).get('modifier_value__sum') or 0

    @property
    def is_conscious(self):
        # Only PC and Nemesis have strain
        return self.actual_health > 0 and (self.actual_strain > 0 or self.type not in [CHARACTER_TYPE_PC, CHARACTER_TYPE_NEMESIS])

    @property
    def defense(self):
        """
        Armor + talent
        :return: defense value
        """
        defense_value = self.inventory.filter(
            equiped=True, item__type=ITEM_ARMOR).aggregate(Sum('item__defense')).get('item__defense__sum') or 0
        defense_value += self._get_attribute_modifier(ATTRIBUTE_DEFENSE)
        return defense_value

    @property
    def max_health(self):
        """
        Brawn + species ability + talent
        :return: max_health value
        """
        if self.type == CHARACTER_TYPE_PC:
            max_health_value = self.stats.get(STAT_BRAWN) + SPECIES_ABILITIES.get(self.species, {}).get('max_health', 10)
        else:
            # NPCs use a fixed value; minions pool their health together.
            max_health_value = self.max_health_value
            if self.type == CHARACTER_TYPE_MINION:
                max_health_value *= self.minion_quantity
        max_health_value += self._get_attribute_modifier(ATTRIBUTE_MAX_HEALTH)
        return max_health_value

    @property
    def max_strain(self):
        """
        Willpower + species ability + talent
        :return: max_strain value
        """
        max_strain_value = self.stats.get(STAT_WILLPOWER)
        if self.type == CHARACTER_TYPE_PC:
            max_strain_value += SPECIES_ABILITIES.get(self.species, {}).get('max_strain', 10)
        max_strain_value += self._get_attribute_modifier(ATTRIBUTE_MAX_STRAIN)
        return max_strain_value

    @property
    def max_charge(self):
        """
        Brawn + 5
        :return: max weight (encumbrance) the character can carry
        """
        return 5 + self.stats.get(STAT_BRAWN)

    @property
    def actual_charge(self):
        # Total carried weight: sum of item weight * quantity over the inventory.
        return self.inventory.annotate(
            total_item_weight=F('item__weight') * F('quantity')).aggregate(
            Sum('total_item_weight', output_field=FloatField())).get('total_item_weight__sum') or 0

    @property
    def is_overloaded(self):
        return self.actual_charge > self.max_charge

    @property
    def soak_value(self):
        """
        Brawn + armor + talent
        :return: soak_value
        """
        soak_value = self.stats.get(STAT_BRAWN)
        soak_value += self.inventory.filter(
            equiped=True, item__type=ITEM_ARMOR).aggregate(Sum('item__soak_value')).get('item__soak_value__sum') or 0
        soak_value += self._get_attribute_modifier(ATTRIBUTE_SOAK_VALUE)
        return soak_value

    @property
    def stats(self):
        """
        Get all the stats values with potential modifiers.
        Non-Nemesis characters are capped at 5; values never go below 0.
        :return: dict (stat_name: final value)
        """
        stats = {}
        for stat_name in DICT_STATS.keys():
            stat_value = getattr(self, stat_name, 0)
            stat_value += self._get_attribute_modifier(stat_name)
            if self.type != CHARACTER_TYPE_NEMESIS:
                stat_value = min(stat_value, 5)
            stats[stat_name] = max(stat_value, 0)
        return stats

    @property
    def skills(self):
        """
        Get all the skills values with potential modifiers.
        Non-Nemesis characters are capped at 5; values never go below 0.
        :return: dict (skill_name: final value)
        """
        skills = {}
        for skill_name in DICT_SKILLS.keys():
            skill_value = getattr(self, skill_name, 0)
            skill_value += self._get_attribute_modifier(skill_name)
            if self.type != CHARACTER_TYPE_NEMESIS:
                skill_value = min(skill_value, 5)
            skills[skill_name] = max(skill_value, 0)
        return skills

    def get_skill_dice(self, skill_name, dice_upgrades=0, target=None, opposite=False):
        """
        Get the aptitude/difficulty and mastery/challenge dice pool for a skill name.
        :param skill_name: skill name
        :param dice_upgrades: number of dice to upgrade
        :param target: optional target character (their opposite-test effects are folded in)
        :param opposite: opposite test ? change the aptitude/mastery dice to difficulty/challenge
        :return: dict of dice pool
        """
        skill_value = self.skills.get(skill_name, 0)
        # Find the characteristic the skill depends on.
        for stat_name, skills in SKILL_DEPENDANCIES.items():
            if skill_name in skills:
                stat_value = self.stats.get(stat_name, 0)
                break
        else:
            stat_value = 0
        # The lower of skill/stat gives mastery dice, the difference gives aptitude dice.
        mastery_dice = min(skill_value, stat_value)
        aptitude_dice = max(skill_value, stat_value) - mastery_dice
        # Upgrade dice -> transform aptitude dice into mastery dice or add aptitude dice
        # Minions gains upgrade dice on special skills (if any)
        if self.type == CHARACTER_TYPE_MINION and skill_name in (self.special_skills_list or []):
            dice_upgrades += self.minion_quantity - 1
        if dice_upgrades:
            for i in range(dice_upgrades):
                if aptitude_dice:
                    aptitude_dice -= 1
                    mastery_dice += 1
                else:
                    aptitude_dice += 1
        result = {
            'aptitude' if not opposite else 'difficulty': aptitude_dice,
            'mastery' if not opposite else 'challenge': mastery_dice
        }
        # Dice modifier effects
        dice_filter = dict(effect__type=EFFECT_DICE_POOL_MODIFIER, effect__attribute=skill_name)
        dice_effects = self.applied_effects.filter(**dice_filter, effect__opposite_test=False)
        # Opposite Modifiers - Effect affecting a test on the target character
        if target:
            target_dice_effects = CharacterEffect.objects.filter(
                **dice_filter, target=target, effect__opposite_test=True)
            dice_effects = dice_effects.union(target_dice_effects)
        for dice_type, number in dice_effects.values_list('effect__dice', 'modifier_value'):
            result[dice_type] = max(result.get(dice_type, 0) + number, 0)
        return result

    def modify_health(self, value, save=False):
        """
        Add `value` to the current health (negative = damage), clamped to [0, max_health].
        :param value: signed health delta
        :param save: persist the change immediately
        :return: None
        """
        if not value:
            return
        health = self.actual_health + value
        self.actual_health = max(min(health, self.max_health), 0)
        # Update minions quantity
        if value < 0 and self.type == CHARACTER_TYPE_MINION:
            # NOTE(review): remaining health is divided by minion_quantity — presumably it
            # should be divided by each minion's individual health; verify against the rules.
            new_minion_quantity, modulo = divmod(self.actual_health, self.minion_quantity)
            if modulo:
                new_minion_quantity += 1
            self.minion_quantity = new_minion_quantity
        if save:
            self.save(update_fields=['actual_health', 'minion_quantity'])

    def modify_strain(self, value, save=False):
        """
        Add `value` to the current strain (negative = strain damage), clamped to [0, max_strain].
        :param value: signed strain delta
        :param save: persist the change immediately
        :return: None
        """
        if not value:
            return
        # Only PC and Nemesis can loose strain / others take health damages instead
        if self.type not in [CHARACTER_TYPE_PC, CHARACTER_TYPE_NEMESIS]:
            return self.modify_health(value, save=save)
        strain = self.actual_strain + value
        self.actual_strain = max(min(strain, self.max_strain), 0)
        if save:
            self.save(update_fields=['actual_strain'])

    def start_combat_turn(self):
        """Mark this character as the active one."""
        self.is_active = True
        self.save()

    def end_combat_turn(self):
        """End the active turn: tick down/apply turn-scoped effects, then deactivate."""
        self.is_active = False
        # Decrease turn duration or remove effects with turn duration
        queryset = CharacterEffect.objects.select_related('target').filter(
            Q(Q(duration_type=EFFECT_DURATION_SOURCE_TURN, source_character_id=self.id) | Q(
                duration_type=EFFECT_DURATION_TARGET_TURN, target_id=self.id)))
        # Apply health/strain modifiers effects
        for effect in queryset.filter(effect__type__in=[EFFECT_HEALTH_MODIFIER, EFFECT_STRAIN_MODIFIER]):
            effect.apply_direct_modifier(self if effect.target_id == self.id else effect.target)
        # Expired effects are removed, the rest tick down one turn.
        queryset.filter(nb_turn=1).delete()
        # NOTE(review): this also decrements effects already at nb_turn <= 0 — confirm intended.
        queryset.update(nb_turn=F('nb_turn') - 1)
        self.save()

    def end_fight(self):
        """Leave combat and recover strain (best of Presence stat and Cool skill)."""
        self.is_fighting = False
        # Recover strain
        recovered_strain = max(self.stats.get(STAT_PRESENCE), self.skills.get(COOL))
        self.modify_strain(value=recovered_strain)
        self.save()

    def rest(self):
        """A full rest: strain back to max and one health point recovered."""
        self.actual_strain = self.max_strain
        self.modify_health(value=1)
        self.save()

    # Tests
    def attack(self, target, weapon_id=None, upgrade=0, range=RANGE_ENGAGED,**bonus_dice):
        # TODO: not implemented yet.
        pass

    def opposite_skill_test(self, skill_name, target, opposite_skill='', check_destiny=True,
                            dice_upgrades=0, **bonus_dice):
        """
        Roll this character's skill against `target`'s opposing skill.
        :param skill_name: skill rolled by this character
        :param target: opposing character
        :param opposite_skill: target's skill (defaults to the same skill)
        :param check_destiny: allow destiny-token upgrades on both sides
        :param dice_upgrades: extra upgrades on this character's pool
        :param bonus_dice: extra dice merged into the pool (dice_type: count)
        :return: dict with the built 'dice_pool' and the roll 'result'
        """
        # Destiny upgrade ?
        if check_destiny and self.campaign.get_destiny_upgrade(self):
            dice_upgrades += 1
        dice_pool = self.get_skill_dice(skill_name, target=target, dice_upgrades=dice_upgrades)
        # Opposite test
        # Destiny upgrade ?
        target_dice_upgrades = 1 if check_destiny and self.campaign.get_destiny_upgrade(target) else 0
        opposite_skill = opposite_skill or skill_name
        dice_pool.update(target.get_skill_dice(opposite_skill, opposite=True, dice_upgrades=target_dice_upgrades))
        for dice_type, value in bonus_dice.items():
            dice_pool[dice_type] = dice_pool.get(dice_type, 0) + value
        return {
            'dice_pool': dice_pool,
            'result': roll_dice(**dice_pool)
        }

    def skill_test(self, skill_name, difficulty=DIFFICULTY_SIMPLE, challenge=0, dice_upgrades=0, **bonus_dice):
        """
        Roll this character's skill against a fixed difficulty.
        :param skill_name: skill rolled
        :param difficulty: number of difficulty dice
        :param challenge: number of challenge dice
        :param dice_upgrades: extra upgrades on this character's pool
        :param bonus_dice: extra dice merged into the pool (dice_type: count)
        :return: dict with the built 'dice_pool' and the roll 'result'
        """
        has_difficulty = difficulty or challenge
        # Destiny upgrade ?
        if has_difficulty and self.campaign.get_destiny_upgrade(self):
            dice_upgrades += 1
        dice_pool = self.get_skill_dice(skill_name, dice_upgrades=dice_upgrades)
        # Upgrade difficulty dice into challenge dice if destiny_upgrade is True
        if has_difficulty and self.campaign.get_destiny_upgrade(self, opposite=True):
            if difficulty:
                difficulty -= 1
                challenge += 1
            else:
                difficulty += 1
        dice_pool.update({
            DICE_TYPE_DIFFICULTY: difficulty,
            DICE_TYPE_CHALLENGE: challenge
        })
        for dice_type, value in bonus_dice.items():
            dice_pool[dice_type] = dice_pool.get(dice_type, 0) + value
        return {
            'dice_pool': dice_pool,
            'result': roll_dice(**dice_pool)
        }

    def __str__(self):
        return f'{self.name} - {self.get_species_display()} - ({self.player or self.get_type_display()})'

    class Meta:
        verbose_name = _("personnage")
        verbose_name_plural = _("personnages")
class Effect(NamedModel):
    """Definition of an effect: what it modifies (an attribute or a dice pool) and when."""
    type = models.CharField(max_length=20, choices=EFFECT_TYPES, verbose_name=_("type"))
    # Modifier
    attribute = models.CharField(max_length=15, choices=ATTRIBUTES, blank=True, verbose_name=_("attribut modifié"))
    dice = models.CharField(max_length=10, choices=DICE_TYPES, blank=True, verbose_name=_("dé modifié"))
    opposite_test = models.BooleanField(default=False, verbose_name=_("effet sur un test en opposition ?"))

    class Meta:
        verbose_name = _("effet")
        verbose_name_plural = _("effets")
class Talent(NamedModel):
    """A character talent; its effects are attached through TalentEffect."""
    # Activation
    activation_type = models.CharField(max_length=7, choices=ACTIVATION_TYPES, default=ACTIVATION_TYPE_PASSIVE,
                                       verbose_name=_("type d'activation"))

    class Meta:
        verbose_name = _("talent")
        verbose_name_plural = _("talents")
class EffectModifier(models.Model):
    """Abstract link between an Effect and a value/activation/duration; knows how to apply itself."""
    effect = models.ForeignKey('Effect', on_delete=models.CASCADE, related_name='+', verbose_name=_("effet"))
    # Modifier
    modifier_value = models.IntegerField(default=0, verbose_name=_("valeur du modificateur"))
    # Activation
    activation_type = models.CharField(max_length=7, choices=ACTIVATION_TYPES, default=ACTIVATION_TYPE_PASSIVE,
                                       verbose_name=_("type d'activation"))
    # Duration
    duration_type = models.CharField(max_length=11, choices=EFFECT_DURATIONS, blank=True, verbose_name=_("type de durée"))
    nb_turn = models.IntegerField(default=0, verbose_name=_("nombre de tours"))

    def apply(self, targets, source_character_id=None, source_equipment_id=None, source_item_id=None, source_talent_id=None):
        """
        Apply the effect on the targets: direct modifiers are resolved immediately,
        anything else is recorded as a CharacterEffect.
        :param targets: target characters
        :param source_character_id: id of the source character
        :param source_equipment_id: id of the source equipment
        :param source_item_id: id of the source item
        :param source_talent_id: id of the source talent
        :return: None
        """
        for target in targets:
            if self.duration_type == EFFECT_DURATION_DIRECT:
                # Direct health/strain modifier
                self.apply_direct_modifier(target)
            else:
                CharacterEffect.objects.create(
                    target_id=target.id,
                    source_character_id=source_character_id,
                    source_equipment_id=source_equipment_id,
                    source_item_id=source_item_id,
                    source_talent_id=source_talent_id,
                    effect=self.effect,
                    modifier_value=self.modifier_value,
                    activation_type=self.activation_type,
                    duration_type=self.duration_type,
                    nb_turn=self.nb_turn
                )

    def apply_direct_modifier(self, target):
        """
        Apply a direct effect modifier on the target (health or strain).
        :param target: target character
        :return: None
        """
        if self.effect.type == EFFECT_HEALTH_MODIFIER:
            target.modify_health(self.modifier_value, save=True)
        elif self.effect.type == EFFECT_STRAIN_MODIFIER:
            target.modify_strain(self.modifier_value, save=True)

    def __str__(self):
        return f'{self.effect} - valeur: {self.modifier_value} - nombres de tours: {self.nb_turn}'

    class Meta:
        abstract = True
class ItemEffect(EffectModifier):
    """Effect carried by an item (applied on equip or on consumable use)."""
    item = models.ForeignKey('Item', on_delete=models.CASCADE, related_name='effects', verbose_name=_("objet"))

    class Meta:
        verbose_name = _("effet d'objet")
        verbose_name_plural = _("effets d'objet")
class TalentEffect(EffectModifier):
    """Effect carried by a talent."""
    talent = models.ForeignKey('Talent', on_delete=models.CASCADE, related_name='effects', verbose_name=_("talent"))

    class Meta:
        verbose_name = _("effet de talent")
        verbose_name_plural = _("effets de talents")
class CharacterEffect(EffectModifier):
    """An effect currently applied to a character, with a record of what produced it."""
    # Source
    source_character = models.ForeignKey('Character', on_delete=models.SET_NULL, blank=True, null=True,
                                         related_name='generated_effects',
                                         verbose_name=_("personnage source"))
    source_equipment = models.ForeignKey('Equipment', on_delete=models.SET_NULL, blank=True, null=True,
                                         related_name='generated_effects',
                                         verbose_name=_("equipement source"))
    source_item = models.ForeignKey('Item', on_delete=models.SET_NULL, blank=True, null=True,
                                    related_name='generated_effects',
                                    verbose_name=_("objet source"))
    source_talent = models.ForeignKey('Talent', on_delete=models.SET_NULL, blank=True, null=True,
                                      related_name='generated_effects',
                                      verbose_name=_("talent source"))
    target = models.ForeignKey('Character', on_delete=models.CASCADE, related_name='applied_effects',
                               verbose_name=_("personnage cible"))

    def __str__(self):
        return f'cible: {self.target} / effet: {self.effect}'

    class Meta:
        verbose_name = _("effet de personnages")
        verbose_name_plural = _("effets de personnages")
class Item(NamedModel):
    """An item definition (weapon, armor, consumable, ...) shared by all inventories."""
    type = models.CharField(max_length=10, choices=ITEM_TYPES, verbose_name=_("type"))
    weight = models.FloatField(default=0.0, verbose_name=_("encombrement"))
    price = models.PositiveIntegerField(default=0, verbose_name=_("prix"))
    hard_point = models.PositiveIntegerField(default=0, verbose_name=_("emplacement d'améliorations"))
    skill = models.CharField(max_length=10, choices=ITEM_SKILLS, blank=True, null=True, verbose_name=_("compétence associée"))
    # Weapon Specific
    range = models.CharField(max_length=10, choices=RANGE_BANDS, blank=True, verbose_name=_("portée"))
    damage = models.PositiveSmallIntegerField(default=0, verbose_name=_("dégats"))
    skill_based_damage = models.CharField(max_length=15, choices=ATTRIBUTES, blank=True, verbose_name=_("dégats basés sur attribut"))
    critique = models.PositiveSmallIntegerField(default=0, verbose_name=_("critique"))
    # Armor Specific
    soak_value = models.PositiveSmallIntegerField(default=0, verbose_name=_("valeur d'encaissement"))
    defense = models.PositiveSmallIntegerField(default=0, verbose_name=_("défense"))

    @property
    def is_equipable(self):
        """
        Can this item be equipped (weapons and armors only)?
        """
        return self.type in (ITEM_WEAPON, ITEM_ARMOR)

    def add_to_inventory(self, character_id, quantity=1):
        """
        Add the item in the character's inventory.
        :param character_id: character's id
        :param quantity: quantity of the item to add in the character's inventory
        :return: None
        """
        # Add quantity if the same item is already in the character's inventory, else create the equipment
        if not Equipment.objects.filter(
                character_id=character_id, item_id=self.id).update(quantity=F('quantity') + quantity):
            Equipment.objects.create(character_id=character_id, item_id=self.id, quantity=quantity)

    class Meta:
        verbose_name = _("objet")
        verbose_name_plural = _("objets")
class Equipment(models.Model):
    """An inventory line: a quantity of one item held (and possibly equipped) by a character."""
    character = models.ForeignKey('Character', on_delete=models.CASCADE, related_name='inventory', verbose_name=_("personnage"))
    item = models.ForeignKey('Item', on_delete=models.CASCADE, related_name='+', verbose_name=_("objet"))
    quantity = models.PositiveIntegerField(default=1, verbose_name=_("quantité"))
    equiped = models.BooleanField(default=False, verbose_name=_("équipé ?"))

    def equip(self):
        """Equip the item and apply its passive effects to the owner."""
        self.equiped = True
        # Passive effects activation
        for effect in self.item.effects.filter(activation_type=ACTIVATION_TYPE_PASSIVE).all():
            effect.apply(targets=[self.character], source_equipment_id=self.id, source_character_id=self.character_id)
        self.save()

    def unequip(self):
        """Unequip the item and remove the passive effects it generated."""
        self.equiped = False
        CharacterEffect.objects.filter(activation_type=ACTIVATION_TYPE_PASSIVE, source_equipment__id=self.id).delete()
        self.save()

    def use_consumable(self, targets_ids):
        """
        Use consumable (medipack/grenade/..) on the given targets, then decrement the stock.
        :param targets_ids: ids of the targets
        :return: None
        """
        targets = Character.objects.filter(id__in=targets_ids)
        for effect in self.item.effects.all():
            effect.apply(targets, source_character_id=self.character_id, source_item_id=self.item_id)
        # Remove consumable from inventory
        if self.quantity == 1:
            self.delete()
        else:
            self.quantity -= 1
            self.save()

    def __str__(self):
        return f'objet: {self.item} / personnage: {self.character} / quantité: {self.quantity} / Equipé: {self.equiped}'

    class Meta:
        verbose_name = _("équipement")
        verbose_name_plural = _("équipements")
# Convenience tuple of every concrete model in this module (e.g. for admin/registration loops).
ALL_MODELS = (
    Player,
    Campaign,
    Character,
    CharacterEffect,
    Effect,
    Item,
    ItemEffect,
    Equipment,
    Talent,
    TalentEffect
)
|
# coding: utf-8
import paho.mqtt.client as mqtt
import os
import re
import RPi.GPIO as GPIO
from os.path import join, dirname
from dotenv import load_dotenv
from modules.re_compiler import ReMatch
from modules.led_flash import Flash
# Load environment variables (CloudMQTT credentials/URL) from a .env file next to this script.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
def on_connect(client, userdata, flags, respons_code):
    """MQTT connect callback: log the result code and subscribe to every topic."""
    print('Connected with result code {}'.format(respons_code))
    print('Connected to cloudMQTT')
    # '#' is the MQTT multi-level wildcard: receive messages from all topics.
    client.subscribe('#')
def on_message(client, userdata, msg):
    """MQTT message callback: flash the blue/yellow LEDs depending on the payload text."""
    message = msg.payload.decode('utf-8')
    print(msg.topic+' '+message)
    try:
        # Flash wraps one GPIO pin each (BCM numbering) — see modules.led_flash.
        blue = Flash(BCM_NUM=13)
        yellow = Flash(BCM_NUM=19)
        # NOTE: the three checks are independent, so a message can trigger several patterns.
        if 'flash both of blue and yellow' in message.lower():
            # Alternate the two LEDs five times.
            for _j in range(5):
                yellow.flash(COUNT=1)
                blue.flash(COUNT=1)
        if 'flash yellow' in message.lower():
            yellow.flash(COUNT=10)
        if 'flash blue' in message.lower():
            blue.flash(COUNT=10)
    except Exception as e:
        # On any GPIO/flash error, release the pins before returning.
        print(e)
        GPIO.cleanup()
    else:
        # Success path: drop the Flash objects (presumably their cleanup runs on
        # deletion — confirm in modules.led_flash).
        del blue
        del yellow
def sub_main():
    """Connect to CloudMQTT over TLS using .env credentials and block forever processing messages."""
    client = mqtt.Client(protocol=mqtt.MQTTv311)
    client.on_connect = on_connect
    client.on_message = on_message
    # System CA bundle enables TLS certificate verification.
    client.tls_set('/etc/ssl/certs/ca-certificates.crt')
    client.username_pw_set(os.environ["CLOUD_MQTT_USERNAME"], os.environ["CLOUD_MQTT_PASSWORD"])
    client.connect(os.environ["CLOUD_MQTT_URL"], int(os.environ["CLOUD_MQTT_SSL_PORT"]), keepalive=60)
    # Blocking network loop: handles reconnects and dispatches callbacks.
    client.loop_forever()

if __name__ == '__main__':
    sub_main()
|
import image
import meme
from flask import Flask, jsonify
from flask_restful import Api, Resource, reqparse
app = Flask( __name__ )
api = Api( app )

class Meme(Resource):
    """REST resource: build a meme from an image search query and a caption."""
    def get(self, query, message):
        # Download the first image matching `query`, overlay `message` on it.
        return {'meme': meme.makememe( message, image.downloadimage(query) ) }, 200

# Route: GET /meme/<query>/<message>
api.add_resource(Meme, '/meme/<string:query>/<string:message>')

if __name__ == '__main__':
    # NOTE: debug=True is for local development only.
    app.run(debug=True)
#!/usr/bin/env python
# ----------------------------------------------------------
# Tests - Glass Server
# ----------------------------------------------------------
# Folder where all Tests are held for glass server.
#
# This init.py file
#
# ---------------------------------------------------------------
import os, sys
import logging
extension = '.py'

def scan_folders(extension):
    """Walk <package>/tests and collect every file ending in `extension`, except __init__.py.

    Returns a list of relative paths.
    """
    ext_len = len(extension)
    tests_root = __name__ + os.sep + 'tests'
    return [
        os.path.join(root, name)
        for root, dirs, files in os.walk(tests_root)
        for name in files
        # Same suffix check as slicing name[-ext_len:], skipping package markers.
        if name != "__init__.py" and name[-ext_len:] == extension
    ]
def import_test_files(test_files):
    """Import every test file and return the imported module objects.

    File paths are converted to dotted module names ('tests/foo.py' -> 'tests.foo').
    If a direct import fails, the parent of sys.path[0] is added to sys.path and the
    module is retried under the 'test.' package prefix (preserving the original
    fallback behavior).

    :param test_files: list of .py file paths (as produced by scan_folders)
    :return: list of imported modules
    """
    import importlib  # local import: keeps the module's top-level imports untouched

    modules = []
    for f in test_files:
        test_name = f[:-3].replace(os.sep, '.')
        try:
            # importlib.import_module returns the module object directly, unlike the
            # previous exec('import ...') approach, whose fallback path never bound
            # the submodule name and raised NameError when collecting it.
            module = importlib.import_module(test_name)
        except ImportError:
            sys.path.append(os.path.split(sys.path[0])[0])
            module = importlib.import_module('test.' + test_name)
        modules.append(module)
    return modules
def run_test():
    """Step every imported test module once, logging (never raising) on failure."""
    for module in test_list:
        try:
            module.thetest.step()
        except Exception:
            # except Exception (not a bare except:) so KeyboardInterrupt/SystemExit
            # still propagate; logging.exception records the traceback as well.
            logging.exception("Running Test Exception %r", module.thetest.name)
def AJAX_list(active_list):
    """Sync each test's active flag with the webpage selection and build display rows.

    :param active_list: indices of the tests the page marked active, or None to
        leave every flag untouched (the page did not report its selection)
    :return: list of [active, name, filename, message] rows, one per test
    """
    out = []
    for count, test in enumerate(test_list):
        t = test.thetest
        # Set active state of tests, from active_list from webpage.
        if active_list is not None:
            t.active = count in active_list
        out.append([t.active, t.name, t.filename, t.message])
    return out
# Main program: discover and import every test module at package import time.
test_files = scan_folders(extension)
#print test_files
test_list = import_test_files(test_files)
#import Main.airspeed_mod
|
#Functions can return Something
def add(a, b):
    """Print the operation and return the sum of a and b."""
    # print(...) with a single argument is valid under both Python 2 and Python 3,
    # unlike the original Python-2-only `print "..."` statement.
    print("ADDING %d + %d " % (a, b))
    return a + b
def subtract(a, b):
    """Print the operation and return the difference of a and b."""
    # print(...) call form works under both Python 2 and Python 3.
    print("SUBTRACTING %d - %d " % (a, b))
    return a - b
def multiply(a, b):
    """Print the operation and return the product of a and b."""
    # print(...) call form works under both Python 2 and Python 3.
    print("MULTIPLYING %d*%d " % (a, b))
    return a * b
def divide(a, b):
    """Print the operation and return the integer (floor) quotient of a and b."""
    # print(...) call form works under both Python 2 and Python 3.
    print("DIVIDING %d/%d " % (a, b))
    # // preserves the original Python 2 integer-division semantics when this
    # file is run under Python 3 (where / would return a float).
    return a // b
# Exercise driver: chain the four helpers and print the results.
# All prints converted to call form so the script runs under Python 3 as well.
print("Let's do some math with just functions!")
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print("Age: %d ,Height : %d, Weight:%d ,IQ: %d " % (age, height, weight, iq))
print("Here is the puzzle.")
# Functions compose: innermost calls run first.
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print("That becomes: %d" % what)
#!/usr/bin/python
def palindrone(num):
    """Return True when the decimal representation of num is a palindrome."""
    digits = str(num)
    # A palindrome reads the same forwards and backwards.
    return digits == digits[::-1]
# Project Euler #55 (Lychrel numbers): count how many n < 10000 never produce a
# palindrome within 50 reverse-and-add iterations. `total` counts the numbers
# that DO become palindromic, so the answer printed is 10000 - total.
total = 0
for i in range(10000):
    temp = i
    for j in range(50):
        # Reverse-and-add first: the palindrome check only applies after at
        # least one iteration (per the problem statement).
        temp += int(str(temp)[::-1])
        if palindrone(temp):
            total += 1
            break
print(10000 - total)
|
import json
import torch
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from torch.utils.data import Dataset
import torch as pt
from preprocessing import read_split_images
from word2vec import train_w2v, use_w2v
PATH = "input/"
class FLICKR30K(Dataset):
    """Flickr30k dataset pairing image feature rows with vectorized captions."""
    # Shared across instances: fitted on the train split, reused for val/test.
    w2v_model = None
    count_model = None

    def __init__(self, mode="train", limit=-1, word_transformer="w2v"):
        """
        :param mode: one of 'train', 'val', 'test'
        :param limit: keep only the first `limit` images (-1 = keep everything)
        :param word_transformer: 'w2v' (word2vec) or 'bow' (bag of words)
        """
        super().__init__()
        assert mode in ['train', 'val', 'test']
        self.mode = mode
        internal_set_images = read_split_images(path=PATH, mode=self.mode, limit=limit)
        internal_set_captions = json.load(open('output/data/{}.json'.format(self.mode), 'r'))
        # Column 0 holds the image identifier; the rest are feature values.
        self.image_labels = internal_set_images.iloc[:, 0]
        # Positional args = (labels=0, axis=1): drop the identifier column.
        internal_set_images = internal_set_images.drop(0, 1)
        self.images = internal_set_images.to_numpy()
        self.caption_labels = list(internal_set_captions.keys())
        if word_transformer == "w2v":
            if mode == 'train':
                # Fit word2vec on the train captions and cache it on the class.
                self.captions, FLICKR30K.w2v_model = train_w2v(internal_set_captions.values())
            else:
                self.captions = use_w2v(internal_set_captions.values(), FLICKR30K.w2v_model)
        elif word_transformer == "bow":
            if mode == 'train':
                FLICKR30K.c_vec = CountVectorizer(stop_words='english', min_df=1, max_df=100000)
                self.captions = FLICKR30K.c_vec.fit_transform(internal_set_captions.values())
            else:
                # transform on train/val/test set
                self.captions = FLICKR30K.c_vec.transform(internal_set_captions.values())
        else:
            print("word_transformer argument should be either w2v or bow")
        if limit > -1:
            # The dataset provides 5 captions per image.
            self.captions = self.captions[:limit * 5]
            self.caption_labels = self.caption_labels[:limit * 5]
        # NOTE(review): this is a float (true division); np.repeat expects an
        # integer `repeats` — confirm this runs on the installed numpy version.
        self.captions_per_image = len(self.caption_labels) / len(self.image_labels)
        # Duplicate each image row once per caption so both arrays align 1:1.
        self.images = np.repeat(self.images, repeats=self.captions_per_image,
                                axis=0)
        # Shuffle captions and images independently; the index arrays are what
        # __getitem__ returns so pairs can be recovered downstream.
        self.caption_indices = np.random.permutation(len(self.images))
        self.image_indices = np.random.permutation(len(self.images))
        self.captions = self.captions[self.caption_indices]
        # NOTE(review): the next line overwrites caption_labels with (already
        # permuted) captions — it looks like it should permute caption_labels
        # instead; same issue for image_labels two lines below. Verify.
        self.caption_labels = self.captions[self.caption_indices]
        self.images = self.images[self.image_indices]
        self.image_labels = self.images[self.image_indices]

    def __getitem__(self, index):
        # Train mode also exposes the shuffle indices so the loss can match pairs.
        if self.mode == 'train':
            return self.image_indices[index], pt.tensor(self.images[index]).float(), self.caption_indices[
                index], pt.tensor(
                self.captions[index]).float()
        else:
            ## return in order: always linked to eachother
            return pt.tensor(self.images[self.image_indices[index]]).float(), pt.tensor(self.captions[self.caption_indices[index]]).float()

    def __len__(self):
        return len(self.captions)

    def get_dimensions(self):
        # (image feature size, caption vector size)
        return self.images.shape[1], self.captions.shape[1]
|
def solution(n):
    """Return the sum of every even number from 2 up to and including n."""
    return sum(range(2, n + 1, 2))
# Santosh Khadka
# Python collections module
# Specialized container types
from collections import Counter # NOTE: make sure the file you're working on is not called "collections" !!!
from collections import defaultdict # Assigns default value in the instance a KeyError would've occured. - Prevents KEY ERROR
from collections import namedtuple # Useful for very large tuples or when you cant remember what values are at what index.
mylist = [1, 1, 1, 1, 2, 2, 2, 3, 3, 5, 5, 6, 7, 8, 8, 9, 9, 9, 0, 0, 0, 0, 0, 0, 0]
# Counter is a dictionary subclass that helps count hashable objects
# Key is the object, value is the count
counted = Counter(mylist)
# print(counted.keys())
# print(counted.values())
sentence = "How many times does each word show up in this sentence with a word"
countSentece = Counter(sentence.split()) # Can also: Counter(sentence.lower().split())
# print(countSentece) # Prints from highest to lowest count: Counter({'word': 2, 'How': 1, 'many': 1, 'times': 1, 'does': 1, 'each': 1, 'show': 1, 'up': 1, 'in': 1, 'this': 1, 'sentence': 1, 'with': 1, 'a': 1})
letters = 'aaaaaaabbbbcccccdddddeeeeeeeeeee'
countLetters = Counter(letters)
# print(countLetters) # Doesnt keep original order: Counter({'e': 11, 'a': 7, 'c': 5, 'd': 5, 'b': 4})
# for key, value in countLetters.items():
# print(key, value)
# print(key)
# print(value)
# key = "a"
# print("Key:", key, "Value:", countLetters[key]) # 7
# print(countLetters.keys()) # odict_keys(['a', 'b', 'c', 'd', 'e'])
# print(list(countLetters.keys()))
'''
Common patterns when using the Counter() object:
sum(c.values()) # total of all counts
c.clear() # reset all counts
list(c) # list unique elements
set(c) # convert to a set
dict(c) # convert to a regular dictionary
c.items() # convert to a list of (elem, cnt) pairs
Counter(dict(list_of_pairs)) # convert from a list of (elem, cnt) pairs
c.most_common()[:-n-1:-1] # n least common elements
c += Counter() # remove zero and negative counts
'''
# for key in countLetters.items():
# print(key)
# print(countLetters.values()) # odict_values([7, 4, 5, 5, 11])
# print(list(countLetters.values()))
# print(countLetters.items()) # odict_items([('a', 7), ('b', 4), ('c', 5), ('d', 5), ('e', 11)])
# print(list(countLetters.items()))
# print(list(countLetters)) # ['a', 'b', 'c', 'd', 'e']
# print(list(countLetters))
## Most Common
# print(countLetters.most_common(3)) # [('e', 11), ('a', 7), ('c', 5)]
# Plain dicts raise KeyError on missing keys; defaultdict supplies a default.
dict1 = {'a':10}
# print(dict1)
# print(dict1['a'])
# print(dict1['z']) # KeyError - key not in dict
def_dict = defaultdict(lambda:0) # all default values are 0
def_dict['a'] = 1
# print(dict(def_dict))
def_dict['b']  # merely reading a missing key inserts the default 0 (no KeyError)
# print(dict(def_dict))
# namedtuple: a tuple subclass whose fields are accessible by name as well as index.
myTuple = (10, 20, 30)
print(myTuple[0])
Dog = namedtuple('Dog', ['age', 'breed', 'name'])
husk = Dog(age=5, breed="Husky", name="Husk")
print(type(husk))
print(husk.age)
print(husk.breed)
print(husk.name)
import datetime
import hashlib
import importlib
import os
import sys
import time
from collections import namedtuple
from capturing import Capturing
from status import Status
from virtualenvironment import IsolatedVirtualEnvironment
import database
db = database.getInstance()  # module-wide handle to the execution-history store
Chord = namedtuple('Chord', ['path', 'name'])  # a discovered module: directory + module name (no extension)
def findChords(directory, logging):
    """Recursively collect runnable chord modules under *directory*.

    A chord is any ``*.py`` file except ``__init__``; each is returned as
    ``Chord(path, name)`` where *name* is the filename minus its extension.
    """
    chords = []
    logging.debug('Looking for chords in %s', directory)
    for path, dirs, files in os.walk(directory):
        for file in files:
            # endswith('.py') already excludes compiled '.pyc' files.
            if not file.endswith('.py'):
                continue
            # Fixed: splitext keeps dotted names intact ("my.chord.py" ->
            # "my.chord"); the old split('.')[0] truncated them to "my".
            name = os.path.splitext(file)[0]
            if name != '__init__':
                logging.debug('Found chord %s', file)
                chords.append(Chord(path, name))
    return chords
def execute(function):
    """Run *function* with stdout captured.

    Returns (elapsed_ms, captured_output, error); *error* is None on success.
    """
    timer = time.clock if sys.platform == 'win32' else time.time
    started = timer()
    error = None
    with Capturing() as output:
        try:
            function()
        except Exception as e:
            error = e
    # Convert the elapsed wall time to milliseconds.
    elapsed_ms = (timer() - started) * 1000
    return (elapsed_ms, output, error)
def shouldRun(module, now, logging):
    """Ask *module* whether it wants to run at *now*.

    Modules without a shouldRun() hook are treated as libraries (never run);
    a hook that raises is logged and treated as "don't run".
    """
    if not hasattr(module, 'shouldRun'):
        logging.debug(
            'No shouldRun() found on %s, assuming it is a library',
            module.__name__)
        return False
    run = False
    try:
        run = module.shouldRun(now)
    except Exception as e:
        logging.warn('Could not run shouldRun() on %s, %s', module.__name__, e)
    logging.debug('shouldRun() returned %s', run)
    return run
def requiresVirtualEnv(module):
    """True when *module* declares a list/tuple ``requirements`` attribute."""
    return isinstance(getattr(module, 'requirements', None), (list, tuple))
def virtualEnvSignature(requirements):
    """Return a stable sha1 hex digest for a collection of requirement strings.

    Duplicates are dropped and names sorted, so the signature depends only on
    the *set* of requirements, not their order.
    """
    hasher = hashlib.sha1()
    # Fixed: sha1.update() requires bytes on Python 3 (a plain str only
    # worked on Python 2), so encode the joined string.
    hasher.update(",".join(sorted(set(requirements))).encode("utf-8"))
    return hasher.hexdigest()
def virtualEnvForModule(module, virtualEnvDirectory, logging):
    """Return the virtualenv path for *module*, or None when none is required.

    The path is keyed by the signature of the module's requirements, so
    modules with identical requirement sets share one environment.
    """
    logging.debug('Checking if %s needs to run in a virtualenv', module.__name__)
    if not requiresVirtualEnv(module):
        return None
    logging.debug('%s requires virtualenv', module.__name__)
    venv = os.path.join(virtualEnvDirectory, virtualEnvSignature(module.requirements))
    logging.debug('virtualenv for %s is %s', module.__name__, venv)
    return venv
def runModule(chord, virtualEnvDirectory, now, logging):
    """Import one chord, check shouldRun(now), execute its main() (optionally
    inside an isolated virtualenv) and persist timing/output/status to the db."""
    logging.debug("-" * 60)
    try:
        logging.debug('Adding %s to path', chord.path)
        sys.path.insert(0, chord.path)
        module = importlib.import_module(chord.name)
    except Exception as e:
        logging.warn('Could not import %s: %s', chord.name, e)
        # NOTE(review): chord.path is left on sys.path when the import fails.
        return
    logging.debug('Considering whether to run %s', chord.name)
    if shouldRun(module, now, logging):
        virtualEnv = virtualEnvForModule(module, virtualEnvDirectory, logging)
        start_time = int(time.time())
        status = Status.FAIL  # pessimistic default, flipped on a clean run
        logging.debug('Running main method on %s', chord.name)
        with IsolatedVirtualEnvironment(module, virtualEnv, logging):
            executionTime, output, error = execute(module.main)
        logging.debug('Ran main method on %s in %f ms', chord.name, executionTime)
        if error is None:
            status = Status.SUCCESS
        else:
            logging.error('Failed to run %s: %s', chord.name, error)
            # Append the error to the captured output so it is recorded too.
            output = "%s\n%s" % (output, error)
        logging.debug('Removing %s from path', sys.path[0])
        del sys.path[0]
        db.recordExecution(start_time, chord.name, executionTime,
                           status, str(output))
def run(directory, virtualEnvDirectory, now, logging):
    """Discover every chord under *directory* and run each one in turn."""
    for chord in findChords(directory, logging):
        runModule(chord, virtualEnvDirectory, now, logging)
|
from controlpanel import ISEOControlpanel; ISEOControlpanel
|
from pandas import *
from ggplot import *
path = 'c:\\Leah\\udacity\\P1\\turnstile_data_master_with_weather.csv'
# NOTE(review): with `from pandas import *` the name `pandas` itself is not
# bound; `pandas.read_csv` likely only resolves if pandas is imported
# elsewhere — plain `read_csv(path)` would be safer. Verify before running.
df = pandas.read_csv(path)
# Parse the date column and derive the day-of-week (0=Monday).
df['DATEd'] = to_datetime(df['DATEn'], format = '%Y-%m-%d')
df['weekday'] = df['DATEd'].dt.weekday
# Average all numeric columns per weekday (Python 2 print statement below).
grouped = df.groupby(['weekday'], as_index = False).mean()
print grouped.head()
#p = ggplot (grouped, aes(x='DATEn')) + geom_bar(aes(weight='ENTRIESn_hourly'),color='black',fill='steelblue')+\
#    scale_x_discrete()
#print (p)
from marshmallow import Schema, fields, EXCLUDE, post_load
from eddn.journal_v1.model import JournalV1, Header, Message
class BaseSchema(Schema):
    """Common base: silently drop unknown keys instead of raising on load."""
    class Meta:
        unknown = EXCLUDE
class HeaderSchema(BaseSchema):
    """Envelope metadata of an EDDN journal event; loads into a Header."""
    uploader_id = fields.String(required=True, data_key="uploaderID")
    software_name = fields.String(required=True, data_key="softwareName")
    software_version = fields.String(required=True, data_key="softwareVersion")
    gateway_timestamp = fields.DateTime(allow_none=True, data_key="gatewayTimestamp")
    @post_load
    def to_domain(self, data, **kwargs) -> Header:
        # Convert the validated dict straight into the domain object.
        return Header(**data)
class MessageSchema(BaseSchema):
    """Journal `message` payload; loads into a Message domain object."""
    event = fields.String(required=True)
    # Fixed: was `Required=True` (capital R), which marshmallow silently
    # treats as arbitrary field metadata — StarPos was effectively optional.
    star_pos = fields.List(fields.Float(), required=True, data_key="StarPos")
    system_name = fields.String(required=True, data_key="StarSystem")
    system_address = fields.Integer(required=True, data_key="SystemAddress")
    timestamp = fields.DateTime(required=True)
    dist_from_star_ls = fields.Float(allow_none=True, data_key="DistFromStarLS")
    market_id = fields.Integer(allow_none=True, data_key="MarketID")
    station_allegiance = fields.String(allow_none=True, data_key="StationAllegiance")
    station_name = fields.String(allow_none=True, data_key="StationName")
    station_type = fields.String(allow_none=True, data_key="StationType")
    @post_load
    def to_domain(self, data, **kwargs) -> Message:
        # Convert the validated dict straight into the domain object.
        return Message(**data)
class JournalV1Schema(BaseSchema):
    """Top-level EDDN journal/1 envelope: header + message."""
    header = fields.Nested(HeaderSchema, required=True)
    message = fields.Nested(MessageSchema, required=True)
    @post_load
    def to_domain(self, data, **kwargs) -> JournalV1:
        # Convert the validated dict straight into the domain object.
        return JournalV1(**data)
|
# Generated by Django 2.2.4 on 2019-10-13 15:37
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
    """Auto-generated: re-declares Payment.amount as a django-money field."""
    dependencies = [
        ('billings', '0005_auto_20191013_2215'),
    ]
    operations = [
        migrations.AlterField(
            model_name='payment',
            name='amount',
            # 14 digits / 2 decimal places; amounts default to Malaysian Ringgit.
            field=djmoney.models.fields.MoneyField(decimal_places=2, default_currency='MYR', max_digits=14),
        ),
    ]
|
# Primality test using two optimization methods in comparison with a naive solution that
# iterates through all the positive integers less than the input number, to check if any divides it
# 1-Testing integers only up to the square root of the input number (trial division)
# 2-Testing integers only of the form 6k ± 1 after checking if the input number is even or divisible by 3
# given that all prime numbers, except 2 and 3, are of this form
def primality(n):
    """Return True when n is prime, using trial division by 2, 3, then 6k +/- 1
    candidates up to sqrt(n).

    When a composite is detected, the divisor found is stored on the function
    attribute ``primality.m`` (read by the caller to report it). ``primality.m``
    is only set when the function returns False for n > 1.
    """
    if(n <= 1): # robustness: 0, 1 and negatives are not prime
        return False
    if(n <= 3): # 2 and 3 are prime numbers
        return True
    if(n % 2 == 0): # Input number is even
        primality.m = 2
        return False
    if(n % 3 == 0): # Input number is divisible by 3
        primality.m = 3
        return False
    m = 5 # Trying numbers of the form 6k ± 1 up to the square root of the input number
    while(m * m <= n):
        if(n % m == 0): # 6k - 1 candidate
            primality.m = m
            return False
        m = m + 2
        if(n % m == 0): # 6k + 1 candidate
            primality.m = m
            return False
        m = m + 4
    return True
# Simple CLI: only accept a positive integer strictly greater than one, then
# report primality (and the divisor stored on primality.m when composite).
nbr = input("\nHello ! Enter a positive number greater than one to check if it's prime: ")
if(nbr.isdigit() and int(nbr) > 1):
    print("\nThe number you entered is prime") if primality(int(nbr)) else print("\nThe number you entered is not prime. It's divisible by " + str(primality.m))
else:
    print("\nYou didn't enter a valid number ! By definition, a prime number must be a positive integer greater than one")
|
# test 1
customs = ["lili", "amy", "sam", "fox"]
# Greet each user; a non-empty list is truthy, so the else only fires for [].
if customs:
    for custom in customs:
        if custom == "amy":
            print("hello amy, would you like to see some status report")
        else:
            print("hello " + custom + " , thanks for logging in.")
else:
    print("We need to find some users")
print("===========================================================================================")
# test 2
names = ["daming","lili","lingling","rex"]
new_names = ["Daming", "amy", "sam"]
# Case-insensitive clash check between the two name lists.
for name in names:
    for new_name in new_names:
        if new_name.lower() == name.lower():
            print(new_name + " you need find a new name")
print("===========================================================================================")
# test 3
nums = [num for num in range(1,10)]
for num in nums:
    if num == 1:
        print("1st")
    elif num == 2:
        print("2nd")
    else:
        print(str(num) +"th")
'''
在诸如 == 、 >= 和 <= 等比较运算符两边各添加一个空格,例如, if
age < 4: 要比 if age<4: 好
''' |
#!/usr/bin/env python
# coding: utf-8
# In[36]:
import docplex.mp.model as cpx
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
from math import sqrt
import networkx as nx
# In[88]:
location = 'C:/Users/dell pc/Desktop/Sem 8/INDR460-OPERATIONS RESEARCH APPLICATIONS/assignments/assignment 5/TSPdata.xlsx'
# NOTE(review): `sheetname` was renamed `sheet_name` in pandas >= 0.21, so
# this line only works on older pandas — confirm the pinned version.
df_r =pd.read_excel(location,sheetname='Sheet3',header=None,skiprows=2)
# Column 0: node ids, column 1: latitudes, column 2: longitudes.
lat = df_r[1].tolist()
long = df_r[2].tolist()
# df_r
df_r[0].tolist()
len(df_r[0].tolist())
# In[89]:
class MyClass(object):
    """Plain record for one TSP node: id plus (lat, long) coordinates."""
    def __init__(self,n_id, lat,long):
        self.n_id = n_id  # node identifier (first spreadsheet column)
        self.lat = lat  # latitude (second column)
        self.long = long  # longitude (third column)
my_objects = []
# In[90]:
# Build one MyClass record per spreadsheet row.
for i in range(len(df_r[0].tolist())):
    my_objects.append(MyClass(df_r[0].tolist()[i],df_r[1].tolist()[i],df_r[2].tolist()[i]))
#my_objects[14].long
t_n = len(df_r[0].tolist())
# In[91]:
# Pairwise Euclidean distance matrix between every pair of nodes.
cij = [[0 for i in range(t_n)] for j in range(t_n)]
for i in range(t_n):
    for j in range(t_n):
        cij[i][j] = abs(sqrt((my_objects[i].lat-my_objects[j].lat)**2 + (my_objects[i].long-my_objects[j].long)**2))
# In[92]:
# TSP as a MIP: binary arc variables, no self-loops, exactly one outgoing
# and one incoming arc per node (assignment relaxation).
opt_model = cpx.Model(name="MIP Model")
xij = opt_model.binary_var_matrix(t_n, t_n)
for i in range(t_n):
    opt_model.add_constraint(xij[i,i] == 0)
for i in range(t_n):
#    print(obj.number)
    opt_model.add_constraint(opt_model.sum(xij[i,j] for j in range(t_n)) == 1)
for i in range(t_n):
#    print(obj.number)
    opt_model.add_constraint(opt_model.sum(xij[j,i] for j in range(t_n)) == 1)
# In[93]:
opt_model.minimize(opt_model.sum(xij[i,j]*cij[i][j] for i in range(t_n) for j in range(t_n)))
# NOTE(review): a live API credential is committed in source — rotate this
# key and load it from the environment instead.
url = 'https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/'
key = 'api_555476e8-b9e9-4d02-a523-cd50d8bbd4d5'
k_n = 0
# In[94]:
# Iterative subtour elimination: solve, find cycles in the arc solution,
# and add a cut per subtour until a single tour remains (max 20 rounds).
for it in range(20):
    s = opt_model.solve(url=url,key=key)
    G = nx.DiGraph()
    G.add_nodes_from(range(t_n))
    for i in range(t_n):
        for j in range(t_n):
            if xij[i,j].solution_value == 1:
                #print(i,j)
                G.add_edge(i,j)
    a_list = list(nx.simple_cycles(G))
    if len(a_list) == 1:
        break
    for a_it in range(len(a_list)-1):
        k = a_list[a_it]
        print(k)
        k_n = k_n +1
        a_n = list(range(t_n))
        for i in k:
            a_n.remove(i)
        # Cut: force exactly one arc leaving the subtour's node set.
        opt_model.add_constraint(opt_model.sum(xij[k[i],j] for i in range(len(k)) for j in a_n) == 1)
####################################################################################################################
# In[95]:
from matplotlib.pyplot import figure
figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k')
# nx.draw(G,with_labels=True,pos=nx.circular_layout(G))
nx.draw(G,with_labels=True)
fig= plt.figure(figsize=(60,30))
plt.show()
# In[107]:
print("The minimum distance tour is objective")
print("The number of cut", k_n)
opt_model.print_solution()
# In[ ]:
# In[ ]:
|
# -*- coding: utf-8 -*-
# Import libraries
import os
import sys
import string
import copy
import datetime
import numpy as np
import pandas as pd
import time
from random import randint
from bs4 import BeautifulSoup
from selenium import webdriver
from Scraper import*
#------------------------------------------------------------------------------
# Functions for continuously saving all the listings
def scrape():
    # Instantiate the Scraper — presumably its constructor performs the
    # actual fetching/saving, since nothing else is called on it here.
    # TODO confirm against the Scraper class.
    scraper = Scraper()
    print('Done!')
|
from helper import Helper
from main_loader import Main_loader
from histogram_comparer import Hsv
import os
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from sklearn import tree
from sklearn.metrics import accuracy_score
class Trivial_algo(Helper):
    """Baseline cloudiness estimator.

    Computes HSV histograms of fisheye sky images (main_folder/YYYY/MM/DD/*),
    groups them by standardized timestamp, and fits a decision tree against
    SYNOP weather observations.
    """
    def __init__(self, main_folder, init_loader=True):
        """main_folder: root of the image tree; init_loader also loads the
        SYNOP reports and cached image metadata via Main_loader."""
        Helper.__init__(self)
        self.main_folder = main_folder
        self.histograms = Hsv()
        self.hist_dict = {}  # standardized timestamp -> stacked HSV histograms
        if init_loader:
            self.main_loader = Main_loader({'koliba': 'SYNOPs/BA_Koliba_SYNOP_2014-2016.txt', 'airport':'SYNOPs/BA_Letisko_SYNOP_2014-2016.txt' }, None)
            self.main_loader.create_synops_dict()
            self.main_loader.load_obj('imgs')
            self.main_loader.strip_to_datetime()
    def iterate_year(self):
        """Walk the year folders (pre-2017 only) and process each month."""
        years = os.listdir(self.main_folder)
        for y in years:
            print(y)
            if int(y) < 2017:
                self.iterate_month(y)
            #return
    def iterate_month(self, year):
        """Process every month folder of *year*."""
        months = [self.main_folder + '/' + year +'/' + m for m in os.listdir(self.main_folder + '/' + year)]
        #print (months)
        for m in months:
            print('month ' + str(m))
            self.iterate_days(m)
    def iterate_days(self, year_month):
        """Histogram every image of every day under *year_month* and stack it
        into hist_dict keyed by standardized date (also covers what
        create_standardized_dict / iterate_days_for_histo_creation did).
        """
        #fisheyes/2014/06/24/fisheye_1414641474324_1403630832716_00013_20140624_192851_jpg
        days = os.listdir(year_month)
        y_m_d = [year_month + '/' + d for d in days]
        for img_dir in y_m_d:
            for img in os.listdir(img_dir):
                path = img_dir + '/' + img
                img_read = cv.imread(path)
                histo = self.histograms.histogram_hsv(img_read)
                if histo is None:
                    # Fixed: was the Python-2-only `print path` statement;
                    # the parenthesized form prints the same on both 2 and 3
                    # and matches the rest of the class.
                    print(path)
                    break
                standard_key = self.standardize_date(img)
                if standard_key in self.hist_dict.keys():
                    self.hist_dict[standard_key] = np.vstack((self.hist_dict[standard_key], [histo]))
                else:
                    self.hist_dict[standard_key] = [histo]
                # Sanity check: images are expected to be 150x150 BGR.
                if img_read.shape != (150,150,3):
                    print(img_read.shape)
    def iterate_dataset(self):
        """Debug helper: dump every histogram key and its SYNOP average."""
        print(len(set(self.hist_dict.keys())))
        for k in self.hist_dict.keys():
            print(k)
            print(self.main_loader.synops_avg[k])
    def create_tree(self):
        """Fit a decision tree on per-timestamp histogram stacks.

        February 2015 is held out for validation; everything else trains.
        NOTE(review): hist_dict values may be ragged stacks — confirm the
        tree accepts this shape (flatten_dset avoids the issue).
        """
        clf = tree.DecisionTreeClassifier()
        common_keys = set(self.hist_dict.keys()).intersection(set(self.main_loader.synops_avg.keys()))
        train = set()
        validate = set()
        for i in common_keys:
            if '201502' in i:
                validate.add(i)
            else:
                train.add(i)
        print('train size ' + str(len(train)))
        print('validate size ' + str(len(validate)))
        hist_sorted = [self.hist_dict[key] for key in sorted(train)] # access histograms from hist_dict based on cmn keys with synops
        synops_sorted = [int(self.main_loader.synops_koliba[key]) for key in sorted(train)]
        hist_sorted_v = [self.hist_dict[key] for key in sorted(validate)]
        synops_sorted_v = [int(self.main_loader.synops_koliba[key]) for key in sorted(validate)]
        clf.fit(hist_sorted, synops_sorted)
        y_pred = list(clf.predict(hist_sorted_v))
        y_true = synops_sorted_v
        print(accuracy_score(y_true, y_pred))
        self.absolute_results(y_true, y_pred)
    def flatten_dset(self):
        """Per-image variant of create_tree: each histogram becomes one sample
        labelled with its timestamp's SYNOP value; February 2015 validates."""
        common_keys = set(self.hist_dict.keys()).intersection(set(self.main_loader.synops_avg.keys()))
        train = set()
        validate = set()
        for i in common_keys:
            if '201502' in i:
                validate.add(i)
            else:
                train.add(i)
        print('train size ' + str(len(train)))
        print('validate size ' + str(len(validate)))
        hist_sorted = list()
        synops_sorted = list()
        for k in sorted(train):
            for v in self.hist_dict[k]:
                hist_sorted.append(v)
                synops_sorted.append(int(self.main_loader.synops_koliba[k]))
        hist_sorted_v = list()
        synops_sorted_v = list()
        for k in sorted(validate):
            for v in self.hist_dict[k]:
                hist_sorted_v.append(v)
                synops_sorted_v.append(int(self.main_loader.synops_koliba[k]))
        print('sizes:')
        print(len(hist_sorted))
        print(len(synops_sorted))
        print(len(hist_sorted_v))
        print(len(synops_sorted_v))
        self.fit_tree(hist_sorted, synops_sorted, hist_sorted_v, synops_sorted_v)
    def fit_tree(self, hist_sorted, synops_sorted, hist_sorted_v, synops_sorted_v):
        """Fit a decision tree on flattened samples and report accuracy."""
        clf = tree.DecisionTreeClassifier()
        print(len(hist_sorted))
        print('llen')
        print(len(hist_sorted[0]))
        print(hist_sorted[0].shape)
        clf.fit(hist_sorted, synops_sorted)
        y_pred = list(clf.predict(hist_sorted_v))
        y_true = synops_sorted_v
        print(accuracy_score(y_true, y_pred))
        self.absolute_results(y_true, y_pred)
    def absolute_results(self, y_true, y_predict):
        """Print the misclassification count and mean absolute label error."""
        err = 0
        rel_err = 0
        for t,p in zip(y_true, y_predict):
            if t != p:
                err+=1
                rel_err += abs(t - p)
        # NOTE(review): on Python 2 this is integer (floor) division when
        # both operands are ints — confirm whether a float mean is intended.
        rel_err = rel_err / len(y_true)
        print('abs error count on size ' + str(len(y_true)))
        print(err)
        print('relative err count')
        print(rel_err)
# Build the classifier over the whole 'fisheyes' image tree, then train and
# evaluate the flattened (per-image) decision tree.
t_algo = Trivial_algo('fisheyes', True)
t_algo.iterate_year()
t_algo.flatten_dset()
#t_algo.iterate_dataset()
#t_algo.create_tree()
|
# Author Yinsen Miao
import pandas as pd
import numpy as np
import lightgbm
from sklearn.metrics import mean_squared_error
from math import sqrt
import seaborn as sns
from optuna.samplers import TPESampler
from sklearn.model_selection import train_test_split
import optuna
sns.set_context("poster")
dat = pd.read_pickle("../data/cleandata2.pkl")
# filter the data via the timestamp
# end_date = "20141231"
end_date = "20201231"
dat.query("SALEDATE <= '%s'" % end_date, inplace=True)
# data engineering
origin_date = "1990-01-01" # let us use this date as the origin
# TIME: months (30-day units) elapsed since the origin date.
dat["TIME"] = (dat["SALEDATE"] - pd.to_datetime(origin_date)).dt.days / 30
dat["MONTH"] = dat["SALEDATE"].dt.month
# Log-transform the heavily skewed price/area columns.
dat["LOGPRICE"] = np.log(dat["PRICE"])
dat["LOGFAIRMARKETTOTAL"] = np.log(dat["FAIRMARKETTOTAL"])
dat["LOGLOTAREA"] = np.log(dat["LOTAREA"])
dat["LOGFINISHEDLIVINGAREA"] = np.log(dat["FINISHEDLIVINGAREA"])
dat["PRICEPERSQFT"] = dat["FAIRMARKETTOTAL"] / dat["LOTAREA"]
# segment the house based on FAIRMARKETTOTAL of 2012
# Four quantile cut points -> three tiers; rows outside [5%, 95%] become NaN
# and are dropped below as outliers.
tiers_cut = dat["FAIRMARKETTOTAL"].quantile([0.05, 0.35, 0.65, 0.95]).tolist()
dat["TIERS"] = pd.cut(dat["FAIRMARKETTOTAL"], tiers_cut, labels=["Bottom", "Middle", "Top"])
# let us drop all data that are considered as outliers
dat.dropna(axis=0, inplace=True)
# convert object column to category type
for col in ["NEIGHCODE", "EXTFINISH_DESC", "STYLEDESC", "MUNICODE", "TIERS"]:
    dat[col] = dat[col].astype("category")
# split the data into training and testing based on 80-20 rule
train_dat, valid_dat = train_test_split(dat, train_size=0.8, random_state=2021)
ntrain, nvalid = len(train_dat), len(valid_dat)
print("ntrain = %d, ntest = %d" % (ntrain, nvalid))
# continuous features
x_feats = [
    'TIME', 'GRADERANK', 'CDURANK', 'SCHOOLRANK',
    'STORIES', 'BEDROOMS', 'ADJUSTBATHS', 'BSMTGARAGE', 'FIREPLACES', 'YEARBLT', 'BASEMENT',
    'LOGLOTAREA', 'LOGFINISHEDLIVINGAREA', 'PRICEPERSQFT',
    'LATITUDE', 'LONGITUDE', 'ANXIETY', 'OLD', 'POOR', 'VACANT'
]
# nominal features
x_categorical_feats = [
    'NEIGHCODE', 'EXTFINISH_DESC', 'STYLEDESC', 'MONTH', 'TIERS'
]
# target variable log price
y_feats = [
    "LOGPRICE"
]
# NOTE(review): these module-level Datasets are rebuilt inside objective();
# the two below appear unused afterwards.
train_dataloader = lightgbm.Dataset(data=train_dat[x_feats + x_categorical_feats],
                                    label=train_dat[y_feats],
                                    categorical_feature=x_categorical_feats)
valid_dataloader = lightgbm.Dataset(data=valid_dat[x_feats + x_categorical_feats],
                                    label=valid_dat[y_feats],
                                    categorical_feature=x_categorical_feats)
def objective(trial):
    """Optuna objective: train one LightGBM regressor on LOGPRICE with the
    trial's hyper-parameters and return the validation RMSE (log-price space)."""
    # create LGBM dataset
    # ref https://www.kaggle.com/c/home-credit-default-risk/discussion/58950
    train_dataloader = lightgbm.Dataset(data=train_dat[x_feats + x_categorical_feats],
                                        label=train_dat[y_feats],
                                        categorical_feature=x_categorical_feats)
    valid_dataaloader = lightgbm.Dataset(data=valid_dat[x_feats + x_categorical_feats],
                                         label=valid_dat[y_feats],
                                         categorical_feature=x_categorical_feats)
    # use the parameter
    parameters = {
        'objective': trial.suggest_categorical('objective', ['mae', 'rmse', 'huber', 'quantile', 'mape', 'poisson']),
        'metric': ['rmse'],
        'boosting': 'gbdt',
        'lambda_l1': round(trial.suggest_loguniform('lambda_l1', 1e-8, 10.0), 2),
        'lambda_l2': round(trial.suggest_loguniform('lambda_l2', 1e-8, 10.0), 2),
        'num_leaves': trial.suggest_int('num_leaves', 2, 256),
        'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
        'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
        'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
        'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
        'learning_rate': trial.suggest_loguniform('learning_rate', 0.001, 0.5),
        'verbose': -1,
        'seed': 2021
    }
    # Up to 10k boosting rounds; stop when validation RMSE stalls for 1k rounds.
    model = lightgbm.train(params=parameters,
                           train_set=train_dataloader,
                           valid_sets=valid_dataaloader,
                           num_boost_round=10000,
                           early_stopping_rounds=1000,
                           verbose_eval=False)
    y_valid = model.predict(data=valid_dat[x_feats + x_categorical_feats])
    # return sqrt(mean_squared_error(valid_data["PRICE"], np.exp(y_valid))) # compare predicted price vs the truth
    return sqrt(mean_squared_error(valid_dat["LOGPRICE"], y_valid)) # compare predicted price vs the log truth
if __name__ == "__main__":
    # NOTE(review): `sampler` is created but never passed to create_study,
    # so Optuna's default sampler is actually used — pass sampler=sampler
    # if the seeded TPE behaviour is intended.
    sampler = TPESampler(seed=2020)
    # Results persist in a local SQLite file so the study can be resumed.
    study = optuna.create_study(direction="minimize",
                                study_name="fit housing",
                                load_if_exists=True,
                                storage="sqlite:///lgbm_model_index_%s.db" % end_date,
                                pruner=optuna.pruners.MedianPruner(n_startup_trials=20))
    study.optimize(objective, n_trials=1000, show_progress_bar=True)
    print("Number of finished trials: {}".format(len(study.trials)))
    print("Best trial:")
    trial = study.best_trial
    print("  Value: {}".format(trial.value))
    print("  Params: ")
    for key, value in trial.params.items():
        print("    {}: {}".format(key, value))
def cut_fruits(fruits):
    """Cut every known fruit into two halves, leaving everything else whole.

    For odd-length names the first half gets the extra character; items not
    present in the preloaded FRUIT_NAMES pass through unchanged.
    """
    result = []
    for item in fruits:
        if item not in FRUIT_NAMES:
            result.append(item)
            continue
        mid = (len(item) + 1) // 2
        result.extend((item[:mid], item[mid:]))
    return result
'''
Description
You are a Fruit Ninja, your skill is cutting fruit. All the fruit will be
cut in half by your knife. For example:
[ "apple", "pear", "banana" ] -->
["app", "le", "pe", "ar", "ban", "ana"]
As you see, all fruits are cut in half. You should pay attention to "apple":
if you cannot cut a fruit into equal parts, then the first part will have
an extra character.
You should only cut fruit, other things should not be cut, such as "bomb":
[ "apple", "pear", "banana", "bomb"] -->
["app", "le", "pe", "ar", "ban", "ana", "bomb"]
The valid fruit names are preloded for you as:
FRUIT_NAMES
Task
Complete function cut_fruits that accepts argument fruits.
Returns the result in accordance with the rules above.
OK, that's all. I guess this is a 7kyu kata. If you agree, please rank
it as 7kyu and vote very;-) If you think this kata is too easy or too hard,
please shame me by rank it as you want and vote somewhat or none :[
https://www.codewars.com/kata/i-guess-this-is-a-7kyu-kata-number-6-fruit-ninja-i/python
'''
|
# coding: utf-8
import os
import time
import numpy as np
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()  # app-wide SQLAlchemy handle; presumably bound to the Flask app elsewhere via init_app — TODO confirm
class DaycareCenter(db.Model):
    """A daycare centre with its contact details and location."""
    __tablename__ = 'daycare_center'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): Integer column type for a name — likely should be String.
    name = db.Column(db.Integer, nullable=False)
    chief_staff_name = db.Column(db.String, nullable=False)
    address = db.Column(db.String, nullable=False)
    # Phone number stored as three separate segments.
    ph_num1 = db.Column(db.String, nullable=False)
    ph_num2 = db.Column(db.String, nullable=False)
    ph_num3 = db.Column(db.String, nullable=False)
    loc_id = db.Column(db.ForeignKey('location.id'))
    loc = db.relationship('Location', primaryjoin='DaycareCenter.loc_id == Location.id', backref='daycare_centers')
class Location(db.Model):
    """A geographic district referenced by centres, users, reports and videos."""
    __tablename__ = 'location'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): Integer column type for a name — likely should be String.
    name = db.Column(db.Integer, nullable=False)
class ReportList(db.Model):
    """A police dispatch report created for a detected video."""
    __tablename__ = 'report_list'
    id = db.Column(db.Integer, primary_key=True)
    # Epoch seconds shifted by +9h when written (see add_report_db).
    time = db.Column(db.Integer, nullable=False)
    police_name = db.Column(db.String, nullable=False)
    status = db.Column(db.String, nullable=False)
    loc_id = db.Column(db.ForeignKey('location.id'))
    dc_id = db.Column(db.ForeignKey('daycare_center.id'))
    vid_id = db.Column(db.ForeignKey('video.id'))
    dc = db.relationship('DaycareCenter', primaryjoin='ReportList.dc_id == DaycareCenter.id', backref='report_lists')
    loc = db.relationship('Location', primaryjoin='ReportList.loc_id == Location.id', backref='report_lists')
    vid = db.relationship('Video', primaryjoin='ReportList.vid_id == Video.id', backref='report_lists')
class User(db.Model):
    """An account for an official (police/department staff) using the system."""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String, nullable=False)
    # NOTE(review): password column — confirm it stores a hash, not plaintext.
    pw = db.Column(db.String, nullable=False)
    office_num = db.Column(db.Integer, nullable=False)
    department = db.Column(db.String, nullable=False)
    name = db.Column(db.String, nullable=False)
    # Phone number stored as three separate segments.
    ph_num1 = db.Column(db.String, nullable=False)
    ph_num2 = db.Column(db.String, nullable=False)
    ph_num3 = db.Column(db.String, nullable=False)
    loc_id = db.Column(db.ForeignKey('location.id'))
    loc = db.relationship('Location', primaryjoin='User.loc_id == Location.id', backref='users')
class Video(db.Model):
    """A detection clip recorded at a daycare centre."""
    __tablename__ = 'video'
    id = db.Column(db.Integer, primary_key=True)
    # Epoch seconds shifted by +9h when written (see add_video_db).
    detection_time = db.Column(db.Integer, nullable=False)
    name = db.Column(db.String, nullable=False)
    accuracy = db.Column(db.Float, nullable=False)
    # Flipped to 1 once a report has been filed (see add_report_db).
    status = db.Column(db.String, nullable=False)
    loc_id = db.Column(db.ForeignKey('location.id'))
    dc_id = db.Column(db.ForeignKey('daycare_center.id'))
    dc = db.relationship('DaycareCenter', primaryjoin='Video.dc_id == DaycareCenter.id', backref='videos')
    loc = db.relationship('Location', primaryjoin='Video.loc_id == Location.id', backref='videos')
def add_video_db(db, video_path, daycare_name, accuracy, status=0):
    """Insert and return a Video row for a detection at the named centre.

    Raises if *daycare_name* does not resolve to exactly one DaycareCenter
    (query ends in ``.one()``).
    """
    daycare_center = DaycareCenter.query.filter_by(name=daycare_name).one()
    video = Video(
        # Epoch seconds + 9h — presumably to shift UTC to KST; TODO confirm.
        detection_time = time.time() + 9 * 3600,
        name = os.path.basename(video_path),
        accuracy = round(accuracy,2),
        status = status,
        loc_id = daycare_center.loc_id,
        dc_id = daycare_center.id
    )
    db.session.add(video)
    db.session.commit()
    return video
def add_report_db(db, video_info):
    """File a dispatch report for *video_info* and mark the video reported.

    The responding station is picked at random from a hard-coded list; the
    matching Video row's status is set to 1 in the same commit.
    """
    police_station = ['답십리지구대', '용신지구대', '청량리파출소', '제기파출소', '전농1파출소', '전농2파출소','장안1파출소', '장안2파출소', '이문지구대', '휘경파출소', '회기파출소']
    video = Video.query.filter_by(name=video_info.name).one()
    video.status = 1
    report_data = ReportList(
        # Epoch seconds + 9h — presumably to shift UTC to KST; TODO confirm.
        time = time.time() + 9 * 3600,
        police_name = np.random.choice(police_station, 1)[0],
        status = '출동 전',
        loc_id = video.loc_id,
        dc_id = video.dc_id,
        vid_id = video.id
    )
    db.session.add(report_data)
    db.session.commit()
    return report_data
|
import logging
import pymc3 as pm
import theano.tensor as tt
from theano.compile.ops import as_op
import numpy as np
from scipy import stats
logger = logging.getLogger('root')  # NOTE(review): this is a logger *named* "root", not the actual root logger (logging.getLogger()) — confirm intent
# TODO has to be tested with sample data, to make sure that it works properly.
def add_inv_logit_normal_model(hierarchical_model):
    """WIP: attach an inverse-logit Bernoulli model to *hierarchical_model*.

    Currently disabled — raises NotImplementedError immediately, so the code
    below never executes (note it still references an undefined ``y`` and an
    invlogit call whose signature looks wrong — to be fixed before enabling).
    """
    raise NotImplementedError("work in progress . . . ")
    with pm.Model() as hierarchical_model.pymc_model:
        mu = pm.Normal('mu', mu=0, sd=2)
        theta = pm.invlogit("p", mu)
    observations = []
    def addObservations():
        # Deferred hook: adds one Bernoulli likelihood per group when called.
        with hierarchical_model.pymc_model:
            for i in range(hierarchical_model.n_groups):
                observations.append(pm.Bernoulli(f'y_{i}', theta[i], observed=y[i]))
    hierarchical_model.add_observations_function = addObservations
|
from django.shortcuts import render,redirect
from django.template.response import TemplateResponse
from shinelaundry.models import slider,image,faq,services_info,index_image,personal_info,whychooseus,socialmediaaccount
from django.core.mail import send_mail
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# obj = slider.objects.all()
# context = {
# 'obj':obj
# }
# Create your views here.
def index(request):
    """Home page; on POST, emails the canned HTML template to the submitted address."""
    if request.method == "POST":
        to = request.POST.get('toemail')
        # content = request.POST.get('content')
        # Render the template once and send it as both HTML and plain text.
        html_content = render_to_string("email_template.html")
        text_content = strip_tags(html_content)
        email = EmailMultiAlternatives(
            #subject
            "testing",
            #content
            text_content,
            #from email
            settings.EMAIL_HOST_USER,
            #rec list
            [to]
        )
        email.attach_alternative(html_content,"text/html")
        email.send()
        # NOTE(review): `to` is used unvalidated (may be None/arbitrary) and
        # no success/failure feedback reaches the template.
    obj1 = services_info.objects.all()
    obj = slider.objects.all()
    obj3 = index_image.objects.all()
    obj4 = personal_info.objects.all()
    obj5 = whychooseus.objects.all()
    obj6 = socialmediaaccount.objects.all()
    context = {
        'obj':obj,
        'obj1':obj1,
        'obj3':obj3,
        'obj4':obj4,
        'obj5':obj5,
        'obj6':obj6
    }
    return TemplateResponse (request, 'index.html', context)
def about(request):
    """Render the About page with the shared contact/social-media context."""
    context = {
        'obj4': personal_info.objects.all(),
        'obj6': socialmediaaccount.objects.all(),
    }
    return TemplateResponse(request, 'about.html', context)
def contact(request):
    """Render the Contact page with the shared contact/social-media context."""
    context = {
        'obj4': personal_info.objects.all(),
        'obj6': socialmediaaccount.objects.all(),
    }
    return TemplateResponse(request, 'contact.html', context)
def vision(request):
    """Render the Vision page with the shared contact/social-media context."""
    context = {
        'obj4': personal_info.objects.all(),
        'obj6': socialmediaaccount.objects.all(),
    }
    return TemplateResponse(request, 'vision.html', context)
def services(request):
    """Render the Services page: service list plus the shared context."""
    context = {
        'obj1': services_info.objects.all(),
        'obj4': personal_info.objects.all(),
        'obj6': socialmediaaccount.objects.all(),
    }
    return TemplateResponse(request, 'services.html', context)
def portfolio(request):
    """Render the Portfolio page: gallery images plus the shared context."""
    context = {
        'obj2': image.objects.all(),
        'obj4': personal_info.objects.all(),
        'obj6': socialmediaaccount.objects.all(),
    }
    return TemplateResponse(request, 'portfolio.html', context)
def faq1(request):
    """Render the FAQ page: FAQ entries plus the shared context."""
    context = {
        'obj': faq.objects.all(),
        'obj4': personal_info.objects.all(),
        'obj6': socialmediaaccount.objects.all(),
    }
    return TemplateResponse(request, 'faq.html', context)
# def sendanemail(request):
# if request.method == "POST":
# to = request.POST.get('toemail')
# # content = request.POST.get('content')
# html_content = render_to_string("email_template.html")
# text_content = strip_tags(html_content)
# email = EmailMultiAlternatives(
# #subject
# "testing",
# #content
# text_content,
# #from email
# settings.EMAIL_HOST_USER,
# #rec list
# [to]
# )
# email.attach_alternative(html_content,"text/html")
# email.send()
# return render(
# request,
# 'index.html',
# {
# 'title':'send an email'
# }
# )
|
from arago.actors.routers.on_demand.on_demand_router import OnDemandRouter
|
#!/usr/bin/python
import os
import sys
import pickle
import signal
import urllib2
from datetime import datetime
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
# Cached top-level BDII LDIF files and the FCR exclude list on disk.
cache_dir='/var/lib/bdii/gip/cache/gip/top-urls.conf/'
cache_file = "/var/cache/fcr/exclude.ldif"
# SAM programmatic interface: CMS blacklist profile, XML output.
sam_url = 'http://grid-monitoring.cern.ch/myegi/sam-pi/status_of_service_in_profile?vo_name=cms&profile_name=CMS_BLACKLIST&output=xml'
def handler(signum, frame):
    # SIGALRM (signal 14) handler: abort a hung blacklist download with an
    # IOError. (Python 2 raise syntax — this module is Python 2 throughout.)
    if ( signum ==14 ):
        raise IOError, "Get Black List Timed Out!"
def get_critical():
    """Download the SAM blacklist feed and return the hostnames whose
    service status is CRITICAL.

    Exits the process with status 1 if the feed cannot be fetched.
    """
    try:
        response = urllib2.urlopen(sam_url).read()
    except Exception as e:
        # Fix: 'except Exception, e' is Python-2-only; 'as e' works on 2.6+ and 3.
        sys.stderr.write(str(e))
        sys.exit(1)
    critical = []
    root = ElementTree.XML(response)
    for profile in root.findall("Profile"):
        for service in profile.getchildren():
            hostname = service.attrib['hostname']
            status = service.attrib['status']
            # Only CRITICAL services end up on the blacklist.
            if status == 'CRITICAL':
                critical.append(hostname)
    return critical
def get_dns():
    """Collect the DNs of GlueCE entries publishing a cms access-control rule.

    Scans the cached LDIF dumps first, then the live local BDII (for entries
    pending delayed deletion), and returns a dict mapping each matching
    ``dn:`` line to the list of its cms GlueCEAccessControlBaseRule lines.
    """
    # Concatenate every cached dump; LDIF folds long lines with "\n ", so
    # those continuations are re-joined before parsing.
    command = "cat %s/*.ldif" %(cache_dir)
    output = os.popen(command).read()
    output = output.replace('\n ', '')
    output = output.replace('\n\n', '\n')
    dns = {}
    for line in output.split('\n'):
        attribute = line.split(':')[0]
        if ( attribute.lower() == 'dn' ):
            # Remember the most recent dn line; subsequent rule lines belong to it.
            dn = line
        if ( attribute.lower() == 'glueceaccesscontrolbaserule' ):
            type = line.split(':')[1].strip().lower()
            if ( type == 'vo' or type == 'voms' ):
                value = line.split(':')[2].lower().strip()
                if ( value == 'cms' or value[:5] == '/cms/' ):
                    if not dn in dns:
                        dns[dn] = []
                    dns[dn].append(line)
    # Check for delayed delete entries
    command = "ldapsearch -LLL -x -h localhost -p 2170 -b mds-vo-name=local,o=grid objectClass=GlueCEAccessControlBase 2>/dev/null"
    pipe = os.popen(command)
    output=pipe.read()
    pipe.close()
    output = output.replace('\n ', '')
    output = output.replace('\n\n', '\n')
    dns_db = {}
    for line in output.split('\n'):
        attribute = line.split(':')[0]
        if ( attribute.lower() == 'dn' ):
            dn = line
        if ( attribute.lower() == 'glueceaccesscontrolbaserule' ):
            type = line.split(':')[1].strip().lower()
            if ( type == 'vo' or type == 'voms' ):
                value = line.split(':')[2].lower().strip()
                if ( value == 'cms' or value[:5] == '/cms/' ):
                    if not dn in dns_db:
                        dns_db[dn] = []
                    dns_db[dn].append(line)
    # Merge entries that exist only in the live database into the result.
    for dn in dns_db:
        if not dn in dns:
            dns[dn]=dns_db[dn]
    return dns
def get_hostname(dn):
    """Extract the CE hostname (without port) from an LDIF ``dn:`` line.

    Returns None when the dn contains no GlueCEUniqueID component.
    """
    hostname = None
    # Drop the leading "dn:" marker, then scan the comma-separated RDNs.
    for component in dn[3:].strip().split(','):
        key = component.split('=')[0]
        if key.lower() == 'glueceuniqueid':
            # The value looks like "host:port[...]"; keep only the host part.
            hostname = component.split('=')[1].split(':')[0]
    return hostname
def get_sitename(dn):
    """Extract the site name from an LDIF ``dn:`` line.

    Returns the value of the mds-vo-name RDN unless it is the generic
    'local' branch, in which case (or when absent) None is returned.
    """
    sitename = None
    for component in dn.split(','):
        key = component.split('=')[0]
        if key.lower() != 'mds-vo-name':
            continue
        value = component.split('=')[1]
        # 'local' is the shared root branch, not a real site.
        if value.lower() != 'local':
            sitename = value
    return sitename
def get_header():
    """Return the timestamped banner written at the top of the exclude LDIF file."""
    template = '''############################################################################
#
# FCR exclude LDIF file
# Created at %s
#
############################################################################
'''
    # datetime.ctime(datetime.now()) == datetime.now().ctime(); keep the
    # original spelling so the emitted text is identical.
    return template % (datetime.ctime(datetime.now()),)
def get_section():
    """Return the banner comment introducing the cms VO section of the file."""
    banner = '''############################################################################
# VO cms
############################################################################
'''
    return banner
if __name__ == "__main__":
    # Create Timeout Alarm
    # The blacklist download is bounded to 10 seconds via SIGALRM; the
    # handler above raises IOError when the alarm (signal 14) fires.
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(10)
    critical_services = get_critical()
    signal.alarm(0) # Disable the alarm
    dns = get_dns()
    # Group the blacklisted DNs as site -> hostname -> [dn lines].
    sites = {}
    for dn in dns:
        hostname = get_hostname(dn)
        if hostname in critical_services:
            sitename = get_sitename(dn)
            if not sitename in sites:
                sites[sitename] = {}
            if not hostname in sites[sitename]:
                sites[sitename][hostname] = []
            sites[sitename][hostname].append(dn)
    # Emit one LDIF "changetype: modify / delete" stanza per blacklisted CE
    # so the cms access rules get stripped from the published info.
    input_fh=open(cache_file, 'w')
    input_fh.write(get_header())
    input_fh.write(get_section())
    for site in sites:
        for hostname in sites[site]:
            input_fh.write('\n# site:%s, node: %s\n' %(site,hostname,))
            for dn in sites[site][hostname]:
                input_fh.write('\n%s\n' %(dn) )
                input_fh.write('changetype:modify\n')
                for value in dns[dn]:
                    input_fh.write('delete: GlueCEAccessControlBaseRule\n')
                    input_fh.write('%s\n' %(value) )
                input_fh.write('-\n')
    input_fh.close()
|
import os
import glob
import shutil

# Absolute directory of this script; 'fakefolder' is expected to live beside it.
current_directory = os.path.dirname(os.path.abspath(__file__))

# Kept from earlier experiments: delete individual *.txt files instead of
# the whole tree.
# os.chdir('fakefolder')
# files = glob.glob('*.txt')
# for file in files:
#     os.unlink(file)

# Delete the whole folder tree.
# Fix: build the path with os.path.join instead of hand-concatenating '/'.
shutil.rmtree(os.path.join(current_directory, 'fakefolder'))
import dash_bootstrap_components as dbc
from dash import html
items = [
    dbc.DropdownMenuItem("Item 1"),
    dbc.DropdownMenuItem("Item 2"),
    dbc.DropdownMenuItem("Item 3"),
]

# One demo DropdownMenu per Bootstrap contextual color, in display order.
_variants = [
    ("Primary", "primary"),
    ("Secondary", "secondary"),
    ("Success", "success"),
    ("Warning", "warning"),
    ("Danger", "danger"),
    ("Info", "info"),
]

dropdowns = html.Div(
    [
        dbc.DropdownMenu(items, label=label, color=color, className="m-1")
        for label, color in _variants
    ],
    style={"display": "flex", "flexWrap": "wrap"},
)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
app = Flask(__name__)
# SQLite database file used by Flask-SQLAlchemy.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///admin.sqlite3'
# NOTE(review): hard-coded secret key; move to environment/config for production.
app.config['SECRET_KEY'] = 'myflaskkey'
# Bootswatch theme used by the Flask-Admin UI.
app.config['FLASK_ADMIN_SWATCH'] = 'flatly'
db = SQLAlchemy(app)
admin = Admin(app, name='Dashboard', template_mode='bootstrap3')
class Murid(db.Model):
    """Student ("murid") record exposed through the Flask-Admin ModelView."""
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.Unicode(64))
    last_name = db.Column(db.Unicode(64))
    birthday = db.Column(db.DateTime)
    email = db.Column(db.Unicode(128))
    phone = db.Column(db.Unicode(32))
    city = db.Column(db.Unicode(128))
    country = db.Column(db.Unicode(128))
    # Free-form notes about the student.
    notes = db.Column(db.UnicodeText)
# Flask views
@app.route('/')
def index():
    """Landing page: a single link into the Flask-Admin dashboard."""
    admin_link = '<a href="/admin/">Click me to get to Admin!</a>'
    return admin_link
# admin.add_view(ModelView(Person, db.session))
# Expose CRUD for Murid records in the admin dashboard.
admin.add_view(ModelView(Murid, db.session))
if __name__ == '__main__':
    # Debug server only; not for production use.
    app.run(debug=True)
|
from .GistPagination import *
|
import pandas as pd
"""
Copying and pasting dataframes from stackoverflow is something
that I do a lot, so I made something that makes it easy to convert
into a pandas dataframe.
"""
def from_stack(string):
    """Returns a pd.DataFrame object from string.

    string (string): String version of dataframe.
    """
    import re
    from io import StringIO
    # Collapse every run of horizontal whitespace into a single comma so the
    # text can be handed to read_csv as CSV.
    whitespace_run = r'(?<!\\n)[^\S\n]+'
    csv_text = re.sub(whitespace_run, ',', string)
    return pd.read_csv(StringIO(csv_text))
|
"""
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Django nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36, urlsafe_base64_decode, urlsafe_base64_encode
class EmailVerificationTokenGenerator:
    """
    Strategy object used to generate and check the one-time tokens sent
    out for e-mail address verification.

    Token layout: ``<base64 email>-<base36 timestamp>-<hmac digest>``.
    The digest binds the user's pk, password hash, last login and the
    timestamp, so a login or password change invalidates the token.
    """
    # Allow a project-specific salt; fall back to the package default.
    try:
        key_salt = settings.CUSTOM_SALT
    except AttributeError:
        key_salt = "django-email-verification.token"
    # None lets salted_hmac pick its default digest algorithm.
    algorithm = None
    secret = settings.SECRET_KEY
    def make_token(self, user, expiry=None):
        """
        Return a token that can be used once to verify the given user's
        e-mail address.
        Args:
            user (Model): the user
            expiry (datetime): optional forced expiry date
        Returns:
            (tuple): tuple containing:
                token (str): the token
                expiry (datetime): the expiry datetime
        """
        if expiry is None:
            return self._make_token_with_timestamp(user, self._num_seconds(self._now()))
        # Back-date the timestamp so the token expires exactly at `expiry`.
        return self._make_token_with_timestamp(user, self._num_seconds(expiry) - settings.EMAIL_TOKEN_LIFE)
    def check_token(self, token):
        """
        Check that an e-mail verification token is correct and unexpired.
        Args:
            token (str): the token from the url
        Returns:
            (tuple): tuple containing:
                valid (bool): True if the token is valid
                user (Model): the user model if the token is valid
        """
        try:
            email_b64, ts_b36, _ = token.split("-")
            email = urlsafe_base64_decode(email_b64).decode()
            # Optionally allow several accounts sharing one e-mail address.
            if hasattr(settings, 'EMAIL_MULTI_USER') and settings.EMAIL_MULTI_USER:
                users = get_user_model().objects.filter(email=email)
            else:
                users = [get_user_model().objects.get(email=email)]
            ts = base36_to_int(ts_b36)
        except (ValueError, get_user_model().DoesNotExist):
            return False, None
        # Constant-time comparison against a freshly recomputed token.
        user = next(filter(lambda u: constant_time_compare(self._make_token_with_timestamp(u, ts)[0], token), users),
                    None)
        if not user:
            return False, None
        now = self._now()
        # Reject tokens older than the configured lifetime.
        if (self._num_seconds(now) - ts) > settings.EMAIL_TOKEN_LIFE:
            return False, None
        return True, user
    def _make_token_with_timestamp(self, user, timestamp):
        # Build the token string plus its absolute expiry datetime.
        email_b64 = urlsafe_base64_encode(user.email.encode())
        ts_b36 = int_to_base36(timestamp)
        hash_string = salted_hmac(
            self.key_salt,
            self._make_hash_value(user, timestamp),
            secret=self.secret,
        ).hexdigest()
        return f'{email_b64}-{ts_b36}-{hash_string}', \
               datetime.fromtimestamp(timestamp + settings.EMAIL_TOKEN_LIFE)
    @staticmethod
    def _make_hash_value(user, timestamp):
        # Including password hash and last_login invalidates the token once
        # either changes.
        login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
        return str(user.pk) + user.password + str(login_timestamp) + str(timestamp)
    @staticmethod
    def _num_seconds(dt):
        # Seconds since the 2001-01-01 epoch used by Django's token generators.
        return int((dt - datetime(2001, 1, 1)).total_seconds())
    @staticmethod
    def _now():
        return datetime.now()
default_token_generator = EmailVerificationTokenGenerator()
|
#!/usr/bin/python3
def print_matrix_integer(matrix=[[]]):
    """Print each row of `matrix` as space-separated integers, one row per line."""
    for row in matrix:
        print(" ".join("{:d}".format(value) for value in row))
|
import csv
import heapq
import os
import random
import itertools
from collections import deque
from igraph import *
from utils.graph_jaccard_similarity import graph_jaccard_similarity, bitmask_file_parser, \
jaccard_all_pairs_similarity_file_parser, jaccard_top_k_similar_hashtags, medrank_algorithm
from utils.vector_utils import difference
# File Names
GRAPH_FILE = "final_graph.tsv"
TRANSLATION_FILE = "ID_translation.tsv"
DATA = "data1"
GRAPH_DIR = "graph"
# Edges attributes
WEIGHT = "weight"
INC = "increase"
MAIN = "main"
# Hashtags variables
# Populated by setup(); they remain None until it runs.
hashtags_bitmask = None
hashtags = None
hashtags_all_pairs_similarity = None
# Translations map
# twitter screen name -> internal vertex id (filled by load_graph).
translations_map = {}
# Vertices attributes
ACTIVE = "active"
EXPECTED_VALUE = "e_v"
COST = "cost"
# Others
# Factor used to weight the jaccard similarity of hashtags
BALANCE_FACTOR = 0.1
# Max possible weight
MAX_WEIGHT = 100000
# Penalization
# Extra cost charged per missing path when fewer than k paths are found.
PENALIZATION = MAX_WEIGHT/4
# Setup method to prepare the data, there is no need to call the methods individually
def setup():
    """Load hashtag bitmasks/similarities, load the graph and return it.

    Fills the module globals used by the other functions; call this once
    before anything else.
    """
    global hashtags_bitmask, hashtags, hashtags_all_pairs_similarity
    hashtags_bitmask = bitmask_file_parser()
    hashtags = hashtags_bitmask.keys()
    hashtags_all_pairs_similarity = jaccard_all_pairs_similarity_file_parser()
    path = os.path.join(os.pardir, DATA, GRAPH_DIR)
    g = load_graph(os.path.join(path, GRAPH_FILE), os.path.join(path, TRANSLATION_FILE))
    set_up_edges_empty_attributes(g)
    return g
# Set to 0.0 attributes with None values
def set_up_edges_empty_attributes(graph):
    """Replace every missing (None) per-hashtag weight on each edge with 0.0."""
    for edge in graph.es:
        for tag in hashtags:
            if edge[tag] is None:
                edge[tag] = 0.0
# Load the graph in main memory
def load_graph(graph_in, translation_out=TRANSLATION_FILE, verbose=False):
    """Parse the TSV edge list into a directed igraph Graph.

    Writes the twitter-account -> vertex-id mapping to `translation_out`
    and returns the graph.
    NOTE(review): Python-2-only constructs here (binary csv modes,
    dict.has_key, print statements).
    """
    global translations_map
    if verbose:
        print "[load_graph] Loading graph.."
    g = Graph(directed=True)
    # Open the graph file, and a new file:
    # the new file will contain a mapping between the tweeter accounts and the nodes' IDs in the graph.
    with open(graph_in, 'rb') as tsvin, open(translation_out, 'wb') as tsvout:
        tsvin = csv.reader(tsvin, delimiter='\t')
        tsvout = csv.writer(tsvout, delimiter='\t')
        current_id = 0
        current_edge = 0
        for row in tsvin:
            # Check if each of the twitter accounts of the current edge
            # have already a corresponding vertex in the graph.
            for i in range(0, 2):
                twitter_id = row[i]
                # If not create a new vertex and update the mapping
                if translations_map.has_key(twitter_id) is False:
                    translations_map[twitter_id] = current_id
                    tsvout.writerow([twitter_id, translations_map[twitter_id]])
                    g.add_vertex()
                    g.vs[current_id][ACTIVE] = False
                    g.vs[current_id][EXPECTED_VALUE] = 0.0
                    g.vs[current_id][COST] = 0.0
                    current_id += 1
            # Add the edge to the graph
            g.add_edge(translations_map.get(row[0]), translations_map.get(row[1]))
            # Add the edge attributes
            # Row layout: src, dst, then (hashtag, probability) pairs.
            lenght = len(row)
            for i in range(2, lenght, 2):
                g.es[current_edge][row[i]] = float(row[i + 1])
            g.es[current_edge][WEIGHT] = 0.0
            g.es[current_edge][INC] = 0.0
            g.es[current_edge][MAIN] = False
            current_edge += 1
    if verbose:
        print "[load_graph] Graph loaded."
    return g
# Get node id from screen_name
def translate(screen_name):
    """Map a twitter screen name to its internal vertex id.

    Prints a message and returns None when the name is unknown.
    """
    screen_name = screen_name.strip("\n")
    try:
        return translations_map[screen_name]
    except KeyError:
        # Fix: narrowed from a bare 'except' — only a missing key means
        # "unknown user"; other errors should propagate.
        print("name not found")
# Computes the homogeneity of the group of hashtags
def homogeneity(hashtags_list, verbose=False):
    """Return a score in (0, 1] measuring how related the given hashtags are.

    Trivially 1 for zero or one hashtag; otherwise the Jaccard similarity
    of the group, softened by BALANCE_FACTOR.
    """
    if verbose:
        print("[homogeneity] Checking homogeneity.")
    # BUG FIX: the original tested len(hashtags) — the module-wide global —
    # instead of the hashtags_list argument, so single-hashtag calls still
    # went through the similarity computation.
    if len(hashtags_list) <= 1:
        return 1
    ht_dict = {}
    for h in hashtags_list:
        ht_dict[h] = hashtags_bitmask[h]
    return 1 - (1 - graph_jaccard_similarity(ht_dict)) * BALANCE_FACTOR
# Simulate the independent cascade process
def independent_cascade_process(g, source, tweet_hashtags, verbose=False):
    """Simulate one Independent Cascade run from `source`.

    Edge activation probability is the best per-hashtag weight scaled by
    the hashtag-set homogeneity. Returns the list of activated vertex ids.
    Side effect: sets ACTIVE on activated vertices (callers reset it).
    """
    result = []
    balance_coeff = homogeneity(tweet_hashtags)
    # Stack containing the nodes activated in the previous iteration
    # First iteration has only the source node
    it_cur = []
    g.vs[source][ACTIVE] = True
    it_cur.append(source)
    result.append(source)
    # Stack containing the nodes activated in the current iteration
    it_cur_n = []
    iteration = 1
    # Each new iteration of this cycle is a new iteration of the process
    while len(it_cur) > 0:
        if verbose:
            print "[independent_cascade_process] ------------------------------------------------------"
            print "[independent_cascade_process] Iteration: ", iteration
        # Simulate the process for each newly active node
        while len(it_cur) > 0:
            v = it_cur.pop()
            if verbose:
                print "[independent_cascade_process] Considering current active node: ", v
            edges = g.incident(v)
            for e in edges:
                # We consider only the hastag parameter that maximizes the probability,
                # this probability will then adjusted according to the jaccard similarity of the hashtags.
                # The Jaccard similarity is an indicator of how close the hashtags are.
                # NOTE(review): `max` shadows the builtin in this scope.
                max = tweet_hashtags[0]
                for h in tweet_hashtags[1:]:
                    if g.es[e][h] > g.es[e][max]:
                        max = h
                pr = g.es[e][max] * balance_coeff
                if verbose:
                    print "[independent_cascade_process] Probability of activation on the considered edge: ", pr
                # Random number to simulate the biased coin flip
                r = random.random()
                if verbose:
                    print "[independent_cascade_process] Value obtained: ", r
                # If the result is head, activated the node
                if r <= pr:
                    u = g.es[e].target
                    # If the node was already active, do nothing
                    if g.vs[u][ACTIVE] is False:
                        if verbose:
                            print "[independent_cascade_process] Node ", u, " activated."
                        g.vs[u][ACTIVE] = True
                        it_cur_n.append(u)
                        result.append(u)
        # Move to the next iteration
        it_cur = it_cur_n
        it_cur_n = []
        iteration += 1
    if verbose:
        print "[independent_cascade_process] ------------------------------------------------------"
        print "[independent_cascade_process] Done."
    return result
# Estimate the expected outcome of an independent cascade run
def estimate_expected_outcome(g, source, hashtags, runs, expected_outcome, verbose=False):
    """Run the independent cascade `runs` times from `source`.

    Appends to `expected_outcome` each vertex's estimated activation
    probability and returns the average number of activated nodes.
    """
    # This is the increment each node can get from a single run.
    # At the end, the number of increments received estimates the
    # probability of the node being activated, and hence the expected
    # outcome of the process; it also averages the spread sizes.
    inc = 1.0 / runs
    avg_number_activated = 0
    if verbose:
        print("[estimate_expected_outcome] Computing Independent Cascade Process expected outcome..")
    # Repeat the process "runs" times.
    for run in range(runs):
        if verbose:
            print("[estimate_expected_outcome] Starting run %s" % run)
        activations = 0
        result = independent_cascade_process(g, source, hashtags)
        # BUG FIX: the original reused `i` for both the run counter and this
        # inner loop, so the "Run completed" message reported a vertex id
        # instead of the run number.
        for vertex_id in result:
            activations += 1
            g.vs[vertex_id][EXPECTED_VALUE] += inc
        avg_number_activated += activations * inc
        deactivate(g)
        if verbose:
            print("[estimate_expected_outcome] Run %s completed." % run)
    # Collect results
    for v in g.vs:
        expected_outcome.append(v[EXPECTED_VALUE])
    # Clean up
    reset_graph(g)
    if verbose:
        print("[estimate_expected_outcome] Done.")
    return avg_number_activated
# Retrieve the most suitable hashtags, given as input the set of hashtags currently adopted
def get_close_hahstags(hashtags_in, k=5):
    """Return the k hashtags ranked closest to `hashtags_in` by MEDRANK.

    NOTE(review): the 'hahstags' typo is kept because other code calls it.
    """
    # print "hashtags_in", hashtags_in
    ranking = medrank_algorithm(hashtags_all_pairs_similarity, hashtags_in, k)
    #result = [tup[1] for tup in ranking]
    #return result
    return ranking
# Maximize the expected outcome of an independent cascade run
def maximize_expected_outcome(g, source, current_hashtags, runs, current_outcome, verbose=False):
    """Score candidate hashtags by how much adding each one improves the
    expected cascade outcome over `current_outcome`.

    Returns a dict hashtag -> (avg_activated, positivity) for candidates
    with non-negative improvement.
    """
    outcomes = {}
    if verbose:
        print("[maximize_expected_outcome] Maximizing expected outcome..")
        print("[maximize_expected_outcome] Retrieving most suitable hashtags..")
    suggested_hashtags = get_close_hahstags(current_hashtags)
    if verbose:
        print("[maximize_expected_outcome] Hashtags retrieved.")
        print("[maximize_expected_outcome] Starting simulations..")
        print("suggested_hashtags: %s" % (suggested_hashtags,))
    for h in suggested_hashtags:
        # BUG FIX: the original rebound current_hashtags to [] and then
        # extended the fresh empty list with itself, discarding the tweet's
        # hashtags; build each trial set from the caller's list instead.
        trial_hashtags = list(current_hashtags)
        trial_hashtags.append(h)
        outcome = []
        n = estimate_expected_outcome(g, source, trial_hashtags, runs, outcome)
        # Positivity: how much the new expected-outcome vector improves on
        # the current one.
        positivity = difference(outcome, current_outcome)
        if verbose:
            print("[maximize_expected_outcome] Hashtag: %s positivity: %s" % (h, positivity))
        if positivity >= 0:
            tup = (n, positivity)
            outcomes[h] = tup
    if verbose:
        print("[maximize_expected_outcome] Done.")
    return outcomes
# Retrieve the top-k hashtags according to the the vertex interests
def most_interested_in_hashtags(g, id, k=3, tweet_hashtags=None, verbose=False):
    """Return up to k hashtags the vertex `id` is most receptive to,
    judged by the homogeneity-adjusted weights of its incoming edges
    (lower accumulated weight = stronger interest).

    NOTE(review): the `id` parameter shadows the builtin.
    """
    result = []
    weighted_heap = []
    hashtags_weight = {}
    edges = g.incident(id, mode=IN)
    if verbose:
        print "[most_interested_in_hashtags] Retrieve top k hashtags according to vertex interests.."
    for h in hashtags:
        if verbose:
            print "[most_interested_in_hashtags] Trying hashtag: ", h
        cur_hashtags = []
        if tweet_hashtags is not None:
            cur_hashtags.extend(tweet_hashtags)
        cur_hashtags.append(h)
        hmg = homogeneity(cur_hashtags)
        if verbose:
            print "[most_interested_in_hashtags] Homogeneity of current hashtag set: ", hmg
        for e in edges:
            edge = g.es[e]
            if edge[h] > 0:
                # Weight is the inverse of the adjusted probability.
                weight = 1 / (edge[h] * hmg)
            else:
                weight = MAX_WEIGHT
            if verbose:
                print "[most_interested_in_hashtags] Edge probability on hashtag", h, ": ", edge[h]
                print "[most_interested_in_hashtags] Weight: ", weight
            if h in hashtags_weight:
                hashtags_weight[h] += weight
            else:
                hashtags_weight[h] = weight
            if verbose:
                print "[most_interested_in_hashtags] Updated hashtag weight: ", hashtags_weight[h]
    # Min-heap on accumulated weight: pop the k lightest hashtags.
    for e in hashtags_weight.keys():
        tup = (hashtags_weight[e], e)
        weighted_heap.append(tup)
    heapq.heapify(weighted_heap)
    l = min(len(weighted_heap), k)
    for i in range(0, l):
        tup = heapq.heappop(weighted_heap)
        result.append(tup[1])
    if verbose:
        print "[most_interested_in_hashtags] Done."
    return result
# Weight edges according to the input hashtag
def weight_edges(g, field, homogeneity=1, verbose=False):
    """Set every edge's WEIGHT to 1/(p * homogeneity) for hashtag `field`,
    or MAX_WEIGHT when the probability is zero or the attribute is absent.

    NOTE(review): the `homogeneity` parameter shadows the homogeneity()
    function defined above.
    """
    if verbose:
        print "[weight_edges] Weighting edges.."
    for edge in g.es:
        weight = MAX_WEIGHT
        if field in edge.attributes():
            den = (edge[field] * homogeneity)
            if den > 0:
                weight = 1 / den
        edge[WEIGHT] = weight
        if verbose:
            print "[weight_edges] Edge:", edge.source, " ", edge.target, " weights ", edge[WEIGHT]
    if verbose:
        print "[weight_edges] Done."
# Check whether the suggested side track edges can be inserted into a shortest path with no loops
def is_straight_path(g, source, target, path, verbose=True):
    """Check whether the given sidetrack edges can all be embedded into one
    loop-free walk from `source` to `target` that otherwise follows the
    shortest-path (MAIN) edges.

    `path` is a sequence of (increment, src, dst) tuples; returns True only
    if the walk uses every sidetrack edge exactly once.
    NOTE(review): uses dict.has_key(), which is Python-2-only.
    """
    deactivate(g, verbose=verbose)
    d_path = {}
    count = 0
    if verbose:
        print "[is_path] Checking if path: ", path, " is indeed a path.."
    # Map: vertex where we leave the tree -> vertex the sidetrack jumps to.
    for t in path:
        key = int(t[1])
        d_path[key] = int(t[2])
    if len(path) == 0:
        if verbose:
            print "[is_path] Done."
        return False
    '''if len(path) == 1:
        if verbose:
            print "[is_path] Done."
        if path[0][1] == target:
            if verbose:
                print "[is_path] Result:", False
            return False
        n = path[0][1]
        edges = g.incident(n)
        for e in edges:
            if g.es[e][MAIN] == True:
                if verbose:
                    print "[is_path] Result:", True
                return True
        if verbose:
            print "[is_path] Result:", False
        return False'''
    if verbose:
        print "[is_path] Edges: ", d_path
    cur = source
    while cur != target:
        v = g.vs[cur]
        # ACTIVE doubles as a "visited" marker here to detect loops.
        if v[ACTIVE] is True:
            return False
        v[ACTIVE] = True
        if verbose:
            print "[is_path] Current node:", cur
        if d_path.has_key(cur):
            if verbose:
                print "[is_path] Taking side edge from: ", cur
            old = cur
            cur = d_path[cur]
            del d_path[old]
            count += 1
            continue
        edges = g.incident(cur)
        for e in edges:
            edge = g.es[e]
            if edge[MAIN] is True:
                cur = edge.target
    if verbose:
        print "[is_path] Done."
        print "[is_path] Result: ", count == len(path)
    return count == len(path)
# Check whether the current node belongs to the inverse Shortest Paths Tree
def is_valid_edge(g, node, verbose=False):
    """Return True iff `node` has an outgoing MAIN edge, i.e. it lies on
    the inverse shortest-paths tree."""
    if verbose:
        print "[is_valid_edge] Checking if node:", node, " belongs to the Shortest Paths Tree.."
    edges = g.incident(g.vs[node])
    for e in edges:
        if g.es[e][MAIN] is True:
            if verbose:
                print "[is_valid_edge] Done."
                print "[is_valid_edge] Result:",True
            return True
    if verbose:
        print "[is_valid_edge] Done."
        print "[is_valid_edge] Result:", False
    return False
# Remove the edges incident to the target node and return them in a list
def remove_incidents(g, target, verbose=True):
    """Temporarily delete `target`'s outgoing edges.

    Returns a dict (neighbour id -> saved attribute dict) so that
    add_edges() can restore them afterwards.
    NOTE(review): locals `id` and `dict` shadow builtins.
    """
    if verbose:
        print "[remove_incidents] Temporary removing useless edges.."
    removed = {}
    out_edges = g.incident(target)
    if verbose:
        print "[remove_incidents] Removing edges: "
    for id in out_edges:
        edge = g.es[id]
        key = edge.target
        # Dictionary to keep the attributes
        dict = {}
        dict[MAIN] = edge[MAIN]
        dict[WEIGHT] = edge[WEIGHT]
        dict[INC] = edge[INC]
        for h in hashtags:
            dict[h] = edge[h]
        # Save the dictionary
        removed[key] = dict
        if verbose:
            print "[remove_incidents] Edge: ", edge.source, " ", edge.target
    g.delete_edges(out_edges)
    if verbose:
        print "[remove_incidents] Done."
    return removed
# Add edges in the list to the target node
def add_edges(g, target, edges_list, verbose=True):
    """Re-insert the edges removed by remove_incidents() and restore their
    saved attributes from `edges_list` (neighbour id -> attribute dict)."""
    if verbose:
        print "[add_edges] Inserting edges:"
    for k in edges_list.keys():
        g.add_edge(target, k)
        if verbose:
            print "[add_edges] Inserted edge:", target, " ", k
    if verbose:
        print "[add_edges] Done. Loading now attibutes.."
    edges = g.incident(target)
    for e in edges:
        edge = g.es[e]
        if verbose:
            print "[add_edges] Loading attributes on edge: ", edge.source, " ", edge.target
        key = edge.target
        dict = edges_list[key]
        for k in dict.keys():
            edge[k] = dict[k]
    if verbose:
        print "[add_edges] Done."
# Compute the shortest paths from every node to target
def compute_shortest_paths(g, target, weights=WEIGHT, mode=IN, verbose=True):
    """Mark (MAIN=True) every edge lying on some shortest path from any
    vertex to `target`."""
    if verbose:
        print "[compute_shortest_paths] Computing inverse shortest paths.. "
    shortest_paths = g.get_shortest_paths(target, weights=weights, mode=mode, output="epath")
    for p in shortest_paths:
        for e in p:
            g.es[e][MAIN] = True
    if verbose:
        print "[compute_shortest_paths] Done."
# Reconstruct cost of shortest paths to target
def reconstruct_paths_cost(g, target, verbose=True):
    """Walk the MAIN tree backwards from `target` (BFS), setting each
    vertex's COST to its shortest-path distance to target; vertices never
    reached get the MAX_WEIGHT sentinel."""
    deactivate(g, verbose=verbose)
    if verbose:
        print "[reconstruct_paths_cost] Reconstructing paths costs.."
    v_queue = deque()
    v_queue.append(target)
    g.vs[target][ACTIVE] = True
    while len(v_queue) > 0:
        v = v_queue.popleft()
        # Reuse ACTIVE field as VISITED flag
        edges = g.incident(v)
        for e in edges:
            if g.es[e][MAIN] is True:
                u = g.es[e].target
                if verbose:
                    print "[reconstruct_paths_cost] Edge: ", g.es[e].source, " ", g.es[e].target, \
                        " is in some shortest path with cost", g.es[e][WEIGHT]
                # Cost of v = cost of the MAIN edge plus the successor's cost.
                g.vs[v][COST] = g.es[e][WEIGHT] + g.vs[u][COST]
                if verbose:
                    print "[reconstruct_paths_cost] Node:", v, " has cost: ", g.vs[v][COST]
                break
        edges = g.incident(v, mode=IN)
        if verbose:
            print "[reconstruct_paths_cost] Considering outer neighbors of ", v
        for e in edges:
            if g.es[e][MAIN] is True:
                u = g.es[e].source
                if verbose:
                    print "[reconstruct_paths_cost] Considering node ", u
                if g.vs[u][ACTIVE] is False:
                    if verbose:
                        print "[reconstruct_paths_cost] Node ", u, " is now active."
                    g.vs[u][ACTIVE] = True
                    v_queue.append(u)
    # Vertices never reached are unreachable from target: sentinel cost.
    for v in g.vs:
        if v[ACTIVE] is False:
            v[ACTIVE] = True
            v[COST] = MAX_WEIGHT
    if verbose:
        print "[reconstruct_paths_cost] Done."
# Compute the increment in cost of the possible paths when taking sidetrack edges
def compute_sidetrack_edges_increment(g, verbose=True):
    """For each non-MAIN edge whose endpoint can reach the target, store in
    INC the extra path cost of detouring through it; return a heapified
    list of (INC, source, target) tuples."""
    sidetrack_edges = []
    # count = 0
    if verbose:
        print "[compute_sidetrack_edges_increment] Computing increment in cost of sidetrack edges.."
    # costs = []
    # heapq.heapify(costs)
    # Can be improved
    for edge in g.es:
        if edge[MAIN] is False:
            # count += 1
            target = g.vs[edge.target]
            # Skip edges leading to vertices that cannot reach the target.
            if target[COST] >= MAX_WEIGHT:
                continue
            else:
                edge[INC] = edge[WEIGHT] + g.vs[edge.target][COST] - g.vs[edge.source][COST]
                tup = (edge[INC], edge.source, edge.target)
                sidetrack_edges.append(tup)
                if verbose:
                    print "[compute_sidetrack_edges_increment] Edge: ", edge.source, " " \
                        , edge.target, \
                        " Side_track_cost= edge[WEIGHT] + g.vs[edge.target][COST] - g.vs[edge.source][COST] =", \
                        edge[WEIGHT], "+", g.vs[edge.target][COST], "-", g.vs[edge.source][COST], "=", \
                        edge[INC]
    if verbose:
        print "[compute_sidetrack_edges_increment] Ordering sidetrack edges.."
    heapq.heapify(sidetrack_edges)
    if verbose:
        print "[compute_sidetrack_edges_increment] Done."
    # return ordered_s_e
    return sidetrack_edges
# Compute k least cost simple paths from source to target
def get_k_shortest_paths(g, source, target, result, k=5, verbose=True):
    """Eppstein-style enumeration of the k least-cost simple paths from
    `source` to `target`.

    Appends (cost, sidetrack-edge-tuple) pairs to `result` (heapified) and
    returns the summed cost of the paths found.
    """
    # Need first to remove outgoing edges from target, edges will be restored at the end of the computation
    removed = remove_incidents(g, target, verbose=verbose)
    if verbose:
        print "[get_k_shortest_paths] Computing ", k, "-least cost paths.."
        print "[get_k_shortest_paths] Source: ", source
        print "[get_k_shortest_paths] Target: ", target
    compute_shortest_paths(g, target, verbose=verbose)
    reconstruct_paths_cost(g, target, verbose=verbose)
    # Cost of the optimal path
    OPT = g.vs[source][COST]
    if verbose:
        print "[get_k_shortest_paths] Computing side edges increment in cost.."
    sidetrack_edges = compute_sidetrack_edges_increment(g, verbose=verbose)
    if verbose:
        print "[get_k_shortest_paths] Done."
        print "[get_k_shortest_paths] Computing candidate shortest paths side edges.. "
    ordered_s_e = []
    # The least cost path is already known
    candidates_found = 1
    # NOTE(review): '&' binds tighter than the comparisons, so this parses
    # as "len(sidetrack_edges) > (0 & candidates_found) < k", which is
    # "len(sidetrack_edges) > 0 and 0 < k" — the candidates_found limit is
    # never applied; 'and' was almost certainly intended.
    while len(sidetrack_edges) > 0 & candidates_found < k:
        p = []
        tup = heapq.heappop(sidetrack_edges)
        p.append(tup)
        if is_valid_edge(g, tup[1], verbose=verbose):
            ordered_s_e.append(tup)
            candidates_found += 1
    # Update the number of paths
    k = min(k,candidates_found)
    if verbose:
        print "[get_k_shortest_paths] Candidates found: ", candidates_found
        print "[get_k_shortest_paths] Done."
        print "[get_k_shortest_paths] Computing paths.."
    candidate_paths = []
    paths = []
    s = ()
    tup = (OPT, s)
    heapq.heapify(paths)
    heapq.heappush(paths, tup)
    if k == 1:
        result.extend(paths)
        return OPT
    # Every combination of accepted sidetrack edges is a candidate path.
    for l in range(1, k):
        for tup in itertools.combinations(ordered_s_e, l):
            candidate_cost = 0
            for edge in tup:
                candidate_cost += edge[0]
            # Check if path might be in the k-least cost paths
            candidate_paths.append(tup)
            if verbose:
                print "[get_k_shortest_paths] Tuple: ", tup
                print "[get_k_shortest_paths] Cost: ", candidate_cost
    if verbose:
        print "[get_k_shortest_paths] Candidates: ", candidate_paths
    # Can be improved
    for p in candidate_paths:
        cost = 0
        if verbose:
            print "[get_k_shortest_paths] Examinating path: ", p
        for t in p:
            cost += t[0]
        if is_straight_path(g, source, target, p, verbose=verbose):
            if verbose:
                print "[get_k_shortest_paths] Path is valid: ", p
            tup = (cost + OPT, p)
            heapq.heappush(paths, tup)
        else:
            if verbose:
                print "[get_k_shortest_paths] Path is NOT valid: ", p
    if verbose:
        print "[get_k_shortest_paths] Done."
        print "[get_k_shortest_paths] Result:"
        for p in paths:
            print "[get_k_shortest_paths] Path: ", p
    k = min(k, len(paths))
    total = 0
    for i in range(0, k):
        tup = heapq.heappop(paths)
        total += tup[0]
        result.append(tup)
    heapq.heapify(result)
    # Restore the edges removed at the start.
    add_edges(g, target, removed, verbose=verbose)
    return total
# Maximize the probability of reaching target node
def maximize_target_outcome(g, source, target, tweet_hashtags=[], k=5, verbose=False):
    """Rank `target`'s preferred hashtags by the total cost of the k best
    paths from `source` (lower total = easier to reach) and return the
    sorted list of (total_cost, hashtag, paths) tuples.

    NOTE(review): mutable default tweet_hashtags=[] is shared across calls;
    it appears to be only read here, but confirm before relying on it.
    """
    outcomes = []
    if verbose:
        print "[maximize_target_outcome] Maximizing outcome on node: ", target
    pref_hashtags = most_interested_in_hashtags(g, target, k, tweet_hashtags, verbose=verbose)
    count = 0
    if verbose:
        print "[maximize_target_outcome] Computing score for the k most favourite hashtags"
    for h in pref_hashtags:
        if count == k:
            break
        count += 1
        if verbose:
            print "[maximize_target_outcome] Hashtag: ", h
        cur_hashtags = []
        cur_hashtags.extend(tweet_hashtags)
        cur_hashtags.append(h)
        hmg = homogeneity(cur_hashtags, verbose=verbose)
        weight_edges(g, h, hmg, verbose=verbose)
        outcome = []
        tot = get_k_shortest_paths(g, source, target, outcome, k, verbose=verbose)
        missing_paths = k - len(outcome)
        # If the number of path retrieved is less than k, the hashtag gets a penalization for each missing path
        for i in range(0,missing_paths):
            tot+=PENALIZATION
        tup = (tot,h, outcome)
        outcomes.append(tup)
        if verbose:
            print "[maximize_target_outcome] Outcome: ", (tot,outcome)
    outcomes.sort()
    if verbose:
        print "[maximize_target_outcome] Done."
    return outcomes
# UTILS
# Deactivate nodes
def deactivate(g, verbose=False):
    """Clear the ACTIVE flag on every vertex of g."""
    if verbose:
        print("[deactivate] Deactivating nodes..")
    for vertex in g.vs:
        vertex[ACTIVE] = False
    if verbose:
        print("[deactivate] Done.")
# Reset edge weights
def reset_weights(g, verbose=False):
    """Set every edge's WEIGHT attribute back to 0.0."""
    if verbose:
        print("[reset_weights] Resetting weights..")
    for edge in g.es:
        edge[WEIGHT] = 0.0
    if verbose:
        print("[reset_weights] Done.")
# Reset the graph to default settings
def reset_graph(g, verbose=False):
    """Restore every vertex and edge attribute to its default value."""
    if verbose:
        print("[reset_graph] Resetting graph..")
    # Vertices: clear the activation flag and the estimated probability.
    for vertex in g.vs:
        vertex[ACTIVE] = False
        vertex[EXPECTED_VALUE] = 0.0
        if verbose:
            print("[reset_graph] v")
    # Edges: clear the path-search bookkeeping attributes.
    for edge in g.es:
        edge[WEIGHT] = 0.0
        edge[MAIN] = False
        edge[INC] = 0.0
        if verbose:
            print("[reset_graph] e")
    if verbose:
        print("[reset_graph] Done.")
def is_hashtag(h_list):
    """Return True iff every element of h_list is a known hashtag."""
    return all(h in hashtags for h in h_list)
if __name__ == "__main__":
    # Ad-hoc experiments; most calls are intentionally left commented out.
    #
    # g = setup()
    # print hashtags[2], hashtags[1]
    # print most_interested_in_hashtags(g,7)
    '''cur_outcome = []
    n = estimate_expected_outcome(g, 0, [hashtags[2], hashtags[1]], 5, cur_outcome)
    print n
    print cur_outcome
    result = maximize_expected_outcome(g, 0, [hashtags[2], hashtags[1]], 5, cur_outcome)
    print result
    print translate("matteosalvinimi")
    # r = g.get_shortest_paths(0,4)
    # print r'''
    g = load_graph("k_paths_test_graph.tsv")
    print g
    # print get_close_hahstags([hashtags[2], hashtags[1]])
    '''print [hashtags[0],hashtags[1]]
    result = []
    print "Average number of nodes activated: ", \
    print result
    print "Nodes:"
    for n in g.vs:
        print n
    print "Edges:"
    for e in g.es:
        print e.source, e.target, e'''
    # weight_edges(g, "sale")
    # get_k_shortest_paths(g, 0, 4, [],1)
    # result = []
    # estimate_expected_outcome(g,0,["acqua"],50,result)
    # print result
    # most_interested_hashtags(g,0,0)
|
# class Solution:
# def isValid(self, s: str) -> bool:
# known = {")":"(", "}":"{", "]":"["}
# stack = []
# for i in s:
# if i in known.values():
# stack.append(i)
# else:
# if stack:
# t = stack.pop()
# if t != known[i]:
# return False
# else:
# return False
# return False if stack else True
class Solution(object):
    """LeetCode 20 -- check that a bracket string is validly nested."""

    def isValid(self, s):
        """Return True if every bracket in s is closed in the right order.

        :type s: str
        :rtype: bool
        """
        # Map each opening bracket to its required closing counterpart.
        pairs = {"(": ")", "[": "]", "{": "}"}
        stack = []
        for ch in s:
            if ch in pairs:
                # Opening bracket: remember it, expect its partner later.
                stack.append(ch)
            # Closing bracket: it must match the most recent opener.
            elif not stack or pairs[stack.pop()] != ch:
                return False
        # Valid only if every opener found its partner.
        return not stack
if __name__ == '__main__':
    # Quick smoke test: a fully balanced string should report True.
    checker = Solution()
    print(checker.isValid("()[]{}"))
|
"""
Дано семизначное число. Найдите сумму квадратов четных и нечетных цифр.
"""
number = int(input("Введите семизначное число \n"))
num1 = number % 10
num2 = number % 100//10
num3 = number % 1000//100
num4 = number % 10000//1000
num5 = number % 100000//10000
num6 = number % 1000000//100000
num7 = number % 10000000//1000000
sum1 = 0
sum2 = 0
if num1%2 == 0:
sum1 = sum1 + num1
else:
sum2 = sum2 + num1
if num2%2 == 0:
sum1 = sum1 + num2
else:
sum2 = sum2 + num2
if num3%2 == 0:
sum1 = sum1 + num3
else:
sum2 = sum2 + num3
if num4%2 == 0:
sum1 = sum1 + num4
else:
sum2 = sum2 + num4
if num5%2 == 0:
sum1 = sum1 + num5
else:
sum2 = sum2 + num5
if num6%2 == 0:
sum1 = sum1 + num6
else:
sum2 = sum2 + num6
if num7%2 == 0:
sum1 = sum1 + num7
else:
sum2 = sum2 + num7
print("Сумма квадратов чётных и нечётных чисел = ", (sum1*2) + (sum2*2))
|
# Interactive shopping-list editor: read an initial list, then apply a
# fixed number of commands and print what remains.
shops = input().split()

for _ in range(int(input())):
    tokens = input().split()
    action = tokens[0]

    if action == "Include":
        # Append a new shop at the end of the route.
        shops.append(tokens[1])
    elif action == "Visit":
        count = int(tokens[2])
        # Ignore the command unless 1..len(shops) shops are requested.
        if 0 < count <= len(shops):
            if tokens[1] == "first":
                del shops[:count]
            elif tokens[1] == "last":
                del shops[-count:]
    elif action == "Prefer":
        i, j = int(tokens[1]), int(tokens[2])
        # Swap only when both positions are inside the list.
        if 0 <= i < len(shops) and 0 <= j < len(shops):
            shops[i], shops[j] = shops[j], shops[i]
    elif action == "Place":
        new_shop = tokens[1]
        position = int(tokens[2]) + 1
        if position == len(shops):
            shops.append(new_shop)
        elif 0 <= position <= len(shops) - 1:
            shops.insert(position, new_shop)

print("Shops left:")
print(" ".join(shops))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim: on PyQt4 builds that expose QString, route strings
# through QString.fromUtf8; otherwise pass them through unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Compatibility shim: newer PyQt4 releases removed the explicit encoding
# argument of QApplication.translate; fall back to the 3-argument form
# when UnicodeUTF8 is unavailable.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated UI for the messenger form.

    NOTE(review): this class is regenerated from design.ui -- make edits
    in the .ui file, not here.  The visible typos ("Massanger",
    "Mesage") originate in design.ui; they are runtime strings and are
    deliberately left untouched in this pass.
    """

    def setupUi(self, Form):
        # Top-level form: 436x325 default size, 309x196 minimum.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(436, 325)
        Form.setMinimumSize(QtCore.QSize(309, 196))
        Form.setStyleSheet(_fromUtf8(""))
        # Outer grid holds the input row (row 0) and the table (row 1).
        self.gridLayout_2 = QtGui.QGridLayout(Form)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        # Inner grid: two header labels over two line edits.
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Right-hand line edit (under the "Delete" label), fixed size.
        self.lineEdit_2 = QtGui.QLineEdit(Form)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lineEdit_2.sizePolicy().hasHeightForWidth())
        self.lineEdit_2.setSizePolicy(sizePolicy)
        self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
        self.gridLayout.addWidget(self.lineEdit_2, 2, 1, 1, 1)
        # Centered header labels, one per column.
        self.label = QtGui.QLabel(Form)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 0, 1, 1, 1)
        # Left-hand line edit (under the "Insert" label), fixed size.
        self.lineEdit = QtGui.QLineEdit(Form)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lineEdit.sizePolicy().hasHeightForWidth())
        self.lineEdit.setSizePolicy(sizePolicy)
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.gridLayout.addWidget(self.lineEdit, 2, 0, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
        # Table below the inputs; rows/columns are populated at runtime.
        self.tableWidget = QtGui.QTableWidget(Form)
        self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        self.gridLayout_2.addWidget(self.tableWidget, 1, 0, 1, 1)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # User-visible strings (typos preserved -- fix in design.ui).
        Form.setWindowTitle(_translate("Form", "PyDB Massanger", None))
        self.label.setText(_translate("Form", "Insert Message", None))
        self.label_2.setText(_translate("Form", "Delete Mesage", None))
|
# -*- coding: utf-8 -*-
# @Author: Fallen
# @Date: 2020-04-21 21:58:18
# @Last Modified by: Fallen
# @Last Modified time: 2020-04-21 22:22:49
# # 定义一个用户类,用户名和密码是这个类的属性,实例化两个用户,分别有不同的用户名和密码
# # 设计一个方法 修改密码
# # 你得先登录才能修改密码
import functools
import os
import time
def auth(f):
    """Decorator: run f only after a successful interactive login.

    Returns f's result on success; returns None (without calling f)
    when the login attempt fails, matching the original behaviour.
    """
    @functools.wraps(f)  # preserve the wrapped function's name/docstring
    def inner(*args, **kwargs):
        if login():
            return f(*args, **kwargs)
        return None
    return inner
def login():
    """Prompt for credentials and check them against the 'userinfo' file.

    Each line of 'userinfo' holds one 'username|password' record.
    Returns True on the first matching record, False otherwise.
    """
    username = input("username:").strip()
    password = input("password:").strip()
    with open('userinfo', encoding='utf-8') as f1:
        for record in f1:
            uid, pwd = record.strip().split('|')
            if username == uid and password == pwd:
                print('login successfully')
                return True
    # BUG FIX: the original returned False from inside the loop's else
    # branch, so only the FIRST record in the file was ever checked.
    print('login failed')
    return False
class User(object):
    """A user account identified by a username and a password."""

    def __init__(self, username, password):
        super(User, self).__init__()
        self.username = username  # login name, as stored in 'userinfo'
        self.password = password  # current plaintext password

    @auth
    def fix_pass(self):
        """Interactively change this user's password.

        Requires a successful login (enforced by @auth) plus confirmation
        of the current password.  Rewrites 'userinfo' by writing a .bak
        copy and renaming it over the original.  Returns True when the
        password was changed, False otherwise.
        """
        oldpwd = input('please input your old password:')
        if oldpwd != self.password:
            return False
        newpwd = input('please input your new password:').strip()
        with open('userinfo', encoding='utf-8', mode='r') as src, \
                open('userinfo.bak', encoding='utf-8', mode='w') as dst:
            for line in src:
                uid, pwd = line.strip().split('|')
                if uid == self.username:
                    # Replace only this user's stored password.
                    self.password = newpwd
                    pwd = newpwd
                # BUG FIX: the original wrote '{}\n'.format(line) for
                # non-matching users, but `line` still carries its own
                # '\n', so every rewrite doubled their trailing newlines.
                dst.write("{}|{}\n".format(uid, pwd))
        os.remove('userinfo')
        os.rename('userinfo.bak', 'userinfo')
        return True
if __name__ == '__main__':
    # Demo: create a user and walk through the password-change flow.
    # BUG FIX: guarded with __main__ so importing this module no longer
    # triggers interactive prompts as a side effect.
    fallen = User('fallen', '123456')
    fallen.fix_pass()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.