text stringlengths 8 6.05M |
|---|
# Number-guessing game: the player has an initial guess plus up to two retries.
import random
secret = random.randint(1, 10)  # the hidden number, 1..10 inclusive
print('---------我爱鱼c工作室-----------------')
temp = input("不妨猜一下小甲鱼现在心里想的是哪个数字:")
guess = int(temp)
i = 1  # retry counter: the loop allows at most two re-entries
while guess != secret and i <= 2:
    temp = input("哎呀,猜错了,重新输入:")
    guess = int(temp)
    if guess == secret:
        print("我艹,你是小甲鱼心里的蛔虫吗?")
        print("哼,猜中了也没有奖励!")
    else:
        if guess > secret:
            print("哥,大了大了!")
            i = i + 1
        else:
            # Bug fix: this branch means the guess was too SMALL; the old
            # message falsely claimed the secret was 8 even though it is random.
            print("哥,小了小了!")
            i = i + 1
# NOTE(review): a correct FIRST guess skips the loop and gets no
# congratulation message — confirm whether that is intended.
print("游戏结束了,不玩了。")
|
from smtplib import SMTPDataError
from django.conf import settings
from django.db import IntegrityError
from django.core.mail import get_connection
from celery.task import task
from .models import Blacklist, MessageLog
# Task options for the email-sending celery task; a site can override the
# defaults below via settings.CELERY_EMAIL_TASK_CONFIG.
CONFIG = getattr(settings, 'CELERY_EMAIL_TASK_CONFIG', {})
# Real delivery backend (the celery layer only queues; this backend sends).
BACKEND = getattr(settings, 'CELERY_EMAIL_BACKEND',
                  'django.core.mail.backends.smtp.EmailBackend')
TASK_CONFIG = {
    'name': 'djcelery_email_send',
    'ignore_result': True,
}
TASK_CONFIG.update(CONFIG)
@task(**TASK_CONFIG)
def send_emails(messages, **kwargs):
    """Celery task: deliver a list of Django email messages.

    Blacklisted recipients are skipped, every outcome is recorded in
    MessageLog, and the number of messages actually sent is returned.
    On SMTP 554 the recipient is blacklisted; on any other error a lone
    message is retried while batch members are re-queued individually.
    """
    logger = send_emails.get_logger()
    conn = get_connection(backend=BACKEND)
    conn.open()
    num = 0
    for message in messages:
        # check blacklist — the setting is re-read per message so a
        # settings change takes effect without restarting the worker
        CHECK_BLACKLIST = getattr(
            settings, 'DJCELERY_SES_CHECK_BLACKLIST', True)
        if CHECK_BLACKLIST:
            logger.debug('Check blacklist')
            try:
                # NOTE(review): only the first recipient is checked; the
                # meaning of type=0 is defined by the Blacklist model.
                Blacklist.objects.get(email=message.to[0], type=0)
                logger.debug("Email already in blacklist.")
                continue
            except Blacklist.DoesNotExist:
                pass
        # send
        try:
            result = conn.send_messages([message])
            logger.debug("Successfully sent email message to %r.", message.to)
            MessageLog.objects.log(message, 1)  # status 1 — presumably "sent"; confirm in MessageLog
            num += result
        except SMTPDataError as e:
            logger.warning("Message to %r, blacklisted.", message.to)
            if e.smtp_code == 554:
                MessageLog.objects.log(message, 3)  # status 3 — presumably "blacklisted"
                try:
                    Blacklist(email=message.to[0]).save()
                except IntegrityError:
                    # already blacklisted (e.g. by a concurrent task)
                    pass
        except Exception as e:
            MessageLog.objects.log(message, 2)  # status 2 — presumably "failed"
            logger.warning(
                "Failed to send email message to %r, retrying.", message.to)
            if len(messages) == 1:
                send_emails.retry(exc=e)
            else:
                send_emails.delay([message], **kwargs)
    conn.close()
    return num
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Demo of Python string-literal forms: adjacent-literal concatenation, quote
# nesting, triple quotes, escapes, raw strings, bytes and unicode literals.
print('---------------字符串常量的各种形式!---------------\n')
s = 'spa''m'  # adjacent literals are merged at compile time -> 'spam'
print('单引号\'spa\'\'m\':\n\t', s)
print('相连的两个字符串会自动合并!\n')
s = "spa'm"  # double quotes let a single quote appear unescaped
print('双引号"spa\'m":\n\t', s)
print('单双引号复用,可以在单引号内加双引号,或者双引号内加单引号!\n')
s = '''spam'''  # triple quotes also work for one-liners
print("三引号'''spam''':\n\t", s)
print('三引号可以直接打出多行内容:')
s = """Fuck
you,
man!\n"""
print(s)
s = 'spa\n\tm'  # \n and \t are interpreted as newline / tab
print(r"转义字符:'spa\n\tm'", '\n\t', s, '\n')
s = r'sp\na\tm'  # raw string: backslashes kept literally
print(r"Raw字符串(不转义)r'sp\na\tm':", '\n\t', s, '\n')
s = b'sp\x01am'  # bytes literal with an embedded 0x01 byte
print('Byte字符串:\n\t', s, '\n')
s = u'eggs\u0020spam'  # \u0020 is the space character
print(r"Unicode字符串u'eggs\u0020spam':", '\n\t', s, '\n')
|
import json
import os
import requests
baseUrl = "https://www.lingq.com/api/"  # LingQ REST API root
def get_token():
    """Request an API token using the USER/PASS environment variables.

    NOTE(review): the token is only printed, never returned — the caller
    must copy it from stdout into the TOKEN environment variable.
    """
    token_url = baseUrl + "api-token-auth/"
    username = os.environ['USER']
    password = os.environ['PASS']
    payload = {'username': username, 'password': password}
    r = requests.post(token_url, data=payload)
    print(r.url)
    print(r.text)
def lingqs():
    """Download all LingQs for $LANGUAGE (authenticated via $TOKEN) and
    save them to data.json as a valid JSON array."""
    token = os.environ['TOKEN']
    language = os.environ['LANGUAGE']
    lingq_url = baseUrl + "languages/" + language + "/lingqs/"
    headers = {'Authorization': 'Token ' + token}
    r = requests.get(lingq_url, headers=headers)
    with open("data.json", "w") as out_file:
        # Bug fix: the old manual assembly ("[" + item-",\n" lines + "]")
        # left a trailing comma before "]", producing invalid JSON.
        # json.dump serializes the whole array correctly in one call.
        json.dump(r.json(), out_file, indent=2)
lingqs()
|
# Add a project via the HTTP API
import requests
from api.login.login import LogIn
# Server under test and the default form-encoded content type.
host = "http://192.168.10.121:8088"
header = {"Content-Type":"application/x-www-form-urlencoded; charset=UTF-8"}
class AddProject:
    """API wrapper for POST /project/add on the test server."""
    def __init__(self, s=None):
        # Bug fix: the old default `s=requests.session()` was evaluated
        # once at import time, so every instance silently shared a single
        # session (and its cookies). Create a fresh one per instance.
        self.s = s if s is not None else requests.session()
    def addProject(self, name="项目2", aliasname="项目02"):
        """Create a project and return the decoded JSON response."""
        url = host + "/project/add"
        data = {
            "name": name,
            "aliasname": aliasname,
            "started": "2021-06-07",
            "ended": "2021-06-07",
            "desc": "哈哈",
            "id": "0"
        }
        # Log in first so the session carries the auth cookies.
        lg = LogIn(s=self.s).login()
        # Bug fix: requests' positional order is (url, data, json) — the old
        # `self.s.post(url, data, header)` sent the headers dict as the JSON
        # body instead of as HTTP headers.
        r = self.s.post(url, data=data, headers=header)
        res = r.json()
        return res
if __name__ == '__main__':
    # Manual smoke test: create a project and print the server reply.
    ap = AddProject().addProject(name="项目3",aliasname="项目03")
    print(ap)
|
# Run Dijkstra from every node and count the neighbours within the threshold.
class Solution:
    def findTheCity(self, n: int, edges: List[List[int]], distanceThreshold: int) -> int:
        """Return the city with the fewest cities reachable within
        distanceThreshold; ties go to the greatest city number."""
        adj = [[] for _ in range(n)]
        for u, v, wt in edges:
            adj[u].append((v, wt))
            adj[v].append((u, wt))

        def shortest_from(src: int) -> List[int]:
            """Heap-based Dijkstra over the undirected weighted graph."""
            best = [inf] * n
            best[src] = 0
            heap = [(0, src)]
            while heap:
                d, node = heappop(heap)
                if best[node] < d:
                    continue  # stale heap entry, already improved
                for nxt, wt in adj[node]:
                    cand = d + wt
                    if cand < best[nxt]:
                        best[nxt] = cand
                        heappush(heap, (cand, nxt))
            return best

        answer, fewest = -1, 105  # n <= 100, so 105 exceeds any count
        for city in range(n):
            dist = shortest_from(city)
            reachable = sum(
                1 for other in range(n)
                if other != city and dist[other] <= distanceThreshold
            )
            # <= keeps the LARGEST index among ties, as required.
            if reachable <= fewest:
                fewest = reachable
                answer = city
        return answer
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'lian'
__email__ = "liantian@188.com"
# Stdlib imports
import time
# Core Django imports
from django.views.generic.base import View
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.conf import settings
# Third-party app imports
# Imports from your apps
# from .models import Category
from .models import Post
# Create your views here.
# Site-tunable settings with defaults.
PER_PAGE = getattr(settings, 'PER_PAGE', 30)
BLOG_TITLE = getattr(settings, 'BLOG_TITLE', "BLOG_TITLE")
# NOTE(review): the default here is a one-element list while the others are
# scalars — confirm the templates expect an iterable.
BLOG_DESCRIPTION = getattr(settings, 'BLOG_DESCRIPTION', ["BLOG_DESCRIPTION"])
class BaseView(View):
    # Extends the stock Django View with a setup() hook that pre-populates
    # a response_dict shared by all page views.
    __doc__ = """修改了系统自带的View,加入一个setup()的初始化函数"""
    def setup(self, request):
        """Populate context common to every page: static pages, pagination
        size, blog title and description."""
        self.response_dict = {}
        self.request = request
        pages = Post.objects.filter(type="page").all()
        # categories = Category.objects.all()
        # self.response_dict["categories"] = categories
        self.response_dict["pages"] = pages
        self.response_dict["PER_PAGE"] = PER_PAGE
        self.response_dict["BLOG_TITLE"] = BLOG_TITLE
        self.response_dict["BLOG_DESCRIPTION"] = BLOG_DESCRIPTION
    def dispatch(self, request, *args, **kwargs):
        # Run setup() before the normal View dispatch.
        self.setup(request)
        return View.dispatch(self, request, *args, **kwargs)
    @staticmethod
    def response_json(data):
        """Serialize *data* with the project encoder as application/json."""
        from .DjangoJSONEncoder import dumps
        return HttpResponse(dumps(data), content_type="application/json")
    @staticmethod
    def http_response(data, content_type="application/json"):
        """Return *data* unchanged under the given content type."""
        return HttpResponse(data, content_type=content_type)
    def response(self, mimetype="text/html", template_file=None):
        """Render *template_file* with the accumulated response_dict."""
        return render_to_response(template_file,
                                  self.response_dict,
                                  content_type=mimetype,
                                  context_instance=RequestContext(self.request, processors=[]))
    @staticmethod
    def go(urlname, args):
        """Usage: return self.go(urlname="wiki-view-page", args=(page_id,))"""
        return HttpResponseRedirect(reverse(urlname, args=args))
    @staticmethod
    def null_good():
        # 204: the server handled the request but returns no content.
        return HttpResponse(status=204)
    def go_to_referer(self):
        # NOTE(review): raises KeyError when the client sent no Referer header.
        return HttpResponseRedirect(self.request.META['HTTP_REFERER'])
class IndexView(BaseView):
    def get(self, request, page_num):
        """Render one page (PER_PAGE posts) of published blog posts."""
        page_num = int(page_num)
        # Queryset slice for 1-based page numbers.
        posts = Post.objects.filter(type="blog", status="publish").all()[(page_num-1)*PER_PAGE:page_num*PER_PAGE]
        self.response_dict["posts"] = posts
        self.response_dict["page_num"] = page_num
        return self.response(template_file="index.html")
class PostView(BaseView):
    def get(self, request, year, month, day, url_slug):
        """Render a single post looked up by its url_slug.

        year/month/day come from the URL pattern but are not used for the
        lookup; .get() raises if the slug is missing or not unique.
        """
        # self.response_dict["url_slug"] = url_slug
        post = Post.objects.filter(url_slug=url_slug).get()
        self.response_dict["post"] = post
        self.response_dict["url_slug"] = url_slug
        return self.response(template_file="post.html")
|
import random
# 4x4 board of values 1-16 plus a parallel board of "*" placeholders.
game_board_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
empty_game_board_list = []
for i in range(0, len(game_board_list)):
    empty_game_board_list.append("*")
def print_board():
    """Print the numbered board, then the placeholder board, 4 cells per row."""
    print("{} {} {} {}".format(game_board_list[0], game_board_list[1], game_board_list[2], game_board_list[3]))
    print("{} {} {} {}".format(game_board_list[4], game_board_list[5], game_board_list[6], game_board_list[7]))
    print("{} {} {} {}".format(game_board_list[8], game_board_list[9], game_board_list[10], game_board_list[11]))
    print("{} {} {} {}".format(game_board_list[12], game_board_list[13], game_board_list[14], game_board_list[15]))
    print("{} {} {} {}".format(empty_game_board_list[0], empty_game_board_list[1], empty_game_board_list[2],
                               empty_game_board_list[3]))
    print("{} {} {} {}".format(empty_game_board_list[4], empty_game_board_list[5], empty_game_board_list[6],
                               empty_game_board_list[7]))
    print("{} {} {} {}".format(empty_game_board_list[8], empty_game_board_list[9], empty_game_board_list[10],
                               empty_game_board_list[11]))
    print("{} {} {} {}".format(empty_game_board_list[12], empty_game_board_list[13], empty_game_board_list[14],
                               empty_game_board_list[15]))
print(game_board_list)
# Spawn three starting tiles.
temp_list_for_tiles = []
for i in range(1, 4):
    # NOTE(review): randrange(2, 3) can only ever return 2 — presumably
    # "2 or 4" (e.g. random.choice([2, 4])) was intended; confirm.
    new_tile = random.randrange(2, 3)
    temp_list_for_tiles.append(new_tile)
    # NOTE(review): randrange(0, 8) covers only the top half of the
    # 16-cell board — confirm whether 0..15 was intended.
    new_tile_placement = random.randrange(0, 8)
    print("tile num = " + str(new_tile) + " place of tile = " + str(new_tile_placement))
print(temp_list_for_tiles)
for i in range(0, 3):
    print(empty_game_board_list)
    remove_index_from_egb = random.randrange(0, 8)
    # Swap the "*" at a random slot for one of the new tiles.
    empty_game_board_list.remove(empty_game_board_list[remove_index_from_egb])
    empty_game_board_list.insert(remove_index_from_egb, temp_list_for_tiles[i])
print_board()
# Collect the first column of the 4x4 board (every 4th element).
new_list = []
for i in range(0, 16):
    if i % 4 == 0:
        new_list.append(game_board_list[i])
print(new_list)
|
#!/usr/bin/env python2
import os
import ConfigParser
import time
from time import gmtime, strftime, sleep
import subprocess
import readline
class color:
    """ANSI escape sequences used for terminal colouring."""
    HEADER = '\033[95m'
    IMPORTANT = '\33[35m'
    NOTICE = '\033[33m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    RED = '\033[91m'
    WHITE = '\033[37m'
    END = '\033[0m'     # reset to default
    UNDERLINE = '\033[4m'
    LOGGING = '\33[34m'
def logo():
    """Print the ASCII-art logo in alternating red/white."""
    print(color.RED+" ..,;:ccccccc:;...")
    print(color.WHITE+" ..,clllc:;;;;;;:cllc,.")
    print(color.RED+" .,cllc,..............';;'.")
    print(color.WHITE+" .;lol;......"+color.WHITE+"_______"+color.RED+"....;lol;.")
    print(color.RED+" .,lol;......"+color.WHITE+"/ _____/"+color.RED+".....;lol;.. ")
    print(color.WHITE+" .coo......."+color.WHITE+"/ /"+color.RED+".............coo")
    print(color.RED+".'lol,....."+color.WHITE+"/ /"+color.RED+"............'lol,.")
    print(color.WHITE+".,lol,...."+color.WHITE+"/ /_____"+color.RED+"........,lol,.")
    print(color.RED+".,lol,..."+color.WHITE+"/_______/"+color.RED+".......,lol,.")
    print(color.WHITE+" .:ooc'.................:ooc'")
    print(color.RED+" .'cllc'.............cllc.")
def clearScr():
    """Clear the terminal (POSIX only: shells out to `clear`)."""
    os.system('clear')
def yesOrNo():
    """Prompt for confirmation; True when the answer is one of the words in
    the module-level `yes` list from the config (Python 2 raw_input)."""
    return (raw_input("Continue Y / N: ") in yes)
def return_fw():
    """Drop back into the `main` menu of the bash framework script."""
    bashCommand = ". ./cyanide-framework.sh && main"
    output = subprocess.call(['bash','-c', bashCommand])
# Resolve all paths relative to this script's own directory.
installDir = os.path.dirname(os.path.abspath(__file__)) + '/'
print(installDir)
configFile = installDir + "/cyanide.cfg"
print(installDir)
config = ConfigParser.RawConfigParser()
config.read(configFile)
# Tool/log directories and the accepted "yes" answers come from the cfg file.
toolDir = installDir + config.get('cyanide', 'toolDir')
logDir = installDir + config.get('cyanide', 'logDir')
yes = config.get('cyanide', 'yes').split()
color_random=[color.HEADER,color.IMPORTANT,color.NOTICE,color.OKBLUE,color.OKGREEN,color.WARNING,color.RED,color.END,color.UNDERLINE,color.LOGGING]
# random.shuffle(color_random)
continuePrompt = "\nClick [Return] to continue"
alreadyInstalled = "Already Installed"
class TBomb:
    """Installer/launcher wrapper for the TBomb tool.

    Instantiating the class immediately runs the tool (cloning it first
    when missing) — construction has heavy side effects by design.
    """
    def __init__(self):
        self.installDir = toolDir + "TBomb"
        self.gitRepo = "https://github.com/TheSpeedX/TBomb.git"
        # self.targetPrompt = color.RED + "\nLock Your Target " + color.WHITE + "IP/Subnet/Range/Host: "
        if self.installed():
            self.run()
        else:
            self.install()
            self.run()
    def installed(self):
        # NOTE(review): probes installDir + "tools/TBomb/...", not
        # self.installDir — only equivalent if toolDir == installDir + "tools/".
        return os.path.isfile(installDir + "tools/TBomb/TBomb.sh")
    def install(self):
        # Shallow clone, mark the scripts executable, then launch once.
        os.system("git clone --depth=1 %s %s" % (self.gitRepo, self.installDir))
        os.system("cd %s && chmod 777 *.sh && sleep 1 && bash TBomb.sh" % self.installDir)
    def run(self):
        clearScr()
        os.system("cd %s && bash TBomb.sh" % self.installDir)
TBomb()  # executed on import: starts the tool immediately
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.build_files.fmt.base import FmtBuildFilesRequest
from pants.backend.build_files.fmt.buildifier.subsystem import Buildifier
from pants.core.goals.fmt import FmtResult
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.engine.internals.native_engine import Digest, MergeDigests
from pants.engine.internals.selectors import Get
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
class BuildifierRequest(FmtBuildFilesRequest):
    """Fmt request type wiring BUILD-file formatting to the Buildifier tool."""
    tool_subsystem = Buildifier
@rule(desc="Format with Buildifier", level=LogLevel.DEBUG)
async def buildfier_fmt(  # NOTE(review): name typo ("buildfier"); kept, since the function name identifies the registered rule
    request: BuildifierRequest.Batch, buildifier: Buildifier, platform: Platform
) -> FmtResult:
    """Download buildifier for this platform and run it over the batch's
    BUILD files, returning the formatting result."""
    buildifier_tool = await Get(
        DownloadedExternalTool, ExternalToolRequest, buildifier.get_request(platform)
    )
    # Merge sources with the downloaded tool so the sandbox contains both.
    input_digest = await Get(
        Digest,
        MergeDigests((request.snapshot.digest, buildifier_tool.digest)),
    )
    result = await Get(
        ProcessResult,
        Process(
            argv=[buildifier_tool.exe, "-type=build", *request.files],
            input_digest=input_digest,
            output_files=request.files,  # capture the formatted files back
            description=f"Run buildifier on {pluralize(len(request.files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return await FmtResult.create(request, result)
def rules():
    """Register this backend's rules with the Pants engine."""
    return [*collect_rules(), *BuildifierRequest.rules()]
|
# Generated by Django 2.2.5 on 2019-09-15 18:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Replaces done.do with a ForeignKey to Did, and links go to both Did
    # and Done.  default=1 with preserve_default=False backfills existing
    # rows with pk 1 once, without keeping the default afterwards.
    dependencies = [
        ('boards', '0009_auto_20190915_2358'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='done',
            name='do',
        ),
        migrations.AddField(
            model_name='done',
            name='did',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='boards.Did'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='go',
            name='did',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='boards.Did'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='go',
            name='done',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='boards.Done'),
            preserve_default=False,
        ),
    ]
|
## Extract a document vector from each document, matching both single words and two-word phrases
from __future__ import print_function
import numpy as np
import sys
import re
#build concept bag
# Bag of known concepts: phrase -> 1-based index into the document vector.
conceptbag={'functions': 1, 'linear alegbra': 2, 'vector': 3, 'machine learning': 4, 'differential equations': 5}
#build the model of document
# Fix: the file handle was previously opened without ever being closed;
# a `with` block releases it deterministically.
with open('phrase_test.txt') as doc_file:
    docs = doc_file.readlines()
# vectors=open("doc_vector.txt","w")
for line in docs:
    # Normalise tokens: strip non-letters, lowercase, drop empties.
    words = []
    for raw in line.split():
        cleaned = re.sub(r'[^a-zA-Z]+', '', raw).lower()
        if cleaned != '':
            words.append(cleaned)
    print(words)
    # One counter slot per concept (generalised from the hard-coded 5).
    doc_vec = np.zeros(len(conceptbag))
    for i, word in enumerate(words):
        # Count unigram matches...
        if word in conceptbag:
            doc_vec[conceptbag[word] - 1] += 1
        # ...and two-word phrase matches starting at this position.
        if i < len(words) - 1:
            bigram = word + ' ' + words[i + 1]
            if bigram in conceptbag:
                doc_vec[conceptbag[bigram] - 1] += 1
    # doc_wordcount[num]= doc_vec
    print(doc_vec)
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
from training import *
# score function
def score(Xag, W):
    """Return the per-class score matrix for the augmented features.

    Input:
        Xag : augmented numpy array of features (bias column included)
        W   : augmented coefficients; the bias coefficients are the last row
    Output:
        numpy array of scores (Xag @ W)
    """
    return Xag.dot(W)
#Loss function and its gradient with respect to the parameters
def loss_gradient(Xag,Y,W, reg, Delta =1):
    """Multiclass SVM (hinge) loss and its gradient.

    Input:
        Xag   : (N, D+1) augmented feature matrix
        Y     : (N,) integer class labels
        W     : (D+1, C) augmented weights; last row holds the biases
        reg   : L2 regularisation strength (bias row excluded)
        Delta : margin width (default 1)
    Output:
        loss, grads : scalar loss and a gradient the same shape as W
    """
    N = Xag.shape[0]
    S = score(Xag,W)
    label_score = S[np.arange(N),Y] #scores corresponding to correct label
    #defining loss function
    # The j == y_i hinge term contributes exactly Delta per sample, which
    # the trailing "- Delta" removes again.
    loss = np.sum(np.maximum(0,S-label_score[:, np.newaxis] + Delta))/N + 0.5*reg*np.sum(W[:-1,:]**2)/N -Delta
    #computing the gradients
    dmax = 1.0*((S-label_score[:,np.newaxis]+ Delta > 0))  # margin-violation indicator
    grads= np.dot(Xag.T,dmax)
    # One-hot matrix of the correct labels.
    B = np.arange(W.shape[1])[np.newaxis,:] == Y[:,np.newaxis]
    # Each violated margin also pushes the correct class's column down.
    A = Xag*(np.sum(dmax, axis =1)[:,np.newaxis])
    grads -= np.dot(A.T,B)
    grads[:-1,:] += reg*W[:-1,:]  # regularise everything but the bias row
    grads /= N
    return loss,grads
#-----------------------------------------------------------------------
class SVM(object):
    """Linear multiclass SVM with an augmented weight matrix (bias folded
    into the last row of params['w'])."""
    def __init__(self, input_dims, num_classes, reg):
        self.num_classes = num_classes
        self.params = {}
        # (D+1) x C weights: random init, extra row for the bias.
        self.params['w'] = np.random.randn((input_dims +1),self.num_classes)
        self.reg = reg
    def loss(self,X,y=None):
        """With labels y: return (loss, grads); without: return raw scores.
        X is augmented with a ones column internally."""
        Xaug = np.append(X,np.ones((X.shape[0],1)), axis = 1)
        reg = self.reg
        W = self.params['w']
        if y is None:
            return score(Xaug,W)
        Loss, grad = loss_gradient(Xaug,y,W,reg)
        grads={}
        grads['w'] = grad
        return Loss, grads
    #3)Function to predict the class label
    def class_predict(self,X):
        return predict(X,self.loss) #invoking helper from training.py
    #4)Function to estimate accuracy of prediction
    def model_accuracy(self,X,Y):
        return accuracy(X,Y, self.loss)
    #5)Function for stochastic training using the Training class from training.py
    def stoch_train(self,data_train, data_val,**extrargs):
        """data_train, data_val and extrargs are of the same type used
        in Training class constructor."""
        model = self
        Trainer = Training(model,data_train, data_val,**extrargs) #
        Trainer.train()
|
from abc import ABCMeta, abstractmethod
from threading import Thread
class Scraper(Thread):
    """Abstract base for scraper threads; subclasses set TYPE and
    implement get() and run().

    NOTE(review): `__metaclass__` is honoured only on Python 2 — on
    Python 3 the @abstractmethod markers are not enforced unless the
    class uses `metaclass=ABCMeta`.
    """
    __metaclass__ = ABCMeta
    # Identifier for the scraper kind; overridden by subclasses.
    TYPE = None
    @abstractmethod
    def get(self): pass
    @abstractmethod
    def run(self): pass
|
from __future__ import division, print_function
import time
import matplotlib.pyplot as plt
import numpy as np
import os
from keras.models import Model, Sequential
from keras.layers import Activation, Dense, Flatten, BatchNormalization, Dropout, Input, Reshape, multiply
from keras.layers import Embedding, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Nadam, Adam, SGD
from keras.datasets import mnist
import tensorflow as tf
# MNIST plus shape bookkeeping for the conditional GAN evaluation below.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Image shape information
img_rows = X_train.shape[1]
img_cols = X_train.shape[2]
if len(X_train.shape) == 4:
    channels = X_train.shape[3]
else:
    channels = 1  # greyscale data has no channel axis
img_shape = (img_rows, img_cols, channels)
num_classes = 10
latent_dim = 100
optimizer = Adam(0.0002, 0.5)  # NOTE(review): defined but not used in this file
def generator():
    """Build the conditional generator: (noise, label) -> image.

    The label is embedded into the latent space and multiplied
    element-wise with the noise vector before the dense stack.
    """
    model = Sequential()
    model.add(Dense(256, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    # tanh keeps pixel outputs in [-1, 1]
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))
    #model.summary()
    noise = Input(shape=(latent_dim,))
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
    model_input = multiply([noise, label_embedding])
    img = model(model_input)
    return Model([noise, label], img)
def discriminator():
    """Build the conditional discriminator: (image, label) -> validity.

    The label is embedded to the flattened image size and multiplied with
    the flattened image before the dense classifier stack.
    """
    model = Sequential()
    model.add(Dense(512, input_dim=np.prod(img_shape)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))
    model.add(Dense(1, activation='sigmoid'))  # real/fake probability
    #model.summary()
    img = Input(shape=img_shape)
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, np.prod(img_shape))(label))
    flat_img = Flatten()(img)
    model_input = multiply([flat_img, label_embedding])
    validity = model(model_input)
    return Model([img, label], validity)
# Build the generator
generator = generator()  # rebinds the name, shadowing the factory function
# The generator takes noise and the target label as input
# and generates the corresponding digit of that label
generator.load_weights('../Q2/saved_model_weights/version1/generator_weights_99000.h5')
path_save_model = 'save_weight_classifier/version_1.h5'
# Pre-trained MNIST classifier used below to judge the generated digits.
model = tf.keras.models.load_model(path_save_model)
def save_imgs(epoch, parent_save_path, version):
    """Save a 2x5 grid of generated digits 0-9 as an image file under
    parent_save_path, named by version and epoch."""
    r, c = 2, 5
    noise = np.random.normal(0, 1, (r * c, 100))
    sampled_labels = np.arange(0, 10).reshape(-1, 1)
    gen_imgs = generator.predict([noise, sampled_labels])
    # Rescale images 0 - 1 (the generator's tanh output is in [-1, 1])
    gen_imgs = 0.5 * gen_imgs + 0.5
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i,j].imshow(gen_imgs[cnt,:,:,0], cmap='gray')
            axs[i,j].set_title("Digit: %d" % sampled_labels[cnt])
            axs[i,j].axis('off')
            cnt += 1
    fig.savefig(parent_save_path + "/version_" + str(version) + "_epoch_" + str(epoch))
    plt.close()
def measure_success_rate_generated_image(num_imgs_each_digit):
    """Generate num_imgs_each_digit images per digit 0-9 and print the
    fraction the pre-trained classifier labels correctly."""
    noise = np.random.normal(0,1, (num_imgs_each_digit*10, latent_dim))
    tmp = []
    for digit in range(10):
        for _ in range(num_imgs_each_digit):
            tmp.append(digit)
    sampled_labels = np.array(tmp)
    gen_imgs = generator.predict([noise, sampled_labels])
    # Rescale images 0 - 1
    gen_imgs = 0.5 * gen_imgs + 0.5
    count_success = 0
    for i in range(gen_imgs.shape[0]):
        # Classify one image at a time; argmax over the class scores.
        pred1 = model.predict(gen_imgs[i,:,:].reshape(1, 28, 28, 1))
        class_pred = pred1.argmax()
        if class_pred == sampled_labels[i]:
            count_success += 1
    print("percentage of success: ", count_success/gen_imgs.shape[0])
def measure_success_rate_mnist_test_set():
    """Evaluate the loaded classifier on the real MNIST test set
    (model.evaluate prints the metrics; nothing is returned)."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    # Reshaping the array to 4-dims so that it can work with the Keras API
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    input_shape = (28, 28, 1)  # NOTE(review): unused
    # Making sure that the values are float so that we can get decimal points after division
    x_train = x_train.astype('float32')  # NOTE(review): x_train is prepared but never used here
    x_test = x_test.astype('float32')
    # Normalizing the RGB codes by dividing it to the max RGB value.
    x_train /= 255
    x_test /= 255
    model.evaluate(x_test, y_test)
# Run both evaluations when the script is executed.
measure_success_rate_generated_image(1000)
measure_success_rate_mnist_test_set()
|
# This handles the TTYPE request per the MTTS spec (https://tintin.sourceforge.io/protocols/mtts/)
MTTS = chr(24)   # terminal type
IAC = chr(255)   # "Interpret As Command"
SE = chr(240)    # Subnegotiation End
SB = chr(250)    # Subnegotiation Begin
# Our bitvector for MTTS:
# 1     "ANSI"               Client supports all common ANSI color codes.
# 2     "VT100"              Client supports all common VT100 codes.
# 4     "UTF-8"              Client is using UTF-8 character encoding.
# 8     "256 COLORS"         Client supports all 256 color codes.
# 256   "TRUECOLOR"          Client supports all truecolor codes.
#
# 256 + 8 + 4 + 2 + 1 == 271
# TODO maybe someday:
# 16    "MOUSE TRACKING"     Client supports xterm mouse tracking.
# 32    "OSC COLOR PALETTE"  Client supports the OSC color palette.
replies = [ 'WXPYMOO', 'VT100-TRUECOLOR', 'MTTS 271', 'MTTS 271' ]
def handle_mtts(payload, conn):
    """Answer one TTYPE subnegotiation, cycling through `replies` on
    successive requests (state kept on the connection object)."""
    conn.mtts_reply = getattr(conn, 'mtts_reply', 0)
    reply = replies[conn.mtts_reply]
    print("Got IAC MTTS subrequest; Sending " + reply)
    conn.output(IAC + SB + MTTS + chr(0) + reply + IAC + SE)
    conn.UpdateIcon('MTTS', f'MTTS Enabled: {reply}')
    # Advance to the next reply, wrapping around at the end of the table.
    conn.mtts_reply = (conn.mtts_reply + 1) % len(replies)
|
import win32com.client
import os
import time
# Endless consumer: receive messages from the local private MSMQ queue
# "save-deleted" and print them every two seconds (Windows-only).
while True:
    qinfo=win32com.client.Dispatch("MSMQ.MSMQQueueInfo")
    computer_name = os.getenv('COMPUTERNAME')
    # direct=os: format name addresses this machine's private queue directly.
    qinfo.FormatName="direct=os:"+computer_name+"\\PRIVATE$\\save-deleted"
    queue=qinfo.Open(1,0)  # presumably (MQ_RECEIVE_ACCESS, MQ_DENY_NONE) — TODO confirm constants
    msg=queue.Receive()    # blocks until a message is available
    print("------------------------------------------/")
    print("Title:",msg.Label)
    print(" ")
    print("Body:",msg.Body)
    print("-------------------------------------------")
    time.sleep(2)
    queue.Close()
from csv import DictReader
import re
import json
from os import path
CONFIG_FILENAME = "config.json"
#loading config data
def load_config(filename):
    """Load the JSON config file.

    Returns the parsed object, or None (after printing a message) when
    the file is missing or contains invalid JSON.
    """
    try:
        # `with` closes the handle; the old version leaked the open file,
        # and its bare `except:` swallowed even KeyboardInterrupt.
        with open(filename, encoding='utf-8') as config:
            return json.load(config)
    except (OSError, ValueError):
        # json.JSONDecodeError is a ValueError subclass.
        print("your config file is malformed")
def wrap_tag(content, tag='p'):
    """Wrap *content* in an HTML *tag*.

    Anchor tags ('a') use the content as the href with a fixed
    "more info" label and no trailing newline; every other tag wraps
    the content directly, newline-terminated.
    """
    if tag == "a":
        return "<{0} href={1}> more info </{0}>".format(tag, content)
    return "<{0}> {1} </{0}>\n".format(tag, content)
def generate_html_list(filename):
    """Build <li> entries from ../data/<filename>.csv.

    Only the columns configured for this file in config.json are used;
    each non-empty cell is optionally wrapped in its configured tag.
    Returns the concatenated <li> block as one string.
    """
    generated_list=""
    config_data = load_config(CONFIG_FILENAME)
    with open('../data/'+filename+".csv", 'r', encoding='utf-8') as read_obj:
        #read csv file
        csv_dict_reader = DictReader(read_obj)
        for row in csv_dict_reader:
            formated_string = ""
            row = dict(row)
            #extract field names
            fieldnames = list(config_data[filename].keys())
            for field in fieldnames:
                #check for non empty values
                if row[field].strip() != "":
                    #add value to single block (wrapping with tag if exists)
                    formated_string += "{}, ".format(wrap_tag(row[field],config_data[filename][field]) if config_data[filename][field] != "" else row[field])
            #wrap list by li tag — [:-2] drops the trailing ", " separator
            if formated_string.strip() != "": generated_list += wrap_tag(formated_string[:-2], "li")
    return generated_list
def write_generated_list(filename, generated_list):
    """Replace the <ul>...</ul> blocks of ../<filename>.html with a fresh
    <ul> built from *generated_list*.

    Fixes: the old version ignored the generated_list parameter and
    re-read the CSV via generate_html_list(); it also passed the HTML as
    a re.sub replacement STRING, where backslash escapes (\\g, \\1, ...)
    in the content would be misinterpreted — a callable replacement is
    inserted verbatim.
    """
    with open('../'+filename+".html", 'r', encoding='utf-8') as file:
        filedata = file.read()
    with open('../'+filename+".html", 'w', encoding='utf-8') as file:
        #find ul tag and replace it with the generated list
        replacement = wrap_tag(generated_list, "ul")
        injected_list = re.sub(r"(?is)<ul[^>]*>(.*?)</ul>",
                               lambda match: replacement, filedata)
        file.write(injected_list)
def verify_config_files_fields():
    """Check every CSV referenced in config.json: it must exist under
    ../data/ and contain all configured columns.  Returns True only when
    everything matches; prints the first problem found otherwise."""
    config_data = load_config(CONFIG_FILENAME)
    #extract file names
    files_list = config_data.keys()
    for filename in files_list:
        filename='../data/'+filename+".csv"
        #check if file exists
        if path.exists(filename):
            with open(filename, "r", encoding='utf-8') as f:
                csv_dict_reader = DictReader(f)
                #extract file headers
                headers = csv_dict_reader.fieldnames
                #compare if config fields exists in headers
                fieldname_compare = set(config_data[filename.split("/")[2].split('.')[0]].keys() - set(headers))
                if len(fieldname_compare) > 0:
                    print(f"{str(fieldname_compare)} not found")
                    return False
        else:
            # Bug fix: the old message was an f-string with no placeholder,
            # so it never said WHICH file was missing.
            print(f'{filename} does not exist')
            return False
    return True
if __name__ == "__main__":
    # Validate config vs. CSV headers first; only then regenerate and
    # inject the HTML lists for every configured file.
    check_config = verify_config_files_fields()
    if check_config:
        config_data = load_config(CONFIG_FILENAME)
        files_list = config_data.keys()
        for filename in files_list:
            generated_list = generate_html_list(filename)
            write_generated_list(filename, generated_list)
import datetime
from configparser import ConfigParser
from typing import List
from common.models.Event import Event
from common.utils.CommonEventUtils import CommonEventUtils
class MedalEventUtils(CommonEventUtils):
    """Schedule helper for the recurring event configured under
    settings/x2 (2 days on, 13 days off)."""
    # NOTE(review): class-level default shared until __init__ rebinds it.
    events: List[Event] = []
    def __init__(self, config: ConfigParser):
        # "x2" holds the event's start date string; the cadence semantics
        # come from CommonEventUtils.init — confirm there.
        startString = config.get("settings", "x2")
        self.events = self.init(startString, duration=2, daysOff=13)
    def isDoubleDungeon(self, day: datetime.date) -> bool:
        """True when *day* falls inside one of the scheduled events."""
        return self.isDay(day, self.events)
|
from keras.applications import DenseNet121
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras import layers
from keras.optimizers import Adam
def build_model():
    """DenseNet121 backbone (local no-top weight file) + global max pool
    head with five sigmoid outputs, compiled for binary cross-entropy."""
    densenet = DenseNet121(
        weights='DenseNet-BC-121-32-no-top.h5',  # local weight file, not 'imagenet'
        include_top=False,
        input_shape=(224,224,3)
    )
    model = Sequential()
    model.add(densenet)
    model.add(layers.GlobalMaxPooling2D())
    model.add(layers.Dropout(0.5))
    # Five independent sigmoid outputs paired with binary_crossentropy
    # (multi-label style), not a softmax over classes.
    model.add(layers.Dense(5, activation='sigmoid'))
    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(lr=0.0001),  # NOTE(review): 'lr' is deprecated in newer Keras; 'learning_rate' replaces it
        metrics=['accuracy']
    )
    return model
|
import ctypes
def make_msg_box_popup():
    """Show a Windows MessageBoxW with a fixed diagnostic message
    (Windows-only: uses ctypes.windll)."""
    # create msg_box
    MessageBox = ctypes.windll.user32.MessageBoxW
    title = 'If this message box appears, the program has worked'
    msg = 'This works fine when running as a .py file and when built with pyinstaller in Python 3.7, but when trying to run this on another compute after building with pyinstaller, an error will occur before this box can appear'
    # show the msg_box
    # NOTE(review): uType is passed as None; ctypes forwards that as 0
    # (plain OK box) — confirm that is the intent.
    type_num = None
    MessageBox(None, msg, title, type_num)
|
# -*- coding: utf-8 -*-
import pytest
from .conftest import TEST_UPSTREAM_CONFIG
from jussi.request import JussiJSONRPCRequest
from jussi.validators import is_get_block_header_request
from jussi.validators import is_get_block_request
from jussi.validators import is_valid_get_block_response
from jussi.validators import is_valid_jsonrpc_response
from jussi.validators import is_valid_jussi_response
from jussi.validators import is_valid_non_error_jsonrpc_response
from jussi.validators import is_valid_non_error_single_jsonrpc_response
from jussi.validators import is_valid_single_jsonrpc_response
from jussi.upstream import _Upstreams
from .conftest import AttrDict
# Minimal fake Sanic request carrying just the attributes that
# JussiJSONRPCRequest needs (headers, jussi_request_id, app.config.upstreams).
dummy_request = AttrDict()
dummy_request.headers = dict()
dummy_request['jussi_request_id'] = '123456789012345'
dummy_request.app = AttrDict()
dummy_request.app.config = AttrDict()
dummy_request.app.config.upstreams = _Upstreams(TEST_UPSTREAM_CONFIG, validate=False)
# get_block for block 1000, called directly...
request = JussiJSONRPCRequest.from_request(dummy_request, 0, {
    "id": "1", "jsonrpc": "2.0",
    "method": "get_block", "params": [1000]
})
# ...and through the generic "call" wrapper.
request2 = JussiJSONRPCRequest.from_request(dummy_request, 1, {
    "id": "1", "jsonrpc": "2.0", "method": "call",
    "params": ["database_api", "get_block", [1000]]
})
# Well-formed get_block result.
response = {
    "id": 1,
    "result": {
        "previous": "000003e7c4fd3221cf407efcf7c1730e2ca54b05",
        "timestamp": "2016-03-24T16:55:30",
        "witness": "initminer",
        "transaction_merkle_root": "0000000000000000000000000000000000000000",
        "extensions": [],
        "witness_signature": "207f15578cac20ac0e8af1ebb8f463106b8849577e21cca9fc60da146d1d95df88072dedc6ffb7f7f44a9185bbf9bf8139a5b4285c9f423843720296a44d428856",
        "transactions": [],
        "block_id": "000003e8b922f4906a45af8e99d86b3511acd7a5",
        "signing_key": "STM8GC13uCZbP44HzMLV6zPZGwVQ8Nt4Kji8PapsPiNq1BK153XTX",
        "transaction_ids": []}}
# Same result but with a block_id that is far too short to be valid.
bad_response1 = {
    "id": 1,
    "result": {
        "previous": "000003e7c4fd3221cf407efcf7c1730e2ca54b05",
        "timestamp": "2016-03-24T16:55:30",
        "witness": "initminer",
        "transaction_merkle_root": "0000000000000000000000000000000000000000",
        "extensions": [],
        "witness_signature": "207f15578cac20ac0e8af1ebb8f463106b8849577e21cca9fc60da146d1d95df88072dedc6ffb7f7f44a9185bbf9bf8139a5b4285c9f423843720296a44d428856",
        "transactions": [],
        "block_id": "00000",
        "signing_key": "STM8GC13uCZbP44HzMLV6zPZGwVQ8Nt4Kji8PapsPiNq1BK153XTX",
        "transaction_ids": []}}
# block_id altered (000003e8 -> 000004e8) — presumably so the id no longer
# corresponds to the requested block 1000; confirm against the validator.
bad_response2 = {
    "id": 1,
    "result": {
        "previous": "000003e7c4fd3221cf407efcf7c1730e2ca54b05",
        "timestamp": "2016-03-24T16:55:30",
        "witness": "initminer",
        "transaction_merkle_root": "0000000000000000000000000000000000000000",
        "extensions": [],
        "witness_signature": "207f15578cac20ac0e8af1ebb8f463106b8849577e21cca9fc60da146d1d95df88072dedc6ffb7f7f44a9185bbf9bf8139a5b4285c9f423843720296a44d428856",
        "transactions": [],
        "block_id": "000004e8b922f4906a45af8e99d86b3511acd7a5",
        "signing_key": "STM8GC13uCZbP44HzMLV6zPZGwVQ8Nt4Kji8PapsPiNq1BK153XTX",
        "transaction_ids": []}}
# get_block_header fixtures: direct and via "call".
bh_request1 = JussiJSONRPCRequest.from_request(dummy_request, 0, {
    "id": "1", "jsonrpc": "2.0",
    "method": "get_block_header", "params": [1000]
})
bh_request2 = JussiJSONRPCRequest.from_request(dummy_request, 0, {
    "id": "1", "jsonrpc": "2.0", "method": "call",
    "params": ["database_api", "get_block_header", [1000]]
})
# Batch and error-shaped fixtures.
batch_request = [request, request2]
batch_response = [response, response]
error_response = {"id": "1", "jsonrpc": "2.0", "error": {}}
@pytest.mark.parametrize('req,expected', [
    (request, True),
    (request2, True),
    (dict(jsonrpc='2.0', method='m'), False)
])
def test_is_get_block_request(req, expected):
    """get_block must be recognised both directly and via 'call'."""
    # Raw dict params are wrapped into a JussiJSONRPCRequest first.
    if not isinstance(req, JussiJSONRPCRequest):
        req = JussiJSONRPCRequest.from_request(dummy_request, 0, req)
    assert is_get_block_request(req) is expected
# NOTE(review): several parametrize rows are exact duplicates
# ((request, False) twice; the dict row five times) — likely copy-paste.
@pytest.mark.parametrize('req,expected', [
    (request, False),
    (request, False),
    (request2, False),
    (bh_request1, True),
    (bh_request2, True),
    (dict(jsonrpc='2.0', method='m'), False),
    (dict(jsonrpc='2.0', method='m'), False),
    (dict(jsonrpc='2.0', method='m'), False),
    (dict(jsonrpc='2.0', method='m'), False),
    (dict(jsonrpc='2.0', method='m'), False)
])
def test_is_get_block_header_request(req, expected):
    """Only get_block_header (direct or via 'call') must match."""
    if not isinstance(req, JussiJSONRPCRequest):
        req = JussiJSONRPCRequest.from_request(dummy_request, 0, req)
    assert is_get_block_header_request(req) is expected
# Renamed the test parameter from `response` to `resp`: it shadowed the
# module-level `response` fixture used inside the parametrize table, and the
# sibling tests in this file already use `resp`.
@pytest.mark.parametrize('req,resp,expected', [
    (request, response, True),
    (request2, response, True),
    (request, error_response, False),
    (dict(jsonrpc='2.0', method='m'), [], False),
    (dict(jsonrpc='2.0', method='m'), dict(), False),
    (dict(jsonrpc='2.0', method='m'), '', False),
    (dict(jsonrpc='2.0', method='m'), b'', False),
    (dict(jsonrpc='2.0', method='m'), None, False),
    (request, [], False),
    (request, [dict()], False),
    (request, dict(), False),
    (request, '', False),
    (request, b'', False),
    (request, None, False),
    (dict(jsonrpc='2.0', method='m'), response, False),
])
def test_is_valid_get_block_response(req, resp, expected):
    """A get_block response validates only against a matching get_block request."""
    if not isinstance(req, JussiJSONRPCRequest):
        req = JussiJSONRPCRequest.from_request(dummy_request, 0, req)
    assert is_valid_get_block_response(req, resp) is expected
@pytest.mark.parametrize('req,resp,expected', [
    (request, response, True),
    (request2, response, True),
    (request, error_response, True),
    ([], [], False),
    (dict(), dict(), False),
    ('', '', False),
    (b'', b'', False),
    (None, None, False),
    (request, [], False),
    (request, [dict()], False),
    (request, dict(), False),
    (request, '', False),
    (request, b'', False),
    (request, None, False),
    ([], response, False),
    ([dict()], response, False),
    (dict(), response, False),
    ('', response, False),
    (b'', response, False),
    (None, response, False),
    ([request, request], [response], False),
    ([request], [response, response], False),
])
def test_is_valid_jsonrpc_response(req, resp, expected):
    """Error responses count as valid JSON-RPC; batch lengths must match."""
    # Removed commented-out request-coercion code: this validator accepts
    # raw dicts as well as JussiJSONRPCRequest instances.
    assert is_valid_jsonrpc_response(req, resp) is expected
def test_is_valid_jsonrpc_response_using_steemd(steemd_requests_and_responses):
    """Every recorded steemd request/response pair must validate."""
    raw_req, resp = steemd_requests_and_responses
    parsed = JussiJSONRPCRequest.from_request(dummy_request, 0, raw_req)
    assert is_valid_jsonrpc_response(parsed, resp) is True
@pytest.mark.parametrize('value,expected', [
    (response, True),
    (error_response, True),
    (request, False),
    (batch_request, False),
    (batch_response, False),
    ([], False),
    ([dict()], False),
    (dict(), False),
    ('', False),
    (b'', False),
    (None, False)
])
def test_is_valid_single_jsonrpc_response(value, expected):
    """Single response dicts (result or error) validate; batches and junk fail."""
    verdict = is_valid_single_jsonrpc_response(value)
    assert verdict is expected
def test_is_valid_single_jsonrpc_response_using_steemd(
        steemd_requests_and_responses):
    """Each recorded steemd response should validate on its own."""
    resp = steemd_requests_and_responses[1]
    assert is_valid_single_jsonrpc_response(resp) is True
@pytest.mark.parametrize('value,expected', [
    (request, False),
    (response, True),
    (batch_request, False),
    (batch_response, False),
    (error_response, False),
    ([], False),
    ([dict()], False),
    (dict(), False),
    ('', False),
    (b'', False),
    (None, False)
])
def test_is_valid_non_error_single_jsonrpc_response(value, expected):
    """Like the single-response check, but error responses are rejected too."""
    verdict = is_valid_non_error_single_jsonrpc_response(value)
    assert verdict is expected
def test_is_valid_non_error_single_jsonrpc_response_using_steemd(
        steemd_requests_and_responses):
    """Recorded steemd responses carry results, never errors."""
    resp = steemd_requests_and_responses[1]
    assert is_valid_non_error_single_jsonrpc_response(resp) is True
@pytest.mark.parametrize('req,resp,expected', [
    (request, response, True),
    (request2, response, True),
    (request, error_response, False),
    ([], [], False),
    (dict(), dict(), False),
    ('', '', False),
    (b'', b'', False),
    (None, None, False),
    (request, [], False),
    (request, [dict()], False),
    (request, dict(), False),
    (request, '', False),
    (request, b'', False),
    (request, None, False),
    ([], response, False),
    ([dict()], response, False),
    (dict(), response, False),
    ('', response, False),
    (b'', response, False),
    (None, response, False),
    ([request, request], [response], False),
    ([request], [response, response], False),
])
def test_is_valid_non_error_jsonrpc_response(req, resp, expected):
    """Same pairing rules as the plain validator, but error responses fail."""
    # Removed commented-out request-coercion code: raw dicts are accepted.
    assert is_valid_non_error_jsonrpc_response(req, resp) is expected
def test_is_valid_non_error_jsonrpc_response_using_steemd(
        steemd_requests_and_responses):
    """Recorded steemd pairs must pass the non-error validator."""
    raw_req, resp = steemd_requests_and_responses
    parsed = JussiJSONRPCRequest.from_request(dummy_request, 0, raw_req)
    assert is_valid_non_error_jsonrpc_response(parsed, resp) is True
@pytest.mark.parametrize('req,resp,expected', [
    (request, response, True),
    (request2, response, True),
    (request, error_response, False),
    ([], [], False),
    (dict(), dict(), False),
    ('', '', False),
    (b'', b'', False),
    (None, None, False),
    (request, [], False),
    (request, [dict()], False),
    (request, dict(), False),
    (request, '', False),
    (request, b'', False),
    (request, None, False),
    ([], response, False),
    ([dict()], response, False),
    (dict(), response, False),
    ('', response, False),
    (b'', response, False),
    (None, response, False),
    ([request, request], [response], False),
    ([request], [response, response], False),
    (request, bad_response1, False),
    (request, bad_response2, False),
    ([request, request], [response, bad_response1], False),
    ([request, request], [response, bad_response2], False),
    ([request, request], [bad_response1], False)
])
def test_is_valid_jussi_response(req, resp, expected):
    """Strictest validator: additionally rejects the malformed block fixtures."""
    # Removed commented-out request-coercion code: raw dicts are accepted.
    assert is_valid_jussi_response(req, resp) is expected
def test_is_valid_jussi_response_using_steemd(steemd_requests_and_responses):
    """Recorded steemd pairs must pass full jussi-level validation."""
    raw_req, resp = steemd_requests_and_responses
    parsed = JussiJSONRPCRequest.from_request(dummy_request, 0, raw_req)
    assert is_valid_jussi_response(parsed, resp) is True
|
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
# Originally contributed by Check Point Software Technologies, Ltd.
import math
import filecmp
# Detect PIL, supporting both the old standalone layout (top-level ImageChops)
# and the modern Pillow package layout. Bare `except:` clauses were narrowed
# to ImportError so unrelated errors (e.g. KeyboardInterrupt) are not swallowed.
try:
    import ImageChops
    from PIL import Image
    HAVE_PIL = True
except ImportError:
    try:
        from PIL import ImageChops
        from PIL import Image
        HAVE_PIL = True
    except ImportError:
        HAVE_PIL = False
class Screenshot:
    """Get screenshots and compare them for (near-)equality."""

    def have_pil(self):
        """Is Python Image Library installed?
        @return: installed status.
        """
        return HAVE_PIL

    def equal_old(self, img1, img2):
        """Compares two screenshots using Root-Mean-Square Difference (RMS).
        @param img1: screenshot to compare.
        @param img2: screenshot to compare.
        @return: equal status, or None when PIL is unavailable.
        """
        if not HAVE_PIL:
            return None
        image1 = Image.open(img1)
        image2 = Image.open(img2)
        # To get a measure of how similar two images are, we use
        # root-mean-square (RMS). If the images are exactly identical,
        # this value is zero.
        diff = ImageChops.difference(image1, image2)
        h = diff.histogram()
        sq = (value * ((idx % 256) ** 2) for idx, value in enumerate(h))
        sum_of_squares = sum(sq)
        # Bug fix: the denominator mixed image1's width with image2's height.
        # Normalize by one image's pixel count (ImageChops.difference requires
        # matching sizes anyway, so this is the per-pixel mean).
        rms = math.sqrt(sum_of_squares / float(image1.size[0] * image1.size[1]))
        # Might need to tweak the threshold.
        return rms < 8

    def equal(self, img1, img2):
        """Fast file-level comparison via filecmp (stat signature, then bytes)."""
        return filecmp.cmp(img1, img2)
|
import networks
import torch
import os
import numpy as np
import utils
from collections import OrderedDict
from torch.autograd import Variable
class Pix2PixModel:
    """Conditional GAN (pix2pix): generator G maps domain A to domain B and a
    discriminator D judges concatenated (A, B) pairs.

    NOTE(review): written against a legacy (pre-0.4) PyTorch API — `Variable`,
    `volatile=True`, `.data[0]`, and `cuda(device_id=...)` are all deprecated.
    """

    def name(self):
        return 'Pix2PixModel'

    def initialize(self, opt):
        """Build tensors, networks, losses and optimizers from the options object."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # CUDA tensors when any GPU id is configured, CPU tensors otherwise.
        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.isTrain = opt.isTrain  # NOTE(review): duplicate of the assignment above
        # define tensors (pre-allocated input buffers, refilled by set_input)
        self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                   opt.fineSize, opt.fineSize)
        self.input_B = self.Tensor(opt.batchSize, opt.output_nc,
                                   opt.fineSize, opt.fineSize)
        # load/define networks
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.norm, opt.use_dropout, self.gpu_ids)
        if self.isTrain:
            # LSGAN operates on raw scores; the vanilla GAN loss needs a sigmoid.
            use_sigmoid = opt.no_lsgan
            # D sees A and B concatenated along the channel dimension.
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf,
                                          opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
        if not self.isTrain or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            print('Loaded checkpoint at epoch {}'.format(opt.which_epoch))
            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch)
        if self.isTrain:
            self.fake_AB_pool = utils.ImagePool(opt.pool_size)
            self.old_lr = opt.lr
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        # NOTE(review): netD is only defined when isTrain — this print would
        # raise AttributeError in test mode; confirm intended usage.
        networks.print_network(self.netD)
        print('-----------------------------------------------')

    def set_input(self, in_):
        """Copy a data batch into the pre-allocated input buffers.

        `in_` maps 'A'/'B' to image batches; which_direction picks the source side.
        """
        AtoB = self.opt.which_direction == 'AtoB'
        input_A = in_['A' if AtoB else 'B']
        input_B = in_['B' if AtoB else 'A']
        self.input_A.resize_(input_A.size()).copy_(input_A)
        self.input_B.resize_(input_B.size()).copy_(input_B)

    def forward(self):
        """Training forward pass: produce fake_B = G(real_A)."""
        self.real_A = Variable(self.input_A)
        self.fake_B = self.netG.forward(self.real_A)
        self.real_B = Variable(self.input_B)

    # no backprop gradients
    def test(self):
        """Inference-only forward pass (volatile=True is the legacy no-grad flag)."""
        self.real_A = Variable(self.input_A, volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        self.real_B = Variable(self.input_B, volatile=True)

    def backward_D(self):
        """Discriminator step: fake pair drawn from the image pool vs real pair."""
        # Fake
        # stop backprop to the generator by detaching fake_B
        fake_AB = self.fake_AB_pool.query(
            torch.cat((self.real_A, self.fake_B), 1))
        self.pred_fake = self.netD.forward(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(self.pred_fake, False)
        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)  # .detach()
        self.pred_real = self.netD.forward(real_AB)
        self.loss_D_real = self.criterionGAN(self.pred_real, True)
        # Combined loss: average of the fake and real terms.
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()

    def backward_G(self):
        """Generator step: fool D, plus an L1 reconstruction term toward real_B."""
        # First, G(A) should fake the discriminator
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD.forward(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B — L1 weighted by lambda_A.
        self.loss_G_L1 = self.criterionL1(
            self.fake_B, self.real_B) * self.opt.lambda_A
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()

    def optimize_parameters(self):
        """One full training iteration: forward, then D update, then G update."""
        self.forward()
        self.optimizer_D.zero_grad()
        self.backward_D()
        self.optimizer_D.step()
        self.optimizer_G.zero_grad()
        self.backward_G()
        self.optimizer_G.step()

    def get_current_errors(self):
        """Return the latest scalar losses (.data[0] is legacy scalar access)."""
        return OrderedDict([('G_GAN', self.loss_G_GAN.data[0]),
                            ('G_L1', self.loss_G_L1.data[0]),
                            ('D_real', self.loss_D_real.data[0]),
                            ('D_fake', self.loss_D_fake.data[0])
                            ])

    def get_current_visuals(self):
        """Return displayable images for the current batch."""
        real_A = utils.tensor2im(self.real_A.data)
        fake_B = utils.tensor2im(self.fake_B.data)
        real_B = utils.tensor2im(self.real_B.data)
        return OrderedDict([('real_A', real_A), ('fake_B', fake_B), ('real_B', real_B)])

    def save(self, label):
        """Checkpoint both networks under the given epoch label."""
        self.save_network(self.netG, 'G', label, self.gpu_ids)
        self.save_network(self.netD, 'D', label, self.gpu_ids)

    def update_learning_rate(self):
        """Linearly decay the learning rate of both optimizers by lr/niter_decay."""
        lrd = self.opt.lr / self.opt.niter_decay
        lr = self.old_lr - lrd
        for param_group in self.optimizer_D.param_groups:
            param_group['lr'] = lr
        for param_group in self.optimizer_G.param_groups:
            param_group['lr'] = lr
        print('update learning rate: %f -> %f' % (self.old_lr, lr))
        self.old_lr = lr

    def save_network(self, network, network_label, epoch_label, gpu_ids):
        """Save CPU weights to <save_dir>/<epoch>_net_<label>.pth, then move back to GPU."""
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        torch.save(network.cpu().state_dict(), save_path)
        if len(gpu_ids) and torch.cuda.is_available():
            # NOTE(review): device_id= is a long-removed torch API argument.
            network.cuda(device_id=gpu_ids[0])

    # helper loading function that can be used by subclasses
    def load_network(self, network, network_label, epoch_label):
        """Load weights saved by save_network for the given label/epoch."""
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        network.load_state_dict(torch.load(save_path))
|
import serial
import time
def init_serial():
    """Open the module-global serial link to the robot.

    115200 baud on /dev/ttyUSB0 with a 10 s timeout; prints the port name on
    success. Removed the unused local CONUM that served no purpose.
    """
    global ser
    ser = serial.Serial()
    ser.baudrate = 115200
    ser.port = "/dev/ttyUSB0"
    ser.timeout = 10
    ser.open()
    if ser.isOpen():
        print('Open: ' + ser.portstr)
def set_mode(mode):
    """Send the mode opcode for the requested mode; unknown modes are ignored.

    Opcodes 7/131/132 presumably follow the Roomba Open Interface
    (reset/safe/full) — confirm against the OI spec.
    """
    opcode_by_mode = {0: 7, 1: 131, 3: 132}
    if mode in opcode_by_mode:
        sent = bytes([opcode_by_mode[mode]])
        ser.write(sent)
        print(sent)
def start_data():
    """Transmit opcode 128 and echo the payload."""
    packet = bytes((128,))
    ser.write(packet)
    print(packet)
def stop_data():
    """Transmit opcode 173 and echo the payload."""
    packet = bytes((173,))
    ser.write(packet)
    print(packet)
def clean():
    """Transmit opcode 135 (start a cleaning cycle) and echo the payload."""
    packet = bytes((135,))
    ser.write(packet)
    print(packet)
def go_home():
    """Transmit opcode 143 (seek dock) and echo the payload."""
    packet = bytes((143,))
    ser.write(packet)
    print(packet)
def forward():
    """Drive forward: opcode 145 with identical positive wheel-speed bytes."""
    packet = bytes((145, 0, 127, 0, 127))
    ser.write(packet)
    print(packet)
def backward():
    """Drive backward: opcode 145 with identical negative wheel-speed bytes."""
    packet = bytes((145, 255, 127, 255, 127))
    ser.write(packet)
    print(packet)
def turn_left():
    """Spin left: opcode 145 with opposing wheel-speed signs."""
    packet = bytes((145, 0, 127, 255, 127))
    ser.write(packet)
    print(packet)
def turn_right():
    """Spin right: opcode 145 with opposing wheel-speed signs (mirror of turn_left)."""
    packet = bytes((145, 255, 127, 0, 127))
    ser.write(packet)
    print(packet)
def stop():
    """Halt both wheels: opcode 145 with zero speed bytes."""
    packet = bytes((145, 0, 0, 0, 0))
    ser.write(packet)
    print(packet)
def vaccum_disable():
    """Turn motors off: opcode 138 with a zero motor bitmask.

    (Name spelling kept for caller compatibility.)
    """
    packet = bytes((138, 0))
    ser.write(packet)
    print(packet)
|
# Demo: floor division between a float and an int yields a float (10.0 // 3 -> 3.0).
a = 10.0
b = 3
c = a // b  # floor division
print(c, type(c))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
class LogisticRegression(object):
    """Binary logistic regression fitted by batch gradient descent.

    Training stops when the cross-entropy improvement per step drops below
    `coverage_change` or after `max_steps` iterations. The learned weight
    vector is exposed as `self.w`.
    """

    def __init__(self, learning_rate=.01, coverage_change=.001, max_steps=10**3):
        self.__learning_rate = learning_rate
        self.__coverage_change = coverage_change
        self.__max_steps = max_steps
        self.w = None

    def __logistic(self, x):
        # Sigmoid of the linear score x @ w.
        return 1.0 / (1.0 + np.exp(-np.dot(x, self.w.T)))

    def __J(self, X, y):
        # Mean binary cross-entropy over the batch.
        return np.mean(-y * np.log(self.__logistic(X)) - (1.0 - y) * np.log(1.0 - self.__logistic(X)))

    def train(self, X, y):
        """Fit weights on feature matrix X (bias column included) and labels y."""
        self.w = np.zeros(X.shape[1])
        cost = self.__J(X, y)
        delta_cost, step = 1, 0
        while delta_cost > self.__coverage_change and step < self.__max_steps:
            previous_cost = cost
            gradient = np.dot(self.__logistic(X) - y, X)
            self.w -= self.__learning_rate * gradient
            cost = self.__J(X, y)
            delta_cost = previous_cost - cost
            step += 1

    def predict(self, X):
        """Return hard 0/1 predictions, thresholding the sigmoid at 0.5."""
        return np.where(self.__logistic(X) >= .5, 1, 0)
def plot(w, X, y):
    """Scatter the two classes and overlay the decision line implied by w.

    Assumes X columns are [bias, x1, x2]; the boundary solves
    w0 + w1*x1 + w2*x2 = 0 for x2.
    """
    negatives = X[np.where(y == 0)]
    positives = X[np.where(y == 1)]
    plt.scatter([negatives[:, 1]], [negatives[:, 2]], c='b', label='y = 0')
    plt.scatter([positives[:, 1]], [positives[:, 2]], c='r', label='y = 1')
    boundary_x = np.arange(0, 1, 0.1)
    boundary_y = -(w[0] + w[1] * boundary_x) / w[2]
    plt.plot(boundary_x, boundary_y, c='k', label='reg line')
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.legend()
    plt.show()
if __name__ == "__main__":
    raw_df = pd.read_csv("dataset1.csv", names=['X1', 'X2', 'Y'])
    # Min-max normalize every column to [0, 1].
    normalized_df = (raw_df - raw_df.min()) / (raw_df.max() - raw_df.min())
    train, test = train_test_split(normalized_df, test_size=0.2)

    def prepare(frame):
        # Split off labels and prepend a bias column of ones.
        labels = frame['Y'].values
        features = frame.drop(['Y'], axis=1)
        features.insert(0, 'X0', np.ones(frame['X1'].values.shape[0]))
        return features.values, labels

    train_X, train_y = prepare(train)
    test_X, test_y = prepare(test)

    lreg = LogisticRegression()
    lreg.train(train_X, train_y)
    pred_y = lreg.predict(test_X)
    plot(lreg.w, train_X, train_y)
    plt.clf()
    plot(lreg.w, test_X, pred_y)
    print('Accuracy=', np.sum(pred_y == test_y)/len(test_y))
|
from socket import *
import json  # bug fix: original read "from json import" — an incomplete line and a SyntaxError
class Personnage(object):
    """One player in the game, bound to a server connection used for messaging."""

    def __init__(self, pseudo, serveur, connexion):
        # NOTE(review): `connexion` is accepted but unused; the object stored
        # under self.connexion is `serveur`. Kept as-is for caller compatibility.
        self.estVivant = True
        self.estProtege = False
        self.estMaire = False
        self.accesChat = 1
        self.pseudo = pseudo
        self.connexion = serveur

    def tuer(self):
        """Return True when the kill attempt succeeds (target not protected)."""
        return self.estProtege != True

    def meurt(self):
        # Bug fix: communique([]) used to raise IndexError on instructions[0];
        # communique now ignores empty instruction lists.
        self.communique([])

    def vote(self, pseudo):
        """Cast a vote for `pseudo`; the mayor's vote carries double weight."""
        pouvoir = 2 if self.estMaire == True else 1
        self.communique(["vote", pseudo, str(pouvoir)])

    def communique(self, instructions):
        """Join instruction tokens with spaces and send them UTF-8 encoded.

        Empty instruction lists are a no-op (previously an IndexError).
        """
        if not instructions:
            return
        commande = " ".join(instructions)
        self.connexion.send(commande.encode('Utf-8'))
|
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import column, grid
from bokeh.models import Band, ColumnDataSource
def plot_ideal_functions(ideal_functions, file_name):
    """
    Plots all ideal functions
    :param ideal_functions: list of ideal functions
    :param file_name: the name the .html file should get
    """
    # Sort in place by the paired training function's name (ascending),
    # matching the original behavior of mutating the caller's list.
    ideal_functions.sort(key=lambda fn: fn.training_function.name, reverse=False)
    figures = [
        plot_graph_from_two_functions(line_function=fn,
                                      scatter_function=fn.training_function,
                                      squared_error=fn.error)
        for fn in ideal_functions
    ]
    output_file("{}.html".format(file_name))
    # Unpack so each figure becomes its own row in the column layout.
    show(column(*figures))
def plot_points_with_their_ideal_function(points_with_classification, file_name):
    """
    Plot all points that have a matched classification
    :param points_with_classification: a list containing dicts with "classification" and "point"
    :param file_name: the name the .html file should get
    """
    # The original looped with enumerate() but never used the index.
    plots = [
        plot_classification(item["point"], item["classification"])
        for item in points_with_classification
        if item["classification"] is not None
    ]
    output_file("{}.html".format(file_name))
    show(column(*plots))
def plot_graph_from_two_functions(scatter_function, line_function, squared_error):
    """
    plots a scatter for the train_function and a line for the ideal_function
    :param scatter_function: the train function
    :param line_function: ideal function
    :param squared_error: the squared error will be plotted in the title
    """
    train_df = scatter_function.dataframe
    ideal_df = line_function.dataframe
    title = "train model {} vs ideal {}. Total squared error = {}".format(
        scatter_function.name, line_function.name, round(squared_error, 2))
    p = figure(title=title, x_axis_label='x', y_axis_label='y')
    p.scatter(train_df["x"], train_df["y"], fill_color="red", legend_label="Train")
    p.line(ideal_df["x"], ideal_df["y"], legend_label="Ideal", line_width=2)
    return p
def plot_classification(point, ideal_function):
    """
    plots the classification function and a point on top. It also displays the tolerance
    :param point: a dict with "x" and "y"
    :param ideal_function: a classification object
    :return: a bokeh figure, or None implicitly when ideal_function is None
    """
    if ideal_function is not None:
        classification_function_dataframe = ideal_function.dataframe
        point_str = "({},{})".format(point["x"], round(point["y"], 2))
        title = "point {} with classification: {}".format(point_str, ideal_function.name)
        p = figure(title=title, x_axis_label='x', y_axis_label='y')
        # draw the ideal function
        p.line(classification_function_dataframe["x"], classification_function_dataframe["y"],
               legend_label="Classification function", line_width=2, line_color='black')
        # procedure to show the tolerance within the graph
        criterion = ideal_function.tolerance
        # NOTE(review): these two assignments add columns to the dataframe
        # returned by ideal_function.dataframe — if that property hands back a
        # shared frame, the caller's data is mutated; confirm or copy first.
        classification_function_dataframe['upper'] = classification_function_dataframe['y'] + criterion
        classification_function_dataframe['lower'] = classification_function_dataframe['y'] - criterion
        source = ColumnDataSource(classification_function_dataframe.reset_index())
        # shaded band spanning y ± tolerance, drawn underneath the line
        band = Band(base='x', lower='lower', upper='upper', source=source, level='underlay',
                    fill_alpha=0.3, line_width=1, line_color='green', fill_color="green")
        p.add_layout(band)
        # draw the point
        p.scatter([point["x"]], [round(point["y"], 4)], fill_color="red", legend_label="Test point", size=8)
        return p
|
class Flow(object):
    """A flow with a rate that subclasses may recompute each step."""

    # Class-level fallbacks; __init__ overwrites both per instance.
    name = 'Flow'
    rate = 0.0

    def __init__(self, name=None, rate=0):
        self.name = name
        self.rate = rate

    def update_rate(self):
        # Placeholder hook: base flows have a constant rate.
        return self.rate

    def step(self):
        """Advance one tick and report the current rate."""
        return self.rate
class LinearEQFlow(Flow):
    """Flow whose rate is a weighted linear combination of dependency values."""

    # Kept for backward compatibility with code reading the class attribute;
    # instances now receive their own dict (the class-level mutable dict was
    # shared across all instances — a classic aliasing bug).
    dependencies = {}

    def __init__(self, name=None, rate=0, dependencies=None):
        super(LinearEQFlow, self).__init__(name, rate)
        self.dependencies = {} if dependencies is None else dependencies

    def update_rate(self):
        # rate = sum_i value_i * weight_i over objects exposing `.value`.
        self.rate = sum(d.value * weight for d, weight in self.dependencies.items())
        return self.rate
class ExponentialFlow(Flow):
    """Flow whose rate is a sum of dependency values raised to per-dependency exponents."""

    # Kept for backward compatibility; instances get their own dict in
    # __init__ so state is no longer shared across instances.
    dependencies = {}

    def __init__(self, name=None, rate=0, dependencies=None):
        super(ExponentialFlow, self).__init__(name, rate)
        self.dependencies = {} if dependencies is None else dependencies

    def update_rate(self):
        # rate = sum_i value_i ** exponent_i over objects exposing `.value`.
        self.rate = sum(d.value ** exponent for d, exponent in self.dependencies.items())
        return self.rate
import sys
import os.path
import katcp_wrapper
# Derive the ROACH board hostname from this script's own filename (extension
# stripped) — presumably the script is symlinked/copied once per board.
roach_board, ext = os.path.splitext(os.path.basename(sys.argv[0]))
fpga = katcp_wrapper.FpgaClient(roach_board)
# NOTE(review): the connectivity result is discarded; consider checking or
# printing the returned status.
fpga.is_connected()
|
# -*- coding: utf-8 -*-
import json
import requests
from lxml import etree
from threading import Timer
# Hot-list endpoints scraped by the Spider methods below.
vsite_api = "https://www.v2ex.com/?tab=hot"
bsite_api = 'https://www.bilibili.com/ranking/all/0/0/1'
weibo_api = "https://s.weibo.com/top/summary?cate=realtimehot"
tieba_api = "http://tieba.baidu.com/hottopic/browse/topicList?res_type=1"
zhihu_api = 'https://www.zhihu.com/api/v3/feed/topstory/hot-lists/total?limit=50&desktop=true'
# Desktop-Chrome User-Agent so the sites serve their regular markup.
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"}
#组装数据
def packdata(para_data):
    """Convert [title, url] pairs into a list of {"title": ..., "url": ...} dicts."""
    return [{"title": item[0], "url": item[1]} for item in para_data]
class Spider(object):
    """Scraper for several hot/trending lists.

    Constructing with a URL eagerly fetches it and keeps both the raw
    response (`self.res`) and an lxml-parsed DOM (`self.soup`).
    """

    def __init__(self, url=None):
        if url != None:
            self.url = url
            self.res = requests.get(url, headers=headers)
            self.res.encoding = "utf-8"
            self.soup = etree.HTML(self.res.text)

    # Zhihu hot list
    def spider_zhihu(self):
        list_zhihu = []  # accumulates [title, id] pairs
        res = Spider(zhihu_api).res
        # Walk the JSON payload returned by the API.
        zhihu_data = json.loads(res.text)['data']
        for part_zhihu_data in zhihu_data:  # one entry per hot question
            zhihu_id = part_zhihu_data['target']['id']  # question id
            zhihu_title = part_zhihu_data['target']['title']  # question title
            list_zhihu.append([zhihu_title, zhihu_id])
        return packdata(list_zhihu)

    # Weibo trending searches
    def spider_weibo(self):
        list_weibo = []  # accumulates [title, url] pairs
        weibo = "https://s.weibo.com"
        soup = Spider(weibo_api).soup
        for soup_a in soup.xpath("//td[@class='td-02']/a"):
            wb_title = soup_a.text
            wb_url = weibo + soup_a.get('href')
            # Filter out Weibo ad rows, which use a javascript:void(0) href.
            if "javascript:void(0)" in wb_url:
                pass
            else:
                list_weibo.append([wb_title, wb_url])
        return packdata(list_weibo)

    # Tieba hot-topic list
    def spider_tieba(self):
        list_tieba = []
        # Bug fix: was `soup = soup = Spider(...)` — a duplicated assignment target.
        soup = Spider(tieba_api).soup
        for soup_a in soup.xpath("//a[@class='topic-text']"):
            tieba_title = soup_a.text
            tieba_url = soup_a.get('href')
            list_tieba.append([tieba_title, tieba_url])
        return packdata(list_tieba)

    # V2EX hot list
    def spider_vsite(self):
        list_v2ex = []
        vsite = "https://www.v2ex.com"
        soup = Spider(vsite_api).soup
        for soup_a in soup.xpath("//span[@class='item_title']/a"):
            vsite_title = soup_a.text
            vsite_url = vsite + soup_a.get('href')
            list_v2ex.append([vsite_title, vsite_url])
        return packdata(list_v2ex)

    # Bilibili ranking
    def spider_bsite(self):
        list_bsite = []
        soup = Spider(bsite_api).soup
        for i in soup.xpath("//div[@class='info']/a"):
            bsite_title = i.xpath('text()')[0]
            bsite_url = i.get('href')
            list_bsite.append([bsite_title, bsite_url])
        return packdata(list_bsite)
Spider().spider_bsite()
|
# code mostley based on "Complex Momentum for Optimization in Games"
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import matplotlib as mpl
from tqdm import tqdm
def get_spectral_radius(g, eta, beta, is_eg=False, is_eg_and_cm=False,
                        is_og=False):
    """Spectral radius of the linearized update operator for a game Jacobian g.

    The flags select the method being analyzed: extragradient (is_eg),
    extragradient combined with complex momentum (is_eg_and_cm), optimistic
    gradient (is_og), or — by default — (complex) momentum with step size
    `eta` and momentum `beta`. Returns the maximum eigenvalue magnitude of
    the corresponding iteration matrix; convergence requires a value < 1.
    """
    if is_eg:
        # Closed form on g's eigenvalues: 1 - eta*(lam*(1 - beta.real*lam)).
        eig_vals, eig_vecs = np.linalg.eig(g)
        eg_eig_vals = 1 - eta * (eig_vals * (1 - beta.real * eig_vals))
        eig_norms = np.real(np.sqrt(eg_eig_vals * np.conjugate(eg_eig_vals)))
        max_norm = np.max(eig_norms)
        return max_norm
    elif is_eg_and_cm:
        # Replace g by its extragradient-preconditioned spectrum, then fall
        # through to the momentum analysis below.
        eig_vals, eig_vecs = np.linalg.eig(g)
        eg_eig_vals = eig_vals * (1 - eta * eig_vals)
        g = np.diag(eg_eig_vals)
    elif is_og:
        # Optimistic gradient: 2n x 2n companion-style matrix over the last
        # two iterates.
        n = len(g)
        R_t_min_1 = np.concatenate((np.eye(n) * 0.0, np.eye(n)))
        R_t = np.concatenate((beta.real * g,
                              np.eye(n) - 2.0 * eta.real * g))
        R = np.concatenate([R_t_min_1, R_t], axis=1)
        eig_vals, eig_vecs = np.linalg.eig(R)
        eig_norms = np.real(np.sqrt(eig_vals * np.conjugate(eig_vals)))
        max_norm = np.max(eig_norms)
        return max_norm
    # (Complex) momentum: 3n x 3n augmented matrix tracking the real part,
    # imaginary part, and the parameters themselves.
    n = len(g)
    R_re = np.concatenate((beta.real * np.eye(n),
                           -beta.imag * np.eye(n), -g))
    R_im = np.concatenate([(beta.imag) * np.eye(n),
                           beta.real * np.eye(n),
                           0 * np.eye(n)])
    R_param = np.concatenate([
        (eta * beta).real * np.eye(n),
        -(eta * beta).imag * np.eye(n),
        np.eye(n) - eta.real * g])
    R = np.concatenate([R_re, R_im, R_param], axis=1)
    eig_vals, eig_vecs = np.linalg.eig(R)
    eig_norms = np.real(np.sqrt(eig_vals * np.conjugate(eig_vals)))
    max_norm = np.max(eig_norms)
    return max_norm
def get_spectral_radius_LEAD(nabla_v, M, D, P, eta, beta, alpha):
    """Spectral radius of the LEAD update's Jacobian.

    Builds the 4n x 4n block companion matrix over the current and previous
    iterates and returns its largest eigenvalue magnitude.
    NOTE: `M` is accepted for signature compatibility but unused here.
    """
    half = nabla_v.shape[0] // 2
    eye = np.eye(2 * half)
    dp = np.dot(D, P)
    top_left = (1 + beta) * eye - eta * nabla_v - alpha * dp
    top_right = -beta * eye + alpha * nabla_v + alpha * dp
    jac = np.concatenate([
        np.concatenate([top_left, top_right], 1),
        np.concatenate([eye, np.zeros((2 * half, 2 * half))], 1),
    ])
    lam = np.linalg.eig(jac)[0]
    return np.max(np.real(np.sqrt(lam * np.conjugate(lam))))
# Build a 14-player quadratic game: an antisymmetric adversarial component and
# a diagonal cooperative component, later mixed per-player by gamma.
np.random.seed(0)
n_game = 14
sample_constant_adv = 4.0
# Adversarial strengths log-spread in [1/4, 4].
rand_vals_1 = np.linspace(
    1.0 / sample_constant_adv, sample_constant_adv, num=n_game // 2)
matrix_A = np.diag(rand_vals_1)
# Antisymmetric off-diagonal blocks: [[0, -A], [A, 0]].
adv_component = np.zeros((n_game, n_game))
adv_component[:n_game // 2, n_game // 2:] = -matrix_A
adv_component[n_game // 2:, :n_game // 2] = matrix_A
sample_constant_coop = sample_constant_adv
matrix_B = np.linspace(
    1.0 / sample_constant_coop, sample_constant_coop, num=n_game)
# Mirror the first half onto the second so both players share curvatures.
matrix_B[:n_game // 2] = np.linspace(
    1.0 / sample_constant_coop, sample_constant_coop, num=n_game // 2)
matrix_B[-n_game // 2:] = matrix_B[:n_game // 2]
coop_component = np.diag(matrix_B)
num_hist_bins = 50  # resolution of every hyperparameter grid below
dist_flag = False   # toggles the optional overlaid density on the final plot
g = None
plt.figure(figsize=(8, 5))
ax2 = plt.subplot(1, 1, 1)
cmap = mpl.cm.get_cmap('viridis')
# beta is the momentum
# eta is the learning rate that we do the for-loop over
# For each optimization method, sweep the adversarialness mixture and record
# the best achievable spectral radius over a grid of hyperparameters.
for beta_phase, beta_phase_string in (
        [(None, 'LEAD'),
         (0.0, 'PM'),
         (np.pi / 2.0, r'CM (pi/2)'),
         (np.pi, r'NM'),
         (None, r'EG'),
         (None, r'OG'),
         (None, r'GDA')]):
    mixture_scores = []
    mixture_space = np.linspace(0, 1, num=20)
    alpha_space = np.linspace(-0.5, 0.5, num=num_hist_bins)
    for mixture in tqdm(mixture_space):
        # Re-seed so every method sees the identical game per mixture value.
        np.random.seed(0)
        gamma = np.zeros(n_game)
        gamma[:n_game // 2] = np.random.uniform(
            0, mixture, size=n_game // 2)
        gamma[0] = 0
        gamma[n_game // 2 - 1] = mixture
        gamma[-n_game // 2:] = gamma[:n_game // 2]
        # Interpolate between cooperative and adversarial dynamics.
        g = gamma * adv_component + (1 - gamma) * coop_component
        beta_mags, eta_mags, rates = [], [], []
        if beta_phase is not None:
            # Momentum-family methods (PM/CM/NM): sweep |beta| and eta.
            beta_mag_space = np.linspace(0.0, 1.0, num=num_hist_bins)
            for beta_mag in beta_mag_space:
                beta = beta_mag * np.exp(1j * beta_phase)
                eta_space = np.linspace(.0, 1.0, num=num_hist_bins)
                for eta in eta_space:
                    is_eg_and_cm = False
                    spectral_radius = get_spectral_radius(g=g, eta=eta, beta=beta, is_eg_and_cm=is_eg_and_cm)
                    beta_mags += [beta_mag]
                    eta_mags += [eta]
                    if is_eg_and_cm:
                        # EG+CM costs two gradient evaluations per step.
                        rates += [np.sqrt(spectral_radius)]
                    else:
                        rates += [spectral_radius]
        else:
            if beta_phase_string == 'LEAD':
                # Assemble the LEAD operator pieces from the game components.
                n = adv_component.shape[0]
                gamma = np.diag(gamma)
                M = (np.dot(np.eye(n) - gamma, coop_component) +
                     np.dot(coop_component.T, (np.eye(n) - gamma).T)) / 2
                row_1 = np.concatenate([M, np.dot(gamma, adv_component)], 1)
                row_2 = np.concatenate([-np.dot(adv_component.T, gamma.T), M], 1)
                nabla_v = np.concatenate([row_1, row_2])
                row_1 = np.concatenate([np.dot(gamma, adv_component), np.zeros((n, n))], 1)
                row_2 = np.concatenate([np.zeros((n, n)), -np.dot(adv_component.T, gamma.T)], 1)
                D = np.concatenate([row_1, row_2])
                row_1 = np.concatenate([np.zeros((n, n)), np.eye(n)], 1)
                row_2 = np.concatenate([np.eye(n), np.zeros((n, n))], 1)
                P = np.concatenate([row_1, row_2])
            eta_space = np.linspace(0.0, 1.75, num=num_hist_bins)
            # NOTE(review): `alphas` is reset here but also read after the
            # momentum branch above, where it keeps its value from a previous
            # method iteration — confirm the "best alpha" print is meaningful
            # only for LEAD.
            alphas = []
            for eta in eta_space:
                beta_mag_space = eta_space
                for beta_mag in beta_mag_space:
                    beta = beta_mag
                    if beta_phase_string == r'EG':
                        spectral_radius = get_spectral_radius(g=g, eta=eta, beta=beta, is_eg=True)
                        beta_mags += [beta_mag]
                        eta_mags += [eta]
                        # sqrt: EG uses two gradient evaluations per step.
                        rates += [np.sqrt(spectral_radius)]
                    elif beta_phase_string == r'OG':
                        spectral_radius = get_spectral_radius(g=g, eta=eta, beta=beta, is_og=True)
                        beta_mags += [beta_mag]
                        eta_mags += [eta]
                        rates += [spectral_radius]
                    elif beta_phase_string == 'LEAD':
                        for alpha in alpha_space:
                            spectral_radius = get_spectral_radius_LEAD(nabla_v, M, D, P, eta, beta, alpha)
                            beta_mags += [beta_mag]
                            eta_mags += [eta]
                            alphas += [alpha]
                            rates += [spectral_radius]
                    else:
                        # GDA: plain gradient descent-ascent (no momentum).
                        spectral_radius = get_spectral_radius(g=g, eta=eta, beta=0.0, is_eg=False)
                        beta_mags += [0.0]
                        eta_mags += [eta]
                        rates += [spectral_radius]
        # Best convergence rate over the hyperparameter grid for this mixture.
        best_rate = np.min(rates)
        mixture_scores += [best_rate]
        best_index = np.argmin(rates)
        print('Method:', beta_phase_string)
        if len(alphas) > best_index:
            print('best alpha:', alphas[best_index])
        if len(eta_mags) > best_index:
            print('best eta:', eta_mags[best_index])
        if len(beta_mags) > best_index:
            print('best beta:', beta_mags[best_index])
    # Plot styling per method.
    label = None
    if beta_phase_string is not None:
        label = beta_phase_string
    c = None
    marker = None
    markersize = None
    if beta_phase is not None:
        c = cmap(beta_phase / np.pi)
    if beta_phase_string == 'PM':
        c = 'tab:purple'
        marker = 'x'
        markersize = 12
    elif beta_phase_string == 'LEAD':
        c = 'tab:orange'
        marker = 'x'
        markersize = 12
    elif beta_phase_string == r'NM':
        c = 'tab:green'
        marker = 'x'
        markersize = 12
    elif beta_phase_string == r'EG':
        c = 'tab:red'
        marker = 'o'
        label = beta_phase_string
    elif beta_phase_string == r'OG':
        c = 'tab:gray'
        marker = 'o'
        label = beta_phase_string
    elif beta_phase_string == r'GDA':
        c = 'tab:blue'
        marker = '+'
        markersize = 12
        label = beta_phase_string
    ax2.set_xlim([0.0, 1.0])
    ax2.semilogy(mixture_space, mixture_scores,
                 label=label, c=c, alpha=0.75,
                 marker=marker, markersize=markersize,
                 linewidth=3, zorder=10)
ax2.legend(framealpha=1.0)
ax2.set_xlabel('Max adversarialness γmax')
ax2.set_ylabel('# grad. eval. to converge')
ax2.xaxis.set_ticks_position('none')
ax2.yaxis.set_ticks_position('none')
ax2.grid(color="k", linestyle="--", linewidth=0.5, alpha=0.3)
if dist_flag:
    # Optional: overlay a normal density suggesting typical adversarialness.
    ax3 = ax2.twinx()
    cur_xs = np.linspace(0.0, 1.0, 10)
    cur_ys = norm.pdf(cur_xs, 0.25, 0.27)
    ax3.fill_between(cur_xs, cur_ys, alpha=0.25, zorder=0)
    ax3.set_ylim([0.0, 1.5])
    dist_flag = False
plt.savefig('phase_compare.pdf', transparent=True, bbox_inches='tight')
|
def findFirstDifferentCharacter(firstSenctence, secondSentence):
    """Return the index of the first position where the strings differ.

    Only the overlapping prefix is compared (zip stops at the shorter
    string); -1 means no difference was found within that prefix.
    """
    for index, (left, right) in enumerate(zip(firstSenctence, secondSentence)):
        if left != right:
            return index
    return -1
# Read two strings and report where they first differ (-1 when they don't).
sentence_a = input("Geef een string: ")
sentence_b = input("Geef een string: ")
print(findFirstDifferentCharacter(sentence_a, sentence_b))
import pandas as pd
class Rock:
    """A rock sample record."""

    def __init__(self, id, name):
        # `id` kept as the parameter name for caller compatibility,
        # even though it shadows the builtin inside this scope.
        self.id = id
        self.name = name
class Sensor:
    """A measurement sensor identified by id and short abbreviation."""

    def __init__(self, id, abbreviation):
        # `id` kept for caller compatibility despite shadowing the builtin.
        self.id = id
        self.abbreviation = abbreviation
class Experiment:
    """An experiment bound to a dataset; metadata is filled in after creation.

    (Removed a commented-out, half-written calculate_frequency stub.)
    """

    def __init__(self, dataset):
        self.dataset = dataset
        self.rock_id = None      # id of the Rock under test, set later
        self.description = None  # free-text description
        self.start_time = None   # when the measurement began
        self.frequency = None    # sampling frequency, computed elsewhere
|
import socket
import cevents
import json
import signal
import sys
import os
class IRCBot: #Main bot class
    """Minimal IRC bot configured via the 'ircbot' section of conf.json.

    The socket is wrapped in a text-mode file handle; all protocol traffic is
    emitted by print(..., file=self.handle) with CRLF newlines.
    """

    def __init__(self):
        self.config = json.load(open("conf.json", "r"))
        self.config = self.config['ircbot']
        self.isquitting = False  # set by quit(); distinguishes clean exits
        self.parseline = ""      # last raw line seen (used hackily by getnames)
        if self.config['ipv6'] == True:
            self.ircsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        else:
            self.ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self):
        """Connect, register NICK/USER, then hand off to the main loop."""
        self.ircsock.connect((self.config['server'], self.config['port']))
        # newline='\r\n' makes print() emit protocol-correct line endings.
        self.handle = self.ircsock.makefile(mode='rw', buffering=1, encoding='utf-8', newline='\r\n')
        print("NICK", self.config['nick'], file=self.handle)
        print("USER", self.config['ident'], "localhost", "localhost", ':'+self.config['realname'], file=self.handle)
        self.mainloop()

    def mainloop(self):
        """Read server lines forever, answering PINGs and dispatching PRIVMSGs."""
        for line in self.handle:
            line = line.strip()
            self.parseline = line.strip()
            if self.config['debugmode'] == True: #Suppresses most server messages if false
                print(line)
            # NOTE(review): line.split()[1] raises IndexError on one-token
            # lines, so the `line == ''` branch below is likely unreachable.
            if line.split()[1] == "001":
                # 001 = welcome numeric: registration succeeded.
                print("Connection successful")
                self.cjoin(self.config['channels'])
                print("PRIVMSG NICKSERV :IDENTIFY "+self.config['nickservpass'], file=self.handle)
            elif line.split()[0] == "PING": #Reply to PINGs
                print("PONG :"+line.split(':')[1], file=self.handle)
            elif line.split()[0] == "ERROR": #If the server disconnects us
                if self.isquitting == False:
                    print("Disconnected")
                    sys.exit(5)
            elif line == '': #No more data
                if self.isquitting == False:
                    print("Abnormal disconnect, restarting")
                    sys.exit(5)
            ##########################
            #Event capturing
            ##########################
            elif line.split()[1] == "PRIVMSG":
                print(line)
                cevents.commandevent(self, self.config['nick'], line)

    #########################
    #Main IRC functions
    #########################
    def msg(self, target, message):
        """Send a PRIVMSG to a channel or nick."""
        print("PRIVMSG --> {t}: {m}".format(t=target,m=message))
        print("PRIVMSG "+target+" :"+message, file=self.handle)

    def act(self, target, message):
        """Send a CTCP ACTION (/me) to a channel or nick."""
        print("ACTION --> {t}: {m}".format(t=target,m=message))
        print("PRIVMSG "+target+" :\001ACTION "+message+"\001", file=self.handle)

    def notice(self, target, message):
        """Send a NOTICE."""
        print("NOTICE "+target+" :"+message, file=self.handle)

    def cjoin(self, channel):
        """JOIN a channel (or comma-separated channel list)."""
        print("JOIN "+channel, file=self.handle)

    def cpart(self, channel, message):
        """PART a channel, optionally with a parting message."""
        if message == None:
            print("PART "+channel, file=self.handle)
        else:
            print("PART "+channel+" :"+message, file=self.handle)

    def quit(self, message, restart):
        """QUIT the server; restart=True keeps isquitting False so the
        ERROR/disconnect branches in mainloop trigger the restart exit path."""
        if restart == True:
            self.isquitting = False
        else:
            self.isquitting = True
        if message == None:
            print("QUIT", file=self.handle)
        else:
            print("QUIT :"+message, file=self.handle)

    def kick(self, channel, user, message):
        """KICK a user from a channel, optionally with a reason."""
        if message == None:
            print("KICK "+channel+" "+user, file=self.handle)
        else:
            print("KICK "+channel+" "+user+" :"+message, file=self.handle)

    def getnames(self, channel):
        """Request NAMES for a channel.

        NOTE(review): this inspects self.parseline, i.e. the line read
        *before* the request was sent — the 353 reply for this NAMES has not
        arrived yet, so the return value is stale/unreliable.
        """
        print("NAMES "+channel, file=self.handle)
        if self.parseline.split()[1] == "353":
            names = self.parseline.split()[5]
            return names

    def ping(self, param):
        """Send a client-originated PING."""
        print("PING :"+param, file=self.handle)

    def nick(self, name):
        """Change nickname."""
        print("NICK "+name, file=self.handle)

    def restart(self):
        """Replace this process with a fresh copy of the interpreter + argv."""
        py = sys.executable
        print("Restarting...")
        os.execl(py, py, * sys.argv)
|
'''
Processes data
'''
import torch
import numpy as np
import utils
import os.path as osp
import pdb
'''
Load subsampled genetics data.
'''
def clean_genetics_data():
    """Load the raw genetics matrix, drop columns with missing entries
    (encoded as 0), and recode 2->0, 1->1."""
    data = np.loadtxt(osp.join(utils.data_dir, 'ALL.20k.data'), delimiter=' ')
    # Bug fix: np.int was deprecated and removed in NumPy >= 1.24;
    # the builtin int is the documented replacement.
    col_sums = np.sum((data == 0).astype(int), axis=0)
    # remove any column containing 0, which indicates missing
    data = data[:, col_sums == 0]
    # convert 2->0, 1->1
    data = 2 - data
    return data
def load_genetics_data():
    """Load the pre-sampled genetics matrix and move it to the project device
    as a float32 tensor."""
    X = np.load(osp.join(utils.data_dir, 'sampled_data.npy'))
    X = torch.from_numpy(X).to(dtype=torch.float32, device=utils.device)
    return X
def load_glove_data():
    """Load a prebuilt GloVe matrix as a float32 tensor on the project device.
    NOTE(review): hard-coded absolute path — only works on the original host."""
    X = np.load('/home/yihdong/partition/data/glove_dataset.npy')
    X = torch.from_numpy(X).to(dtype=torch.float32, device=utils.device)
    return X
def process_glove_data(dim=100):
    """Return (vocab, word_emb) for GloVe vectors, using the cached
    glove_embs.pt when present, otherwise parsing the raw txt file."""
    path = osp.join(utils.data_dir, 'glove_embs.pt')
    if osp.exists(path):
        d = torch.load(path)
        # cache was saved as {'vocab': words_ar, 'word_emb': word_emb}
        return d['vocab'], d['word_emb'].to(utils.device)
    else:
        return load_process_glove_data(dim)
'''
Process glove vectors from raw txt file into numpy arrays.
'''
def load_process_glove_data(dim=100):
    """Parse glove.6B.{dim}d.txt into a vocab list and an embedding tensor
    on the project device."""
    path = osp.join(utils.data_dir, 'glove.6B.{}d.txt'.format(dim))
    lines = load_lines(path)
    words_ar = []
    word_emb = torch.zeros(len(lines), dim)
    for row, line in enumerate(lines):
        token, *coords = line.split()
        words_ar.append(token)
        word_emb[row] = torch.FloatTensor([float(c) for c in coords])
    return words_ar, word_emb.to(utils.device)
def load_lines(path):
    """Return the file's lines with trailing newlines removed."""
    with open(path, 'r') as handle:
        content = handle.read()
    return content.splitlines()
def write_lines(lines1, path):
    """Write each element of lines1 to `path`, one per line.

    Bug fix: the original used os.linesep but only imported os.path (as
    osp), so it raised NameError. Text mode already translates '\n' to the
    platform separator, so a plain '\n' terminator is correct anyway.
    """
    with open(path, 'w') as file:
        file.writelines(str(line) + '\n' for line in lines1)
if __name__ == '__main__':
    data = load_genetics_data()
    # NOTE(review): drops into the interactive debugger — development aid.
    pdb.set_trace()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 12 11:04:28 2017
@author: Administrator
"""
def sum_list(list1):
    """Return the sum of the numbers in list1 (0 for an empty list)."""
    # `sum(i for i in list1)` wrapped the list in a redundant identity
    # generator; the builtin handles the iterable directly.
    return sum(list1)
# WARNING(review): eval() on raw user input executes arbitrary code;
# consider ast.literal_eval for parsing a literal sequence safely.
a=eval(input())
b=list(a)
c=sum_list(b)
print(c)
import bz2
import json
def make_stream(data):
    """
    Serialize `data` to JSON and return the bz2-compressed bytes of it.
    """
    encoded = json.dumps(data).encode('utf-8')
    return bz2.compress(encoded)
def decompress(data):
    """
    bz2-decompress `data` and decode the result as UTF-8 text.
    """
    return bz2.decompress(data).decode('utf-8')
# hash table
class Solution:
    def longestSubsequence(self, arr: list, difference: int) -> int:
        """Length of the longest arithmetic subsequence of arr with the
        given common difference (LeetCode 1218)."""
        # Bug fix: the original referenced `List` and `defaultdict` without
        # importing them, raising NameError when the class was defined.
        from collections import defaultdict
        dp = defaultdict(int)  # dp[v] = best subsequence length ending in value v
        best = 0
        for value in arr:
            dp[value] = dp[value - difference] + 1
            best = max(dp[value], best)
        return best
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Util to allow tensorflow_hub to be used both in 1.x and 2.x TensorFlow.
Note: this should not be needed once TF 1.13 is the lowest version to support as
that contains the tf.compat.v1 symbol.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
try:
from tensorflow.compat.v1 import * # pylint: disable=wildcard-import
# The previous line also gets us tensorflow.compat.v1.estimator.
# Be sure not to import from tensorflow_estimator without version selection.
except ImportError:
from tensorflow import add_to_collection
from tensorflow import app
from tensorflow import assign
from tensorflow import assign_add
from tensorflow import AttrValue
from tensorflow import colocate_with
from tensorflow import constant_initializer
from tensorflow import convert_to_tensor_or_indexed_slices
from tensorflow import estimator
from tensorflow import feature_column
from tensorflow import FixedLenFeature
from tensorflow import fixed_size_partitioner
from tensorflow import gather
from tensorflow import get_collection
from tensorflow import get_collection_ref
from tensorflow import get_default_graph
from tensorflow import get_variable
from tensorflow import get_variable_scope
from tensorflow import gfile
from tensorflow import global_variables
from tensorflow import global_variables_initializer
from tensorflow import initializers
from tensorflow import Graph
from tensorflow import GraphKeys
from tensorflow import layers
from tensorflow import losses
from tensorflow import MetaGraphDef
from tensorflow import name_scope
from tensorflow import nn
from tensorflow import placeholder
from tensorflow import regex_replace
from tensorflow import reset_default_graph
from tensorflow import saved_model
from tensorflow import Session
from tensorflow import set_random_seed
from tensorflow import SparseTensor
from tensorflow import SparseTensorValue
from tensorflow import sparse_fill_empty_rows
from tensorflow import sparse_placeholder
from tensorflow import sparse_reset_shape
from tensorflow import sparse_split
from tensorflow import sparse_tensor_to_dense
from tensorflow import string_to_hash_bucket_fast
from tensorflow import train
from tensorflow import trainable_variables
from tensorflow import tables_initializer
from tensorflow import variable_scope
from tensorflow import zeros_initializer
# pylint: enable=g-import-not-at-top
# pylint: enable=unused-import
|
# Generated by Django 2.2.4 on 2019-09-27 19:30
from django.db import migrations
class Migration(migrations.Migration):
    """Rename JobOpening.job_summary to company_description."""
    dependencies = [
        ('job', '0020_auto_20190925_0440'),
    ]
    operations = [
        migrations.RenameField(
            model_name='jobopening',
            old_name='job_summary',
            new_name='company_description',
        ),
    ]
|
"""
An implementation of the divided difference algorithm.
Function divided_difference:
Given a scalar function and list of knots, then it give the output of the
divided difference algorithm; useful for such applications as
interpolation.
"""
import logging
from typing import Callable
import numpy as np
def divided_difference(
    function: Callable[[float], float], knots: np.ndarray
) -> float:
    r"""Return the divided difference of `function` over `knots`.

    For a single knot this is simply ``function(knots[0])``. For m+1 knots
    it is computed analytically as

    .. math::
        \sum_{j=0}^m \frac{f(t_j)}{W'_{0,m}(t_j)},
        \quad W'_{0,m}(t_i) = \prod_{j \ne i} (t_i - t_j)

    (Nikolai Golovanov, "Geometric Modeling", eq. (1.7.3)), which is
    equivalent to the usual recursive definition (eq. (1.7.2)) and useful
    for interpolation and truncated Taylor series coefficients.

    :param function: function over which to take the divided difference.
    :type function: Callable[[float], float]
    :param knots: array of knots.
    :type knots: np.ndarray
    :returns: the divided difference value.
    :rtype: float
    :raises ValueError: if the knots array has length 0.
    """
    if knots.shape[0] == 0:
        raise ValueError("knots must be a list of floats with length >= 1")
    if knots.shape[0] == 1:
        difference = function(knots[0])
        logging.debug(
            "Calculating in the case of only one knot, the divided "
            "difference is %e",
            difference,
        )
        return difference
    terms = [
        function(knots[idx]) / _divided_difference_denominator(idx, knots)
        for idx in range(knots.shape[0])
    ]
    difference = float(np.sum(np.array(terms)))
    logging.debug(
        "Calculating in the general case, the divided difference is %e.",
        difference,
    )
    return difference
def _divided_difference_denominator(
    knot_index: int, knots: np.ndarray
) -> float:
    """Denominator of the analytic divided difference:
    W'(t_i) = product over j != i of (t_i - t_j).

    :param knot_index: index of the knot the denominator is for.
    :type knot_index: int
    :param knots: the array of knots.
    :type knots: numpy.ndarray
    :returns: the denominator value.
    :rtype: float
    """
    pivot = knots[knot_index]
    factors = [
        pivot - other
        for j, other in enumerate(knots)
        if j != knot_index
    ]
    return float(np.prod(np.array(factors)))
|
import glob
import os
import shutil
from invoke import task
import disba
@task
def build(c):
    """Build sdist and wheel into a freshly-cleaned dist/ directory."""
    shutil.rmtree("dist", ignore_errors=True)
    c.run("python -m build --sdist --wheel .")
@task
def tag(c):
    """Create and push a git tag for the current disba version."""
    c.run("git tag v{}".format(disba.__version__))
    c.run("git push --tags")
@task
def upload(c):
    """Upload built distributions to PyPI via twine."""
    c.run("twine upload dist/*")
@task
def clean(c, bytecode=False):
    """Remove build artifacts; with bytecode=True also *.pyc and __pycache__."""
    patterns = [
        "build",
        "dist",
        "disba.egg-info",
    ]
    if bytecode:
        patterns += glob.glob("**/*.pyc", recursive=True)
        patterns += glob.glob("**/__pycache__", recursive=True)
    for pattern in patterns:
        if os.path.isfile(pattern):
            os.remove(pattern)
        else:
            shutil.rmtree(pattern, ignore_errors=True)
@task
def black(c):
    """Format source and tests with black (py36 target)."""
    c.run("black -t py36 disba")
    c.run("black -t py36 test")
@task
def docstring(c):
    """Reformat docstrings in place with docformatter."""
    c.run("docformatter -r -i --blank --wrap-summaries 88 --wrap-descriptions 88 --pre-summary-newline disba")
@task
def isort(c):
    """Sort imports in source and tests with isort."""
    c.run("isort disba")
    c.run("isort test")
@task
def format(c):
    """Run the full formatting pipeline: isort, black, docstring."""
    c.run("invoke isort black docstring")
|
'''
Created on Jan 20, 2016
@author: Andrei Padnevici
'''
import os
import sys
# Walk a directory tree and print the size (MiB) of every .dll found.
if len(sys.argv) > 1:
    startFolder = sys.argv[1]
else:
    startFolder = "c:/"
print(startFolder)
count = 0  # NOTE(review): never incremented or used
for (dirname, dirs, files) in os.walk(startFolder):
    for filename in files:
        if filename.endswith('.dll'):
            filePath = os.path.join(dirname, filename)
            # size in MiB, rounded to 4 decimal places
            fileSize = float(os.path.getsize(filePath) / 1024 / 1024).__round__(4)
            print(str(fileSize) + "M", filePath)
    # NOTE(review): stops after the first directory level — original
    # indentation was ambiguous; confirm the intended scope of this break.
    break
|
import graphene
from architect.manager.models import Manager, Relationship, Resource
from graphene import Node
from graphene_django.filter import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
class ManagerNode(DjangoObjectType):
    """Relay node for Manager with per-field text filtering."""
    class Meta:
        model = Manager
        interfaces = (Node, )
        filter_fields = {
            'name': ['exact', 'icontains', 'istartswith'],
            'engine': ['exact', 'icontains', 'istartswith'],
            'description': ['exact', 'icontains', 'istartswith'],
            'status': ['exact', 'icontains', 'istartswith'],
        }
class RelationshipNode(DjangoObjectType):
    """Relay node for Relationship, filterable by id only."""
    class Meta:
        model = Relationship
        interfaces = (Node, )
        filter_fields = ['id']
class ResourceNode(DjangoObjectType):
    """Relay node for Resource, filterable by id only."""
    class Meta:
        model = Resource
        interfaces = (Node, )
        filter_fields = ['id']
class Query(graphene.ObjectType):
    """Root GraphQL query exposing filterable connections."""
    managers = DjangoFilterConnectionField(ManagerNode)
    relationships = DjangoFilterConnectionField(RelationshipNode)
    resources = DjangoFilterConnectionField(ResourceNode)
# Module-level schema object consumed by the GraphQL view.
schema = graphene.Schema(query=Query)
|
import pyodbc

# Insert two demo rows into the employee table.
con = pyodbc.connect('Driver={SQL server};Server=DESKTOP-T66VEKU\SREENATHSQL;Database=master;')
cursor = con.cursor()
cursor.execute("insert into employee values(3,'sm'),(4,'hj')")
# Bug fix: pyodbc connections default to autocommit=False, so without an
# explicit commit the INSERT was rolled back when the connection closed.
con.commit()
print("table succesfully")
cursor.close()
con.close()
|
"""
类别:词-宋-作者
"""
import sqlite3
import os
import json
def make_db(db, path):
    """Create the ci_song_author table in sqlite db `db` and populate it
    from author.song.json found under `path`.

    Bug fixes: os.path.exists() returns a bool, never None, so the original
    `is None` check could never detect a missing file; and `conn` was
    unbound in the finally block if sqlite3.connect itself failed.
    """
    sql = '''
    CREATE TABLE IF NOT EXISTS "ci_song_author" (
       "id" INTEGER NOT NULL,
       "name" TEXT,
       "desc" TEXT,
       "short_desc" TEXT,
       PRIMARY KEY ("id")
    );
    '''
    print('\r\n词-宋-作者 正在初始化...')
    conn = None
    try:
        conn = sqlite3.connect(db)
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        author_data = os.path.join(path, 'author.song.json')
        if not os.path.exists(author_data):
            print('词-宋-作者 数据文件不存在')
            return
        print('\t', author_data)
        with open(author_data, 'r', encoding='UTF-8') as f:
            author_dict = json.load(f)
        items = [(str(item['name']), str(item['description']), str(item['short_description']))
                 for item in author_dict]
        cur.executemany(
            "insert into ci_song_author(name, desc, short_desc) values (?,?,?)", items)
        conn.commit()
        print('词-宋-作者 数据处理完毕.')
    except Exception as e:
        print(e)
        if conn is not None:
            conn.rollback()
    finally:
        if conn is not None:
            conn.close()
|
# -*- coding: utf-8 -*-
# A tiny in-memory "table": a list of row dicts.
row1 = {"name": "a1", "age": 18, "salary": 30000, "city": "beijing"}
row2 = {"name": "a2", "age": 19, "salary": 20000, "city": "shanghai"}
row3 = {"name": "a3", "age": 20, "salary": 10000, "city": "shengzheng"}
tb = [row1, row2, row3]
# Salary of the person in the second row.
print('第二行人的薪资是:{0}'.format(tb[1].get("salary")))
# Print every salary in the table.
# Idiom fix: iterate the list directly instead of a hard-coded range(3),
# so adding/removing rows cannot desynchronise the loop. Output unchanged.
for person in tb:
    print('{0}的薪资是:{1}'.format(person.get('name'), person.get("salary")))
# Print the whole table.
for person in tb:
    print('{0}的年龄是:{1},薪资是:{2},所在城市是:{3}'.format(person.get('name'), person.get('age'), person.get('salary'), person.get('city')))
|
class Solution(object):
    def isAdditiveNumber(self, num):
        """
        :type num: str
        :rtype: bool
        True if `num` can be split into >= 3 numbers where each number is
        the sum of the two before it. Numbers may not have leading zeros
        (except the single digit "0"). Dead commented-out draft removed.
        NOTE: callers pass the class itself as `self` (see __main__), and
        firstTwo is invoked as self.firstTwo(self, ...) to match.
        """
        n = len(num)
        for i, j in self.firstTwo(self, n, num[0] == "0"):
            # second number may not have a leading zero unless it is "0"
            if num[i] == "0" and j - i > 1:
                continue
            a, b, cur = num[:i], num[i:j], j
            while True:
                c = str(int(a) + int(b))
                if num.startswith(c, cur):
                    cur += len(c)
                    if cur == n:
                        return True
                    a, b = b, c
                else:
                    break
        return False

    # NOTE: unused legacy helper, retained for API compatibility.
    def search(self, num1, num2, num, start):
        if num[start] == '0':
            if num1 + num2 > 0:
                return False, 1, 0, 0
            else:
                return True, start + 1, 0, 0
        sumnum = num1 + num2
        l = len(str(sumnum))
        if l > (len(num) - start):
            return False, 0, 0, 0
        if sumnum - int(num[start:start+l]) == 0 and num[start] != '0':
            return True, start + l, num2, num1 + num2
        return False, 1, 0, 0

    def firstTwo(self, n, zero):
        """Yield all candidate (i, j) split points for the first two numbers."""
        # Bug fix: `(n - 1) / 2 + 1` is a float in Python 3 and range()
        # raises TypeError on floats; use integer division.
        limit = 2 if zero else (n - 1) // 2 + 1
        for i in range(1, limit):
            j = i + 1
            while n - j >= max(i, j - i):
                yield (i, j)
                j += 1
if __name__ == "__main__":
    # The class itself (not an instance) is passed as `self`; the methods
    # are written to tolerate that calling convention.
    solu = Solution
    ans = solu.isAdditiveNumber(solu, "199001200")
    print(ans)
import random
import os
import os.path as path
import PIL
from PIL import ImageFilter
import src.CNR.cnr as cnr
import src.basic_correct.b_scan as bc
import numpy as np
import pdb
import matplotlib.pyplot as plt
# SANITY CHECK: blur and add noise to image and see CNR reduced
def test_CNR_computation():
    """Sanity test: a blurred, rescaled B-scan should score a lower CNR
    than the original."""
    data_dir = path.join(os.getcwd() , "../data")
    subdirs = [subdir for subdir in os.listdir(data_dir)]
    #subdir = subdirs[int(random.random()* len(subdirs))]
    subdir = 'tooth'  # fixed sample for reproducibility
    test_dir = path.join(data_dir,subdir)
    BScan_orig = bc.BScan(test_dir).b_scan(0)
    # random gain + offset before blurring
    Bscan = BScan_orig * random.random() + random.random() * 255.0
    pilimg = PIL.Image.fromarray(Bscan.astype("float")).convert('RGB')
    im1 = pilimg.filter(ImageFilter.BLUR)
    blurred_CNR = cnr.CNR(np.asarray(im1.convert('LA'))[:,:,0])
    orig_CNR = cnr.CNR(BScan_orig)
    # NOTE(review): print() does not %-format; this prints the format
    # string and the two values separately.
    print("CNRS obtained for orig and blurred %3.f %3.f", orig_CNR,blurred_CNR)
    assert(orig_CNR > blurred_CNR)
|
from kitty.models import Kitty, KittyUser, UserItem
from django.http import HttpResponse
from django.core import serializers
from django.utils import simplejson
from django.forms.models import model_to_dict
from kitty import ajax
def kitty(request, id):
    """Return basic fields of one Kitty as a JSON response."""
    # NOTE(review): django.utils.simplejson was removed in Django 1.5;
    # confirm the Django version or switch to the stdlib json module.
    k = Kitty.objects.get(id=id)
    k_json = {
        "kittyId": k.id,
        "name": k.name,
        "createdBy": k.created_by
    }
    return HttpResponse(simplejson.dumps(k_json), content_type="application/json")
def users(request, id):
    """Return the users of one Kitty (id, name, money) as a JSON list."""
    k = Kitty.objects.get(id=id)
    u = KittyUser.objects.filter(kitty=k)
    u_json = []
    for user in u:
        u_json.append(dict(userId = user.id, name = user.name, money = user.money))
    return HttpResponse(simplejson.dumps(u_json), content_type="application/json")
def userItems(request, user_id):
    """Return all items of one user (id, name, price, EAN, count) as JSON."""
    user = KittyUser.objects.get(id=user_id)
    i_json = []
    for useritem in user.useritem_set.all():
        i_json.append(dict(itemId = useritem.id, itemName = useritem.item.name, itemPrice = useritem.item.price, itemEAN = useritem.item.EAN, itemCount = useritem.quantity))
    return HttpResponse(simplejson.dumps(i_json), content_type="application/json")
def incItem(request, user_item_id):
    """Increment a user item via the ajax helper, then return its fresh state."""
    ajax.incItem(request, user_item_id)
    # re-fetch to pick up the quantity/money changes made by the helper
    useritem = UserItem.objects.get(id = user_item_id)
    i_json = dict(itemId = useritem.id, itemName = useritem.item.name, itemPrice = useritem.item.price, itemCount = useritem.quantity, userMoney=useritem.user.money)
    return HttpResponse(simplejson.dumps(i_json), content_type="application/json")
def decItem(request, user_item_id):
    """Decrement a user item via the ajax helper, then return its fresh state."""
    ajax.decItem(request, user_item_id)
    # re-fetch to pick up the quantity/money changes made by the helper
    useritem = UserItem.objects.get(id = user_item_id)
    i_json = dict(itemId = useritem.id, itemName = useritem.item.name, itemPrice = useritem.item.price, itemCount = useritem.quantity, userMoney=useritem.user.money)
    return HttpResponse(simplejson.dumps(i_json), content_type="application/json")
import _plotly_utils.basevalidators
class Mesh3dsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Auto-generated validator for mesh3d entries in layout.template.data."""
    def __init__(
        self,
        plotly_name='mesh3d',
        parent_name='layout.template.data',
        **kwargs
    ):
        super(Mesh3dsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Mesh3d'),
            data_docs=kwargs.pop('data_docs', """
"""),
            **kwargs
        )
|
# Generated by Django 2.2.4 on 2019-09-06 15:37
from django.db import migrations
class Migration(migrations.Migration):
    """Rename course models to shorter names and drop unused ones."""
    dependencies = [
        ('webapp', '0003_auto_20190906_2001'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='bsc_chem',
            new_name='Chem',
        ),
        migrations.RenameModel(
            old_name='bsc_phys',
            new_name='It',
        ),
        migrations.RenameModel(
            old_name='bsc_maths',
            new_name='Maths',
        ),
        migrations.RenameModel(
            old_name='msc_phys',
            new_name='Phys',
        ),
        migrations.DeleteModel(
            name='bsc_it',
        ),
        migrations.DeleteModel(
            name='msc_chem',
        ),
        migrations.DeleteModel(
            name='msc_maths',
        ),
    ]
|
../XIASocket/API/python/xsocket.py |
# Generated by Django 2.1.7 on 2019-02-12 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: Author, Book, Category, Publisher, Sub_Category
    plus the many-to-many links between them."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=72)),
                ('url', models.URLField(default='tusome.herokuapp.com/books/')),
                ('Price', models.DecimalField(decimal_places=2, default=99.99, max_digits=7)),
                ('pages', models.IntegerField(default=234)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=36)),
            ],
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('books', models.ManyToManyField(to='tusome.Book')),
            ],
        ),
        migrations.CreateModel(
            name='Sub_Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=13)),
            ],
        ),
        migrations.AddField(
            model_name='category',
            name='sub_categories',
            field=models.ManyToManyField(to='tusome.Sub_Category'),
        ),
        migrations.AddField(
            model_name='author',
            name='books',
            field=models.ManyToManyField(to='tusome.Book'),
        ),
    ]
|
import re
def format_state(state):
    """Trim empty pots ('.') off both ends, then pad four on each side.

    Returns (new_state, index_shift) where index_shift records how much
    the index of pot 0 moved (negative trims, +4 for the left padding).
    """
    offset = 0
    while state[0] == '.':
        del state[0]
        offset -= 1
    while state[-1] == '.':
        del state[-1]
    return list('....') + state + list('....'), offset + 4
def calc_sum(state, extra):
    """Sum of pot indices (shifted left by `extra`) that hold a plant ('#')."""
    total = 0
    for idx, pot in enumerate(state):
        if pot == '#':
            total += idx - extra
    return total
def solution1(state, spread, generations):
    """Advent of Code 2018 day 12: evolve the pot row `generations` times
    and return the sum of plant-bearing pot indices. Detects a steady
    state (same pattern, constant drift) and fast-forwards."""
    state, extra = format_state(state)
    for g in range(1, generations + 1):
        new = ['.'] * len(state)
        # apply every spread rule to every 5-pot window
        for p, r in spread:
            for i in range(2, len(state)-2):
                if state[i-2:i+3] == p:
                    new[i] = r
        new, change = format_state(new)
        extra += change
        # pattern repeats while only drifting: extrapolate remaining generations
        if ''.join(state) == ''.join(new):
            extra += change * (generations - g)
            break
        state = new
    return calc_sum(state, extra)
def parse_input1(input):
    """Parse the puzzle text into (initial_state, spread_rules)."""
    lines = input.split('\n')
    header, rules = lines[0], lines[2:]
    _, raw_state = header.split(':')
    spread = []
    for rule in rules:
        pattern, result = rule.split('=>')
        spread.append((list(pattern.strip()), result.strip()))
    return list(raw_state.strip()), spread
if __name__ == '__main__':
    # Part 2: 50 billion generations (relies on steady-state fast-forward).
    with open('input.txt') as fh:
        print(solution1(*parse_input1(fh.read().strip()), 50000000000))
|
from django.db import models
# Create your models here.
class IssueRecord(models.Model):
    """A record of one deployment/release of a project."""
    project_name = models.CharField("工程名", max_length=100)        # project name
    issue_content = models.CharField("发布内容",max_length=500,null=True)   # release content
    issue_time = models.DateTimeField(null=True)                      # release timestamp
    dev_person = models.CharField("开发人员",max_length=100,null=True)      # developer
    test_person = models.CharField("测试人员",max_length=100,null=True)     # tester
    issue_person = models.CharField("发布人员",max_length=100,null=True)    # releaser
    issue_status = models.CharField("发布状态",max_length=3,default="0")    # release status code
    svn_path = models.CharField("svn路径",max_length=500,null=True)         # svn path
    remark = models.CharField("备注信息",max_length=500,null=True)          # remarks
    class Meta:
        db_table = 'issue_issuerecord'
        ordering = ['id']
    def __str__(self):
        return self.issue_content
# Test data (kept as an inert module-level string so it never executes)
'''
from issue.models import IssueRecord
import datetime
data={"project_name":"front","issue_content":"测试页面","issue_time":datetime.datetime.now(),"dev_person":"金","test_person":"黄龙","issue_person":"发布人员","remark":"备注信息"}
for i in range(1,10000):
    nice = IssueRecord(**data)
    nice.save()
'''
|
def soln(N):
    """Return the smallest number whose decimal digits sum to N.

    The digits are as many 9s as possible plus the remainder, sorted
    ascending — e.g. soln(22) == 499.

    Bug fix: for N <= 0 the original never assigned `number` and raised
    NameError; we now return 0 for that degenerate case.
    """
    if N <= 0:
        return 0
    nines, rem = divmod(N, 9)
    digits = ("" if rem == 0 else str(rem)) + "9" * nines
    return int(digits)
# Smallest numbers whose digits sum to 22 and to 100.
print(soln(22))
print(soln(100))
|
import binascii
# Relative frequency of each letter A-Z in typical English text.
english_freq = [
    0.08167, 0.01492, 0.02782, 0.04253, 0.12702, 0.02228, 0.02015,  # A-G
    0.06094, 0.06966, 0.00153, 0.00772, 0.04025, 0.02406, 0.06749,  # H-N
    0.07507, 0.01929, 0.00095, 0.05987, 0.06327, 0.09056, 0.02758,  # O-U
    0.00978, 0.02360, 0.00150, 0.01974, 0.00074                      # V-Z
]

# msg is expected as raw binary
def xor(msg, key):
    """XOR every byte of msg with the single-byte key; returns a bytearray.

    Bug fix: iterating bytes in Python 3 yields ints, so the original
    ord(i) raised TypeError; both str and bytes input are now accepted.
    """
    ret = bytearray()
    for ch in msg:
        value = ch if isinstance(ch, int) else ord(ch)
        ret.append(value ^ key)
    return ret

# msg is expected as raw binary
def get_score(msg):
    """Chi-squared score of msg against English letter frequencies.

    Lower is more English-like; 0xfffffff flags content that cannot be
    plausible English (non-printable bytes, or no letters at all).
    """
    count = [0] * 26
    ignored = 0
    for byte in msg:
        if 65 <= byte <= 90:          # A-Z
            count[byte - 65] += 1
        elif 97 <= byte <= 122:       # a-z
            count[byte - 97] += 1
        elif 32 <= byte <= 126:       # other printable: skip
            ignored += 1
        elif byte in (9, 10, 13):     # tab/newline/carriage return: skip
            ignored += 1
        else:
            return 0xfffffff
    l = len(msg) - ignored
    # Bug fix: a message of only punctuation/whitespace made l == 0 and
    # divided by zero below; score it as non-English instead.
    if l == 0:
        return 0xfffffff
    chi2 = 0
    for i in range(26):
        observed = count[i]
        expected = l * english_freq[i]
        difference = observed - expected
        chi2 += difference * difference / expected
    return chi2
# Single-byte XOR cracking (Cryptopals set 1 challenge 3): try every key
# and rank candidate plaintexts by chi-squared English score.
to_crack = binascii.unhexlify("1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736")
sol = "Cooking MC's like a pound of bacon"
messages = []
# Bug fixes: range(0xFF) stopped at 254 and never tried key 255, and the
# final `print msg` was Python 2 syntax.
for key in range(256):
    m = xor(to_crack, key)
    messages.append((m, get_score(m)))
for msg in sorted(messages, key=lambda x: x[1]):
    print(msg)
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def sortEvenOdd(self, nums: List[int]) -> List[int]:
        """Sort even-index elements ascending and odd-index elements
        descending, in place, and return the list."""
        ascending_evens = sorted(nums[::2])
        descending_odds = sorted(nums[1::2], reverse=True)
        nums[::2] = ascending_evens
        nums[1::2] = descending_odds
        return nums
if __name__ == "__main__":
    # quick self-checks (LeetCode 2164 examples)
    solution = Solution()
    assert [2, 3, 4, 1] == solution.sortEvenOdd([4, 1, 2, 3])
    assert [2, 1] == solution.sortEvenOdd([2, 1])
|
from django.contrib import admin
from .models import *
class ProductAdmin(admin.ModelAdmin):
    """Admin list view for products; slug auto-filled from the name."""
    list_display=['name','category','price_is']
    prepopulated_fields = {"slug": ("name",)}
class CartAdmin(admin.ModelAdmin):
    """Admin list view for cart entries."""
    list_display=['item','user','created']
class OrderAdmin(admin.ModelAdmin):
    """Admin list view for orders."""
    list_display=['user','ordered']
# Register the shop models with the Django admin site.
admin.site.register(Product,ProductAdmin)
admin.site.register(Cart,CartAdmin)
admin.site.register(Order,OrderAdmin)
admin.site.register(Checkout)
# Register your models here.
|
import json
from watson_developer_cloud import VisualRecognitionV3
# BEGIN of python-dotenv section
from os.path import join, dirname
from dotenv import load_dotenv
import os
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# END of python-dotenv section
##########################
### VISUAL RECOGNITION ###
##########################
def vr_open():
    """Opens a connection to Visual Recognition
    using credentials from .env file.
    Parameters
    ----------
    None
    Returns
    -------
    VisualRecognitionV3 : the connector from Watson API.
    """
    # API key comes from the VR_API_KEY entry loaded by python-dotenv above.
    visual_recognition = VisualRecognitionV3(
        '2016-05-20',
        api_key=os.environ.get("VR_API_KEY")
    )
    return(visual_recognition)
def list_classifiers(vr_object):
    """Connects to Watson Visual Recognition
    and retrieves the list of custom classifiers.
    Parameters
    ----------
    vr_object : {VisualRecognitionV3} the connector to Visual Recognition (Watson API).
    Returns
    -------
    list : the list of classifiers ids (None on connection/listing errors).
    """
    json_data = vr_object.list_classifiers(verbose=True)
    if json_data is None:
        print("Error: connection not responding")
        return(None)
    if "classifiers" not in json_data.keys():
        print("Error: no listing available")
        return(None)
    id_list = []
    if len(json_data.get("classifiers", [])) == 0:
        print("There are no custom classifiers in your Visual Recognition component.")
        return(id_list)
    # print a short summary of each classifier while collecting its id
    for clf in json_data.get("classifiers", []):
        print("*** classifier id '{}'".format(clf['classifier_id']))
        id_list.append(clf['classifier_id'])
        print("classes: [ {} ]".format(", ".join([c['class'] for c in clf['classes']])))
        print("status: {}".format(clf['status']))
    return(id_list)
def delete_classifier(vr_object, classifier_id):
    """Connects to Watson Visual Recognition
    and deletes a given classifier.
    Parameters
    ----------
    vr_object : {VisualRecognitionV3} the connector to Visual Recognition (Watson API).
    classifiers_id : {str} the id of the classifier to delete.
    Returns
    -------
    dict : the JSON response from Watson
    """
    json_data = vr_object.delete_classifier(classifier_id=classifier_id)
    return(json_data)
def read_json_file(file_path):
    """Read the file at `file_path` and return its parsed JSON structure.

    Parameters
    ----------
    file_path : {str} the path to the json file.
    Returns
    -------
    dict : the json structure read from the file.
    """
    with open(file_path) as handle:
        return json.loads(handle.read())
########################
### APPLY CLASSIFIER ###
########################
def classify_image(vr_object, image_path, classifiers_id, threshold):
    """Classifies an image using Visual Recognition
    and outputs the set of detected classes.
    Parameters
    ----------
    image_path : {str} the path of the image to classify.
    vr_object : {VisualRecognitionV3} the connector to Visual Recognition (Watson API).
    classifiers_id : {str} the id of a classifier to apply (you can use 'default').
    threshold : minimum confidence for a class to be reported.
    Returns
    -------
    the raw Watson classify() response (parse with parse_classes()).
    """
    with open(image_path, 'rb') as image_file:
        response = vr_object.classify(images_file=image_file,
                                      threshold=threshold,
                                      classifier_ids=[classifiers_id])
    return(response)
def parse_classes(json_data):
    """Collect the set of class names found in a Watson classify() response."""
    found = set()
    for image in json_data['images']:
        for classifier in image['classifiers']:
            found.update(entry['class'] for entry in classifier['classes'])
    return found
################
### TRAINING ###
################
def find_training_zipfiles(root_dir):
    """Walks through a directory to find zip archives
    used as classes to train Visual Recognition.
    Parameters
    ----------
    root_dir: {str} the path to the directory to find zip archives.
    Returns
    -------
    list: a list of all the found zip archives, formatted as dict (see Notes).
    Notes
    -----
    Elements in the returned list are formatted as {dict}, with the following keys:
    - 'path': an {str} that indicates the path of the zip archive.
    - 'class': a {str} that provides the class of that archive (from zip file name).
    """
    training_sets = []
    for root, dirs, files in os.walk(root_dir):
        for file_str in files:
            if file_str.endswith(".zip"):
                # Bug fix: rstrip(".zip") strips any trailing '.', 'z', 'i',
                # 'p' characters ("trip.zip" -> "tr"); slice the suffix off.
                zip_class = file_str[:-len(".zip")]
                zip_path = os.path.join(root, file_str)
                training_sets.append({
                    'class': zip_class,
                    'path': zip_path
                })
    return(training_sets)
def create_multiclass_classifier(vr_object, classifier_id, zip_archives):
    """Create a classifier from zip archives.
    Parameters
    ----------
    vr_object : {VisualRecognitionV3} the connector to Visual Recognition (Watson API).
    classifiers_id : {str} the name of the classifier to create.
    zip_archives: {list} of zip archives formatted as dictionaries.
    Returns
    -------
    None
    Notes
    -----
    Elements in zip_archives are formatted as {dict}, with the following keys:
    - 'path': an {str} that indicates the path of the zip archive.
    - 'class': a {str} that provides the class of that archive (from zip file name).
    """
    kwargs = {}
    try:
        for entry in zip_archives:
            kwarg_stringkey = "{}_positive_examples".format(entry['class'])
            kwargs[kwarg_stringkey] = open(entry['path'], 'rb')
        ret_value = vr_object.create_classifier(classifier_id, **kwargs)
        print("Watson returned: {}".format(json.dumps(ret_value)))
    finally:
        # Bug fix: the opened archive handles were never closed (leak).
        for handle in kwargs.values():
            handle.close()
###############
### TESTING ###
###############
def find_testing_images(root_dir):
    """Walks through a directory to find subdirs considered as classes,
    and images within these subdirs considered as testing examples
    for a Visual Recognition classifier.

    Parameters
    ----------
    root_dir: {str} the path to the directory to find subdirs/images.

    Returns
    -------
    list: a list of all the found images, formatted as dict (see Notes).

    Notes
    -----
    Elements in the returned list are formatted as {dict}, with the following keys:
    - 'path': an {str} that indicates the path of an image.
    - 'actual': a {set} that gives the actual class of that image (only one).
    """
    possible_extensions = [".jpg", ".jpeg"]
    image_list = []
    for dir_name, subdir_list, file_list in os.walk(root_dir, topdown=False):
        for file_name in file_list:
            for ext in possible_extensions:
                if file_name.lower().endswith(ext):
                    # FIX: use os.path.basename instead of split('/') so the
                    # class name is derived correctly on every platform.
                    image_classes = {os.path.basename(dir_name)}
                    image_path = os.path.join(dir_name, file_name)
                    image_list.append({
                        'path': image_path,
                        'actual': image_classes,
                    })
                    # Stop after the first matching extension so one file is
                    # never appended twice.
                    break
    return image_list
def measure_accuracy(image_entries):
    """Measure the accuracy on a given list of images.

    Parameters
    ----------
    image_entries: {list} of entries (dictionaries) giving for each image its path,
        its actual class and predicted class (see Notes).

    Returns
    -------
    float: score for accuracy; 0.0 for an empty list (instead of the
        original ZeroDivisionError).

    Notes
    -----
    Elements in image_entries are formatted as {dict}, with the following keys:
    - 'path': an {str} that indicates the path of an image.
    - 'actual': a {set} that gives the actual class of that image (only one).
    - 'predicted': a {set} that gives the predicted class(es) of that image.
    """
    if not image_entries:
        # Guard: an empty evaluation set has no meaningful accuracy.
        return 0.0
    correct = sum(1 for entry in image_entries
                  if entry['predicted'] == entry['actual'])
    return correct / len(image_entries)
|
def option_menu(window):
    """Placeholder for the options menu; not implemented yet.

    Parameters
    ----------
    window: the UI window the menu would attach to (unused).
    """
    pass
|
# Ship blueprints
class shipBase(object):
    """Base blueprint holding a ship's identification and build stats."""

    def __init__(self):
        # Identification fields start empty.
        self.name = ""
        self.type = ""
        # Numeric stats all start zeroed.
        self.health = 0
        self.built = 0
        self.buildtime = 0
# brute force algorithm, checking every number up to the square root of n
def trial_division(n):
    """Factor n by trial division.

    Parameters
    ----------
    n: {int} the number to factor.

    Returns
    -------
    tuple: (is_prime, factors). is_prime is True when no divisor up to
        sqrt(n) exists (matching the original's check); factors is the
        complete prime factorization of n (empty when n is prime).
    """
    factors = []
    remainder = n
    for i in range(2, int(pow(n, 0.5)) + 1):
        while remainder % i == 0:
            factors.append(i)
            # FIX: integer division — the original used `/=`, turning the
            # working value into a float.
            remainder //= i
    # Prime check must be made before appending the leftover cofactor.
    is_prime = len(factors) == 0
    # FIX: the original dropped any prime cofactor larger than sqrt(n)
    # (e.g. 14 -> [2] instead of [2, 7]); append the leftover.
    if factors and remainder > 1:
        factors.append(remainder)
    return is_prime, factors
# Quick demo: factor 30 and show the (is_prime, factors) result.
prime, factor = trial_division(30)
print(prime)
print(factor)
|
def main():
    """Interactive console menu: basic arithmetic (option 1) or
    string operations (option 2).

    All interaction happens through input()/print(); invalid menu
    choices print an error message.
    """
    start = input("Please enter 1 for mathematical functions, or enter 2 for string operations: ")
    if start == "1":
        # Renamed from `math` to avoid shadowing the stdlib module name.
        op_choice = input("Please enter 1 for addition, 2 for subtraction, 3 for multiplication, or 4 for division: ")
        if op_choice == "1":
            input1 = float(input("Please enter the first number: "))
            input12 = float(input("Please enter the number you would like to add to the first number: "))
            add = input1 + input12
            print(add, "is the sum of the two numbers entered")
        elif op_choice == "2":
            input2 = float(input("Please enter the first number: "))
            input22 = float(input("Please enter the number you would like to subtract from the first number: "))
            sub = input2 - input22
            print(sub, "is the difference of the two numbers entered")
        elif op_choice == "3":
            input3 = float(input("Please enter the first number: "))
            input32 = float(input("Please enter the number you would like to multiply with the first number: "))
            mul = input3 * input32
            print(mul, "is the product of the two numbers entered")
        elif op_choice == "4":
            input4 = float(input("Please enter the first number: "))
            # FIX: the original prompt wrongly said "add to" — this value
            # is the divisor.
            input42 = float(input("Please enter the number you would like to divide the first number by: "))
            # FIX: guard against ZeroDivisionError.
            if input42 == 0:
                print("Cannot divide by zero")
            else:
                div = input4 / input42
                print(div, "is the quotient of the two numbers entered")
        else:
            print("That is not a valid input")
    elif start == "2":
        inputs = input(
            "Please enter 1 if you would like to determine the number of vowels in a string, or enter 2 if you would like to encrypt a string: ")
        if inputs == "1":
            message = input("Please enter the string you would like to determine the number of vowels in: ")
            lowermessage = message.lower()
            # Count all five vowels in one pass instead of five blocks.
            count = sum(lowermessage.count(vowel) for vowel in "aeiou")
            print("There are", count, "vowels in your string")
        elif inputs == "2":
            message = input("Please enter the string you would like to encrypt: ")
            print("This is the encrypted message")
            # Simple substitution: each char becomes 2 * codepoint + 3.
            for char in message:
                print("", 2 * ord(char) + 3, end="")
            print()
        else:
            print("That is not a valid input")
    else:
        print("That is not a valid input")

main()
# Generated by Django 3.1.8 on 2021-04-15 11:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter BookIndexPage.flat_menu to an
    optional CharField with Spanish user-facing help text."""

    dependencies = [
        ('books', '0003_bookindexpage_bookslistingpage'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bookindexpage',
            name='flat_menu',
            # help_text is user-facing runtime content; left untranslated.
            field=models.CharField(blank=True, help_text='Si no está en la lista, se asignará automáticamente cuando crees la página', max_length=255, null=True),
        ),
    ]
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time
browser = webdriver.Chrome()
# Open the Baidu home page.
browser.get("http://www.baidu.com")
# Simulate the user clicking the login button.
# xx = browser.find_element_by_name('tj_login')
browser.get('https://passport.baidu.com/v2/?login&tpl=mn&u=http%3A%2F%2Fwww.baidu.com%2F')
# rows = browser.find_elements_by_css_selector(".TCP_RowOdd,.TCP_RowEven")
# # Locate the login popup div.
# browser.find_element_by_class_name('tang-content')
# time.sleep(5)
#
# # Type the user name.
# browser.find_element_by_id('TANGRAM__PSP_8__userName').send_keys('username')
# time.sleep(5)
#
# # Type the password.
# browser.find_element_by_id('TANGRAM__PSP_8__password').send_keys('password')
# time.sleep(5)
|
#!/usr/bin/env python
__author__ = 'Richard Lincoln'
""" Creates a single precision copy of JPOWER. """
import os
import sys
import shutil
import re
# Source tree layout for the double -> float conversion.
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
BASE_PKG = 'edu.cornell.pserc.jpower'
D_SUBPKG = 'tdouble'  # double-precision sub-package (conversion source)
S_SUBPKG = 'tfloat'   # single-precision sub-package (conversion target)
D_PREFIX = 'D'        # class-file prefix used by double-precision classes
S_PREFIX = 'S'        # class-file prefix for the generated float copies
DDIR = os.path.join(SRC_DIR, BASE_PKG.replace('.', '/'), D_SUBPKG)
SDIR = os.path.join(SRC_DIR, BASE_PKG.replace('.', '/'), S_SUBPKG)
# Numeric-literal regexes (currently unused; see the TODO in replace_doubles).
DOUBLE = \
    r"-(\ \ +|-)?((\ \ .[0-9]+)|([0-9]+(\ \ .[0-9]*)?))(e(\ \ +|-)?[0-9]+)?$"
FLOAT = r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?"
# Textual substitutions applied to every copied source file: Parallel Colt
# double-precision type names -> their float-precision counterparts.
FIND_REPLACE = {
    D_SUBPKG: S_SUBPKG,
    D_PREFIX + "jp": S_PREFIX + "jp",
    "tdcomplex": "tfcomplex",
    "DoubleMatrix1D": "FloatMatrix1D",
    "DoubleMatrix2D": "FloatMatrix2D",
    "DComplexMatrix1D": "FComplexMatrix1D",
    "DComplexMatrix2D": "FComplexMatrix2D",
    "DoubleFactory1D": "FloatFactory1D",
    "DoubleFactory2D": "FloatFactory2D",
    "DComplexFactory1D": "FComplexFactory1D",
    "DComplexFactory2D": "FComplexFactory2D",
    "DoubleFunctions": "FloatFunctions",
    "DComplexFunctions": "FComplexFunctions",
    "DoubleArrayList": "FloatArrayList",
    "SparseDoubleAlgebra": "SparseFloatAlgebra",
    "dfunc": "sfunc",
    "double": "float",
    "Double": "Float",
    "Math.PI": "(float) Math.PI",
    "Math.pow": "(float) Math.pow"
}
def write_files():
    """Copy the double-precision source tree into the float target dir.

    Any existing target tree is removed first so the copy is clean.
    (Python 2 script: left byte-for-byte, documentation only.)
    """
    # if not os.path.exists(SDIR):
    # os.makedirs(SDIR)
    if os.path.exists(SDIR):
        shutil.rmtree(SDIR)
    shutil.copytree(DDIR, SDIR)#os.path.join(SRC_DIR, BASE_PKG))
def rename_files(dir):
    """Recursively rename copied files from the 'D' prefix to 'S'.

    Parameters
    ----------
    dir: directory to process (recurses into subdirectories).
    """
    print "Renaming:", dir
    for dname in os.listdir(dir):
        dpath = os.path.join(dir, dname)
        if os.path.isfile(dpath) and dname.startswith(D_PREFIX):
            sname = S_PREFIX + dname[len(D_PREFIX):]
            print "Writing:", os.path.join(dir, sname)
            os.rename(dpath, os.path.join(dir, sname))
        if os.path.isdir(dpath):
            # Recurse into sub-packages.
            rename_files(dpath)
def replace_doubles(dir):
    """Recursively apply FIND_REPLACE to every 'S'-prefixed file.

    Each matching file is read whole, rewritten in memory with plain
    string replacement, and written back in place.
    """
    print "Scanning:", dir
    for fname in os.listdir(dir):
        fpath = os.path.join(dir, fname)
        if os.path.isfile(fpath) and fname.startswith(S_PREFIX):
            print "Rewriting:", fpath
            r = open(fpath, "rb")
            s = r.read()
            r.close()
            # Plain textual substitution; order follows dict iteration.
            for k, v in FIND_REPLACE.iteritems():
                s = s.replace(k, v)
            # TODO: append 'f' to all doubles
            # s = re.sub(DOUBLE, "", s)
            w = open(fpath, "wb")
            w.write(s)
            w.close()
        if os.path.isdir(fpath):
            replace_doubles(fpath)
def main():
    """Copy, rename, and rewrite the tree to build the float variant."""
    write_files()
    rename_files(SDIR)
    replace_doubles(SDIR)

# Script entry point.
if __name__ == '__main__':
    main()
|
import unittest
import json
from os import environ
from os import urandom
# Defer configs: these environment variables must be set *before*
# slackbotframework is imported, because config is read at import time.
environ['SBF_DEFER_CONFIG'] = "True"
environ['SBF_CELERY_BROKER'] = "localhost"
import slackbotframework
# Put both the app and its blueprint into debug/testing mode with a
# throwaway random secret key shared between them.
slackbotframework.app.config['DEBUG'] = True
slackbotframework.app.config['TESTING'] = True
slackbotframework.app.config['SECRET_KEY'] = str(urandom(32))
slackbotframework.blueprint.BLUEPRINT.config['DEBUG'] = True
slackbotframework.blueprint.BLUEPRINT.config['TESTING'] = True
slackbotframework.blueprint.BLUEPRINT.config['SECRET_KEY'] = \
    slackbotframework.app.config['SECRET_KEY']
# Fixed token so the Slack URL-verification handshake can be exercised.
slackbotframework.blueprint.BLUEPRINT.config['VERIFICATION_TOKEN'] = "abc123"
class Tests(unittest.TestCase):
    """Smoke tests for the slackbotframework Flask application."""

    def setUp(self):
        # Fresh test client for every test case.
        self.app = slackbotframework.app.test_client()

    def tearDown(self):
        del self.app

    def testPass(self):
        # Sanity check that the harness itself runs.
        self.assertEqual(True, True)

    def testVersionAvailable(self):
        version_attr = getattr(slackbotframework, "__version__", None)
        self.assertTrue(version_attr is not None)

    def testVersion(self):
        response = self.app.get("/version")
        self.assertEqual(response.status_code, 200)
        payload = json.loads(response.data.decode())
        # The API must report the same version as the blueprint module.
        self.assertEqual(
            slackbotframework.blueprint.__version__,
            payload['version']
        )

    def testUrlVerificationHandshake(self):
        request_body = {
            "token": "abc123",
            "challenge": "def456",
            "type": "url_verification"
        }
        response = self.app.post("/", json=request_body)
        self.assertEqual(response.status_code, 200)
        body = json.loads(response.data.decode())
        # Slack's handshake expects the challenge echoed back.
        self.assertEqual(body['challenge'], "def456")
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
|
from django.contrib import admin
from .models import (Hrr,
AaCalculations,
TimeHeartZones,
AaWorkoutCalculations,
AA,
TwentyfourHourAA,
TwentyfourHourTimeHeartZones,
AACustomRanges,
AAdashboard)
# Register your models here.
# Admin configuration: each ModelAdmin lists the owning user plus the
# created/updated timestamps, and is searchable by the user's identity
# fields (username, email, first/last name).
class HrrAdmin(admin.ModelAdmin):
    list_display = ('user_hrr','created_at','updated_at')
    search_fields = ('user_hrr__username','user_hrr__email','user_hrr__first_name',
                     'user_hrr__last_name',)

class AaCalculationsAdmin(admin.ModelAdmin):
    list_display = ('user_aa','created_at','updated_at')
    search_fields = ('user_aa__username','user_aa__email','user_aa__first_name',
                     'user_aa__last_name',)

class TimeHeartZonesAdmin(admin.ModelAdmin):
    list_display = ('user','created_at','updated_at')
    search_fields = ('user__username','user__email','user__first_name',
                     'user__last_name',)

class AaWorkoutCalculationsAdmin(admin.ModelAdmin):
    list_display = ('user_aa_workout','created_at','updated_at')
    search_fields = ('user_aa_workout__username','user_aa_workout__email',
                     'user_aa_workout__first_name','user_aa_workout__last_name',)

class AAAdmin(admin.ModelAdmin):
    list_display = ('user','created_at','updated_at')
    search_fields = ('user__username','user__email',
                     'user__first_name','user__last_name',)

class TwentyfourHourAAAdmin(admin.ModelAdmin):
    list_display = ('user','created_at','updated_at')
    search_fields = ('user__username','user__email',
                     'user__first_name','user__last_name',)

class TwentyfourHourTimeHeartZonesAdmin(admin.ModelAdmin):
    list_display = ('user','created_at','updated_at')
    search_fields = ('user__username','user__email',
                     'user__first_name','user__last_name',)

class AACustomRangesAdmin(admin.ModelAdmin):
    # No timestamp columns on this model's admin — user only.
    list_display = ('user',)
    search_fields = ('user__username','user__email',
                     'user__first_name','user__last_name',)

class AAdashboardAdmin(admin.ModelAdmin):
    list_display = ('user','created_at','updated_at')
    search_fields = ('user__username','user__email','user__first_name',
                     'user__last_name',)

# Register every model with its ModelAdmin above.
admin.site.register(Hrr,HrrAdmin)
admin.site.register(AaCalculations,AaCalculationsAdmin)
admin.site.register(TimeHeartZones,TimeHeartZonesAdmin)
admin.site.register(AaWorkoutCalculations,AaWorkoutCalculationsAdmin)
admin.site.register(AA,AAAdmin)
admin.site.register(TwentyfourHourAA, TwentyfourHourAAAdmin)
admin.site.register(TwentyfourHourTimeHeartZones, TwentyfourHourTimeHeartZonesAdmin)
admin.site.register(AACustomRanges, AACustomRangesAdmin)
admin.site.register(AAdashboard, AAdashboardAdmin)
from database.db import metadata
from resume.models import Resume
from user.models import User
|
import numpy as np
from Politician import Politician
from Party import Party
# This module is in my GitHub too: https://github.com/NestorRV/constrained_kmeans
from constrained_kmeans import constrained_kmeans
import copy
import sys
class IdeologyAlgorithm:
    """Population-based metaheuristic for function minimization.

    "Politicians" (candidate solutions) are grouped into "parties".
    Party leaders are refreshed via roulette-wheel choice among samples
    drawn around themselves, their party's subleader, and the global
    leader; ordinary members use themselves and their party leader.
    The worst member of a party may desert to a random other party.
    """

    def __init__(self, n_parties, politicians, R, function, function_index, max_evaluations, desertion_threshold):
        # Every party must receive the same number of politicians.
        if (politicians % n_parties != 0):
            print("It's impossible to create", n_parties, "parties with the same amount of politicians in each one.")
            raise SystemExit
        self.__n_parties = n_parties
        self.__politicians = politicians
        # R scales the sampling window width around each reference point.
        self.__R = R
        self.__function = function
        self.__function_index = function_index
        self.__evaluations = 0
        self.__max_evaluations = max_evaluations
        self.__desertion_threshold = desertion_threshold
        self.__best_solution = Politician(self.__function, self.__function_index)
        # The maximum available value
        self.__best_solution.set_fitness(sys.float_info.max)
        self.__population = None
        # Benchmark functions 7 and 25 use hand-set bounds; the rest take
        # them from the function object itself.
        if self.__function_index == 7:
            self.__original_lower_bounds = np.array([0 for i in range(self.__function.num_variables)])
            self.__original_upper_bounds = np.array([600 for i in range(self.__function.num_variables)])
        elif self.__function_index == 25:
            self.__original_lower_bounds = np.array([-2 for i in range(self.__function.num_variables)])
            self.__original_upper_bounds = np.array([5 for i in range(self.__function.num_variables)])
        else:
            self.__original_lower_bounds = np.array([x for x in self.__function.min_bounds])
            self.__original_upper_bounds = np.array([x for x in self.__function.max_bounds])

    def __initialize_population(self):
        """Create random politicians and cluster them into equal parties."""
        # Random politicians
        population = np.array([Politician(self.__function, self.__function_index) for i in range(self.__politicians)])
        # Make the parties using k-means
        (centroids, assignment, f) = constrained_kmeans([p.get_solution() for p in population],
                                                        np.repeat(self.__politicians / self.__n_parties,
                                                                  self.__n_parties))
        parties = [list() for i in range(self.__n_parties)]
        # Distribute the politicians according to the cluster assignment.
        [parties[i].append(p) for p, i in zip(population, assignment)]
        # Save the population
        self.__population = np.array([Party(i) for i in range(self.__n_parties)])
        [p.set_politicians(np.array(party)) for p, party in zip(self.__population, parties)]

    def __sort_population(self):
        """Sort every party internally (best members first)."""
        [party.sort_party() for party in self.__population]

    def __update_leader(self, leader, id_party, global_leader):
        """Refresh a party leader via roulette-wheel selection among three
        candidate solutions: introspection (around the leader itself),
        local competition (around the party subleader), and global
        competition (around the global leader)."""
        lower = np.array([p.get_lower_bounds() for p in self.__population[id_party].get_politicians()])
        upper = np.array([p.get_upper_bounds() for p in self.__population[id_party].get_politicians()])
        lower_bounds = np.min(lower, axis=0)
        upper_bounds = np.max(upper, axis=0)
        # The leader's update has three parts: introspection, local competition and global competition
        insp_lower_bounds = np.array([i - self.__R * np.absolute(u - l) for i, l, u in
                                      zip(leader.get_solution(), lower_bounds, upper_bounds)])
        insp_upper_bounds = np.array([i + self.__R * np.absolute(u - l) for i, l, u in
                                      zip(leader.get_solution(), lower_bounds, upper_bounds)])
        local_lower_bounds = np.array([i - self.__R * np.absolute(u - l) for i, l, u in
                                       zip(self.__population[id_party].get_subleader().get_solution(),
                                           lower_bounds, upper_bounds)])
        local_upper_bounds = np.array([i + self.__R * np.absolute(u - l) for i, l, u in
                                       zip(self.__population[id_party].get_subleader().get_solution(),
                                           lower_bounds, upper_bounds)])
        global_lower_bounds = np.array([i - self.__R * np.absolute(u - l) for i, l, u in
                                        zip(global_leader.get_solution(), lower_bounds,
                                            upper_bounds)])
        global_upper_bounds = np.array([i + self.__R * np.absolute(u - l) for i, l, u in
                                        zip(global_leader.get_solution(), lower_bounds,
                                            upper_bounds)])
        # We need to truncate the bounds if they are out of the accepted ones
        for l, u, i in zip(self.__original_lower_bounds, self.__original_upper_bounds,
                           range(np.size(self.__original_lower_bounds))):
            if insp_lower_bounds[i] < l or insp_lower_bounds[i] > u:
                insp_lower_bounds[i] = l
            if insp_upper_bounds[i] > u or insp_upper_bounds[i] < l or insp_upper_bounds[i] < insp_lower_bounds[i]:
                insp_upper_bounds[i] = u
            if local_lower_bounds[i] < l or local_lower_bounds[i] > u:
                local_lower_bounds[i] = l
            if local_upper_bounds[i] > u or local_upper_bounds[i] < l or local_upper_bounds[i] < local_lower_bounds[i]:
                local_upper_bounds[i] = u
            if global_lower_bounds[i] < l or global_lower_bounds[i] > u:
                global_lower_bounds[i] = l
            if global_upper_bounds[i] > u or global_upper_bounds[i] < l or global_upper_bounds[i] < \
                    global_lower_bounds[i]:
                global_upper_bounds[i] = u
        # Draw one uniform sample inside each candidate window.
        insp_solution = np.array([np.random.uniform(x, y) for x, y in zip(insp_lower_bounds, insp_upper_bounds)])
        local_solution = np.array([np.random.uniform(x, y) for x, y in zip(local_lower_bounds, local_upper_bounds)])
        global_solution = np.array([np.random.uniform(x, y) for x, y in zip(global_lower_bounds, global_upper_bounds)])
        insp_fitness = self.__function(insp_solution)
        self.__evaluations += 1
        local_fitness = self.__function(local_solution)
        self.__evaluations += 1
        global_fitness = self.__function(global_solution)
        self.__evaluations += 1
        results = np.array([insp_fitness, local_fitness, global_fitness])
        # We need to transform the results to manage negative values and to
        # transform the minimization problem in a maximization one
        transformed = np.max(results) - (results - np.min(results))
        # Get the ranges of probabilities for roulette wheel selection
        probability_1 = 0 + transformed[0] / np.sum(transformed)
        probability_2 = probability_1 + transformed[1] / np.sum(transformed)
        # Apply roulette wheel selection
        # The leader is updated according to the selected individual
        r = np.random.uniform(0, 1)
        if 0 < r <= probability_1:
            leader.set_solution(insp_solution)
            leader.set_fitness(insp_fitness)
            leader.set_lower_bounds(insp_lower_bounds)
            leader.set_upper_bounds(insp_upper_bounds)
        elif probability_1 < r <= probability_2:
            leader.set_solution(local_solution)
            leader.set_fitness(local_fitness)
            leader.set_lower_bounds(local_lower_bounds)
            leader.set_upper_bounds(local_upper_bounds)
        elif probability_2 < r <= 1:
            leader.set_solution(global_solution)
            leader.set_fitness(global_fitness)
            leader.set_lower_bounds(global_lower_bounds)
            leader.set_upper_bounds(global_upper_bounds)

    def __update_party(self, party):
        """Refresh every member except the leader (index 0) and the worst
        (last index), each via a two-way roulette between introspection
        and a sample around the party leader."""
        lower = np.array([p.get_lower_bounds() for p in party.get_politicians()])
        upper = np.array([p.get_upper_bounds() for p in party.get_politicians()])
        lower_bounds = np.min(lower, axis=0)
        upper_bounds = np.max(upper, axis=0)
        for individual, index in zip(party.get_politicians(), range(np.size(party.get_politicians()))):
            if index != 0 and index != np.size(party.get_politicians()) - 1:
                # The individual's update has two parts: introspection and local competition
                insp_lower_bounds = np.array([i - self.__R * np.absolute(u - l) for i, l, u in
                                              zip(individual.get_solution(), lower_bounds,
                                                  upper_bounds)])
                insp_upper_bounds = np.array([i + self.__R * np.absolute(u - l) for i, l, u in
                                              zip(individual.get_solution(), lower_bounds,
                                                  upper_bounds)])
                local_lower_bounds = np.array([i - self.__R * np.absolute(u - l) for i, l, u in
                                               zip(party.get_leader().get_solution(),
                                                   lower_bounds, upper_bounds)])
                local_upper_bounds = np.array([i + self.__R * np.absolute(u - l) for i, l, u in
                                               zip(party.get_leader().get_solution(),
                                                   lower_bounds, upper_bounds)])
                # We need to truncate the bounds if they are out of the accepted ones
                for l, u, i in zip(self.__original_lower_bounds, self.__original_upper_bounds,
                                   range(np.size(lower_bounds))):
                    if insp_lower_bounds[i] < l or insp_lower_bounds[i] > u:
                        insp_lower_bounds[i] = l
                    if insp_upper_bounds[i] > u or insp_upper_bounds[i] < l:
                        insp_upper_bounds[i] = u
                    if local_lower_bounds[i] < l or local_lower_bounds[i] > u:
                        local_lower_bounds[i] = l
                    if local_upper_bounds[i] > u or local_upper_bounds[i] < l:
                        local_upper_bounds[i] = u
                insp_solution = np.array(
                    [np.random.uniform(x, y) for x, y in zip(insp_lower_bounds, insp_upper_bounds)])
                local_solution = np.array(
                    [np.random.uniform(x, y) for x, y in zip(local_lower_bounds, local_upper_bounds)])
                insp_fitness = self.__function(insp_solution)
                self.__evaluations += 1
                local_fitness = self.__function(local_solution)
                self.__evaluations += 1
                results = np.array([insp_fitness, local_fitness])
                # We need to transform the results to manage negative values and to
                # transform the minimization problem in a maximization one
                transformed = np.max(results) - (results - np.min(results))
                # Get the ranges of probabilities for roulette wheel selection
                probability_1 = 0 + transformed[0] / np.sum(transformed)
                # Apply roulette wheel selection
                # The leader is updated according to the selected individual
                r = np.random.uniform(0, 1)
                if 0 < r <= probability_1:
                    individual.set_solution(insp_solution)
                    individual.set_fitness(insp_fitness)
                    individual.set_lower_bounds(insp_lower_bounds)
                    individual.set_upper_bounds(insp_upper_bounds)
                elif probability_1 < r <= 1:
                    individual.set_solution(local_solution)
                    individual.set_fitness(local_fitness)
                    individual.set_lower_bounds(local_lower_bounds)
                    individual.set_upper_bounds(local_upper_bounds)

    def ideology_algorithm(self):
        """Run the optimizer until the evaluation budget is spent.

        Returns either the final best fitness (when the last leaders beat
        the stored best) or an array of ~10 sampled fitness marks with the
        final best fitness appended.
        """
        self.__initialize_population()
        self.__sort_population()
        evaluations_marker = 0
        evaluation_marks = np.array([])
        while self.__evaluations < self.__max_evaluations:
            # Update the leader of each party
            leaders = np.array([party.get_leader() for party in self.__population])
            global_leader = copy.deepcopy(sorted(leaders, key=lambda p: p.get_fitness())[0])
            # We must store the best solution in every iteration
            if (global_leader.get_fitness() < self.__best_solution.get_fitness()):
                self.__best_solution = copy.deepcopy(global_leader)
            [self.__update_leader(l, i, global_leader) for l, i in zip(leaders, range(self.__n_parties)) if l != None]
            # The worst individual may desert
            for party, id in zip(self.__population, range(self.__n_parties)):
                if np.size(party.get_politicians()) > 2:
                    if (np.absolute(party.get_politicians()[-1].get_fitness()) - np.absolute(
                            party.get_politicians()[-2].get_fitness())) > self.__desertion_threshold:
                        # We store the index of the parties
                        index = np.array(range(self.__n_parties))
                        # and delete the one we are processing
                        index = np.delete(index, id)
                        # We shuffle the index to choose the new party for the desertor
                        np.random.shuffle(index)
                        deserter = copy.deepcopy(party.get_politicians()[-1])
                        self.__population[id].remove_last()
                        self.__population[index[0]].add_politician(deserter)
                        # The deserter may be a good one, so...
                        self.__sort_population()
            # We update all the individuals except the leader and the worst
            [self.__update_party(party) for party in self.__population]
            # We sort the population to get the best individuals first
            self.__sort_population()
            # Record a fitness mark roughly every tenth of the budget.
            if self.__evaluations >= evaluations_marker:
                evaluation_marks = np.insert(evaluation_marks, len(evaluation_marks), self.__best_solution.get_fitness())
                evaluations_marker += self.__max_evaluations / 10
        leaders = np.array([party.get_leader() for party in self.__population])
        if sorted(leaders, key=lambda p: p.get_fitness())[0].get_fitness() < self.__best_solution.get_fitness():
            return sorted(leaders, key=lambda p: p.get_fitness())[0].get_fitness()
        else:
            return np.append(evaluation_marks, self.__best_solution.get_fitness())

    def get_population(self):
        """Return the array of Party objects (None before a run)."""
        return self.__population
import os, sys, requests, json, time, argparse
from datetime import datetime
from ouimeaux.environment import Environment
from ouimeaux.signals import statechange, receiver
# Command-line flags: -set turns the switch on (then exits), otherwise it
# is turned off; -isSub subscribes to state-change events; -port picks the
# local bind port for the ouimeaux environment.
parser = argparse.ArgumentParser()
parser.add_argument("-set", action = "store_true")
parser.add_argument("-isSub", action = "store_true")
parser.add_argument("-port", type = int, default = 10085)
options = parser.parse_args()
env = Environment(bind = "0.0.0.0:{}".format(options.port), with_subscribers = options.isSub)
env.start()
# Give UPnP discovery five seconds to find WeMo devices on the LAN.
env.discover(5)
switch = env.get_switch('WeMo Switch1')

@receiver(statechange, sender=switch)
def switch_toggle(device, **kwargs):
    # Log every state change pushed by the device (Python 2 print).
    print device, kwargs['state']

if options.set:
    switch.on()
    print(time.strftime("%H%M%S"))
    sys.exit(0)
else:
    switch.off()
    # NOTE(review): loops while the reported state is 0 (off) — appears to
    # wait for the switch to be turned back on externally; confirm intent.
    while switch.get_state(force_update = True) == 0:
        time.sleep(1)
    print(switch.get_state(force_update = True))
    print(time.strftime("%H%M%S"))
|
import string, sys, math
class Orbit:
    """Orbit map built from "A)B" lines: B directly orbits A."""

    def __init__(self, input):
        self.orbit = {}
        for line in input.split("\n"):
            parent_name, child_name = line.split(")")
            # Create nodes on first sight, then link parent -> child.
            parent = self.orbit.setdefault(parent_name, Obj(parent_name))
            child = self.orbit.setdefault(child_name, Obj(child_name))
            parent.linkChild(child)

    def calcChecksum(self):
        """Sum the per-node (direct, indirect) orbit counts over the map."""
        total = 0
        for node in self.orbit.values():
            direct, indirect = node.countChildren()
            total += direct + indirect
        return total

    def calcTransfer(self, s, e):
        """Count orbital transfers from s's parent to e; -1 if unreachable.

        Walks up from s's parent, probing each ancestor's subtree for e.
        """
        current = self.orbit[s].father
        climbed = 0
        while True:
            down = current.findChild(e)
            if down > -1:
                return climbed + down
            if current.father is None:
                return -1
            climbed += 1
            current = current.father
class Obj:
    """A node in the orbit tree: tracks its children and its parent."""

    def __init__(self, name):
        self.childs = {}
        self.father = None
        self.name = name

    def linkChild(self, obj):
        """Attach obj as a child (first link wins) and set its parent."""
        if obj.name in self.childs:
            return
        self.childs[obj.name] = obj
        obj.father = self

    def countChildren(self):
        """Return (total descendant count, accumulated second slot).

        The second slot starts at 0 everywhere and is only summed, so it
        stays 0 — kept to preserve the original tuple contract.
        """
        descendant_total = 0
        extra = 0
        for child in self.childs.values():
            sub_total, sub_extra = child.countChildren()
            descendant_total += sub_total
            extra += sub_extra
        return (len(self.childs) + descendant_total, extra)

    def findChild(self, name, d=0):
        """Depth of the named descendant below this node, or -1."""
        if name in self.childs:
            return d
        for child in self.childs.values():
            depth = child.findChild(name, d + 1)
            if depth > -1:
                return depth
        return -1
# Ad-hoc run harness: "test" argument runs the built-in sample map,
# no argument reads the puzzle input file next to this script.
if len(sys.argv) > 1 and sys.argv[1] == "test":
    print("test mode:")
    # Sample orbit map; lines are flush-left because the string content
    # is parsed line by line.
    tests = [ """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""]
    i = 0
    while i < len(tests):
        o = Orbit(tests[i])
        print(o.calcChecksum())
        print(o.calcTransfer("YOU", "SAN"))
        i += 1

if len(sys.argv) == 1:
    # Real input lives next to this script as <name>.input.txt.
    with open(__file__.replace('.py', '.input.txt'), 'r') as f:
        lines = f.read()
    o = Orbit(lines)
    print(o.calcTransfer("YOU", "SAN"))
from mininet.topo import Topo
from itertools import product
# Number of client hosts attached to the root switch.
CLIENTS = 3

class FatTree(Topo):
    """Mininet fat-tree-style topology: CLIENTS hosts on the root switch,
    2^level switches per level, and one provider host per leaf switch."""

    def __init__(self, levels=3, *args, **kwargs):
        Topo.__init__(self, *args, **kwargs)
        if levels < 1:
            return
        self.create_levels(levels, CLIENTS)

    def create_levels(self, levels, clients_num):
        # Add the "initial" hosts, each named h_{1, 2...}.
        # Start at 1 because mininet by default links h1 with ip 00...01, etc.
        clients = [self.addHost('h{}'.format(i+1)) for i in range(clients_num)]
        prev_sw = []
        switches = []
        for level in range(levels):
            # For each level, add 2^level switches.
            switches_count = 2**level
            # Start at 1 because mininet links the MAC of s1 with 00-00-..-01, etc.
            # level = floor(log2(#switch)), with level=0 being the first level.
            switches = [self.addSwitch('s{}'.format(2**(level+1)-(switches_count-n))) for n in range(switches_count)]
            if prev_sw:
                # Non-root level: link every switch of the previous level
                # with every switch of this one.
                for a, b in product(prev_sw, switches):
                    self.addLink(a, b)
            else:
                # Root level.
                assert len(switches) == 1  # Redundant, but just in case.
                # Link the clients (initial hosts) to the root switch.
                for client in clients:
                    self.addLink(client, switches[0])
            # Continue with the next level.
            prev_sw = switches
        # Create one provider per leaf switch and link the host to it.
        host_num = clients_num
        for sw in switches:
            leaf = self.addHost('h{}'.format(host_num+1))
            self.addLink(sw, leaf)
            host_num += 1

# Expose the topology to `mn --custom ... --topo fat_tree`.
topos = { 'fat_tree': FatTree }
|
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index( request ):
    """Plain-text greeting for the app root."""
    return HttpResponse( "Hello" )

def lalo( request ):
    """Greets Lalo (Spanish response text is intentional)."""
    return HttpResponse( "Hola Lalo" )

def juan( request ):
    """Greets Juan."""
    return HttpResponse( 'Hola Juan' )

def pablo( request ):
    """Greets Pablo."""
    return HttpResponse( 'Hola Pablo' )
from django.contrib import admin
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget, ManyToManyRawIdWidget
from django.urls import NoReverseMatch, reverse
from django.utils.safestring import mark_safe
class VerboseForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
    """Raw-id FK widget that renders the related object as a styled pill
    linking to its admin change page instead of the bare key."""

    def label_and_url_for_value(self, v):
        # Look the related object up by the related field's attribute name.
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.model._default_manager.using(self.db).get(**{key: v})
        except (ValueError, self.rel.model.DoesNotExist):
            # Invalid or missing value: render nothing.
            return "", ""
        try:
            url = reverse(
                "{}:{}_{}_change".format(
                    self.admin_site.name,
                    obj._meta.app_label,
                    obj._meta.object_name.lower(),
                ),
                args=(obj.pk,),
            )
        except NoReverseMatch:
            url = ""  # Admin not registered for target model.
        # Label is the full HTML pill; the URL slot is unused (empty).
        return (
            mark_safe(
                '<span class="pill"><strong><a href="{}">{}'
                "</a></strong></span>".format(url, str(obj))
            ),
            "",
        )
class VerboseManyToManyRawIdWidget(ManyToManyRawIdWidget):
    """Raw-id M2M widget that renders each related object as a pill with a
    change-page link and a per-item delete control."""

    def label_and_url_for_value(self, value):
        result = []
        for v in value:
            key = self.rel.get_related_field().name
            try:
                obj = self.rel.model._default_manager.using(self.db).get(**{key: v})
            except (ValueError, self.rel.model.DoesNotExist):
                # NOTE(review): a single bad value discards the whole list,
                # not just the offending entry — confirm this is intended.
                return "", ""
            try:
                url = reverse(
                    "{}:{}_{}_change".format(
                        self.admin_site.name,
                        obj._meta.app_label,
                        obj._meta.object_name.lower(),
                    ),
                    args=(obj.pk,),
                )
            except NoReverseMatch:
                url = ""  # Admin not registered for target model.
            result.append(
                '<span class="pill"><strong><a href="{}">{}</a>'
                '</strong> <a class="deletelink rel-delete-link" '
                'data-rel="{}" href="#"></a></span>'.format(url, str(obj), obj.pk)
            )
        # Label is the joined pill HTML; the URL slot is unused (empty).
        return mark_safe(" ".join(result)), ""
class ImprovedModelForm(admin.ModelAdmin):
    """ModelAdmin that swaps the default raw-id widgets for the verbose
    pill-rendering variants defined in this module."""

    def formfield_for_dbfield(self, db_field, **kwargs):
        if db_field.name in self.raw_id_fields:
            # The widgets take no request; drop it before constructing.
            kwargs.pop("request", None)
            # NOTE(review): shadows the builtin `type`; left unchanged here.
            type = db_field.remote_field.__class__.__name__
            if type == "ManyToOneRel":
                kwargs["widget"] = VerboseForeignKeyRawIdWidget(
                    db_field.remote_field, site
                )
            elif type == "ManyToManyRel":
                kwargs["widget"] = VerboseManyToManyRawIdWidget(
                    db_field.remote_field, site
                )
            return db_field.formfield(**kwargs)
        return super(ImprovedModelForm, self).formfield_for_dbfield(db_field, **kwargs)

    class Media:
        # Front-end assets for the pill rendering and delete links.
        js = ("admin/js/improved_admin_rel.js",)
        css = {"all": ("admin/css/admin-pills.css",)}

    # TODO: delete or reconcile this commented-out variant (it also handles
    # OneToOneRel and uses self.admin_site instead of the module-level site).
    # def formfield_for_dbfield(self, db_field, **kwargs):
    # if db_field.name in self.raw_id_fields:
    # kwargs.pop("request", None)
    # type = db_field.remote_field.__class__.__name__
    # if type in ("ManyToOneRel", "OneToOneRel"):
    # kwargs["widget"] = VerboseForeignKeyRawIdWidget(
    # db_field.remote_field, self.admin_site
    # )
    # elif type == "ManyToManyRel":
    # kwargs["widget"] = VerboseManyToManyRawIdWidget(
    # db_field.remote_field, self.admin_site
    # )
    # return db_field.formfield(**kwargs)
    # return super().formfield_for_dbfield(
    # db_field, **kwargs
    # )
|
''' James's Data & method
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the March results sheet, skipping the first 44 rows before the data.
file1=('MarchResults Sheet1.csv')
data = pd.read_csv(file1, skiprows=44)
# Columns by position: 0 -> Mode, 1 -> Base, 3 -> EOM, 2 -> Beta, 7 -> N.
Mode, Base, EOM, Beta, N=data.iloc[:,0], data.iloc[:,1],data.iloc[:,3],data.iloc[:,2],data.iloc[:,7]
print(N)
import lists, re, copy,parsing
# Lists
from lists import healthySubstitutions as healthySubstitutions
##############################
##### HEALTHY TRANSFORMER ####
##############################
def healthyTransformer(recipe):
    """Substitute unhealthy ingredients in a recipe and rewrite its steps.

    Applies explicit substitutions from healthySubstitutions, then falls
    back to "low fat" / "low carb" variants when an ingredient exceeds the
    fat (> 9.0) or carb (> 3.5) thresholds. Direction text is rewritten to
    mention the substituted names, and the step list is rebuilt.

    Python 2 module: code left byte-for-byte, documentation only.
    """
    # NOTE(review): this aliases the argument — the input recipe object is
    # mutated in place, not copied.
    healthyRecipe = recipe
    # Maps original ingredient name -> substituted name for direction rewriting.
    subbedIngs = {}
    for ingredient in healthyRecipe.ingredients:
        substitution = ""
        if ingredient.name in healthySubstitutions:
            baseIng = healthySubstitutions[ingredient.name]
            # Prefer a descriptor-specific substitution, fall back to the
            # generic "" entry.
            if ingredient.descriptor in baseIng:
                substitution = baseIng[ingredient.descriptor]
            else:
                if baseIng[""]:
                    substitution = baseIng[""]
                else:
                    print "Nothing to substitute"
        # PERFORM SUBSTITUTION
        # Normalize malformed nutrition strings so float() below succeeds.
        if '.' not in ingredient.carbs:
            ingredient.carbs = '0.00'
        if '.' not in ingredient.fat:
            ingredient.fat = '0.00'
        try:
            if substitution:
                newIng = healthySubIngredient(ingredient, substitution)
                ind = healthyRecipe.ingredients.index(ingredient)
                healthyRecipe.ingredients[ind] = newIng
                print 'Substituting ', ingredient.name, ' for ', substitution
            # Category codes '0900'/'1100' are exempt from the fallback rules.
            elif float(ingredient.fat) > 9.0 and 'low' not in ingredient.descriptor and 'fat' not in ingredient.descriptor and not ingredient.category == '0900' and not ingredient.category == '1100':
                newIng = healthySubIngredient(ingredient, fat = True)
                subbedIngs[ingredient.name] = newIng.name
                ind = healthyRecipe.ingredients.index(ingredient)
                healthyRecipe.ingredients[ind] = newIng
                print 'Substituting ', ingredient.name, ' for ', newIng.name
            elif float(ingredient.carbs) > 3.5 and 'low' not in ingredient.descriptor and 'carb' not in ingredient.descriptor and not ingredient.category == '0900' and not ingredient.category == '1100':
                newIng = healthySubIngredient(ingredient, carb = True)
                subbedIngs[ingredient.name] = newIng.name
                ind = healthyRecipe.ingredients.index(ingredient)
                healthyRecipe.ingredients[ind] = newIng
                print 'Substituting ', ingredient.name, ' for ', newIng.name
        except:
            # Diagnose which nutrition field failed to convert.
            try:
                float(ingredient.fat)
            except:
                print ingredient.fat, 'cannot be converted to a float'
            try:
                float(ingredient.carbs)
            except:
                print ingredient.carbs, 'cannot be converted to a float'
    # Rewrite direction text word by word so it names the substitutes.
    dir = healthyRecipe.directions
    for step in dir:
        #step = str(step)
        newStep = step.split()
        for word in newStep:
            # Strip a trailing comma for lookup; re-added when indexing fails.
            if word.endswith(','):
                word = word[:-1]
            if word in healthySubstitutions:
                try:
                    ind = newStep.index(word)
                except:
                    word = word + ','
                    ind = newStep.index(word)
                    word = word[:-1]
                newStep[ind] = healthySubstitutions[word]
                # Nested substitution tables: take the generic "" entry.
                if not isinstance(newStep[ind], basestring):
                    newStep[ind] = newStep[ind][""]
            elif word in subbedIngs:
                try:
                    ind = newStep.index(word)
                except:
                    word = word + ','
                    ind = newStep.index(word)
                    word = word[:-1]
                newStep[ind] = subbedIngs[word]
        newStep = ' '.join(newStep)
        ind = dir.index(step)
        dir[ind] = newStep
    healthyRecipe.directions = dir
    # Rebuild the structured steps from the rewritten directions.
    healthyRecipe.steps = parsing.makeSteps(healthyRecipe.directions, healthyRecipe.tools, healthyRecipe.primaryMethods, healthyRecipe.secondaryMethods)
    return healthyRecipe
def healthySubIngredient(ingredient, substitution = '', carb = False, fat = False):
    """Build a replacement ingredient for *ingredient*.

    Either an explicit *substitution* name is used, or a 'low carb' /
    'low fat' name is derived from the original's descriptor, name and
    preparation.  Unit and amount are copied over from the original.
    """
    if substitution:
        newName = substitution
    elif carb:
        newName = 'low carb ' + ingredient.descriptor + ' ' + ingredient.name + ', ' + ingredient.preparation
    elif fat:
        newName = 'low fat ' + ingredient.descriptor + ' ' + ingredient.name + ', ' + ingredient.preparation
    newIng = parsing.parseIngredient({"name": newName, "amount": ""})
    newIng.unit = ingredient.unit
    newIng.amount = ingredient.amount
    return newIng
def printRecipe(recipe, transformType):
recipe.name = "%s Version of - " % transformType + recipe.name
print recipe.unicode() |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provide a high-level interface for the Sanger sequence analysis workflow.
Functionality to retrieve identifiers and sequence records from repositories
can be imported from the ``sanger_sequencing.clients`` subpackage.
"""
import logging
from typing import Dict, List
from Bio.SeqRecord import SeqRecord
from pandas import DataFrame
from . import analysis, validation
from .helpers import log_errors
__all__ = ("sanger_verification", "plasmid_report", "sample_report")
LOGGER = logging.getLogger(__name__)
def sanger_verification(template: DataFrame,
                        plasmids: Dict[str, SeqRecord],
                        samples: Dict[str, SeqRecord]
                        ) -> List[Dict]:
    """
    Run the complete Sanger verification workflow for many plasmids/reads.

    Parameters
    ----------
    template : pandas.DataFrame
        Table with three identifier columns: plasmid, primer, sample.
    plasmids : dict
        Maps plasmid identifiers to sequence records.
    samples : dict
        Maps sample identifiers to sequence records.

    Returns
    -------
    list
        One plasmid report dictionary per plasmid in the template.

    Raises
    ------
    AssertionError
        Arguments are extensively validated and may raise errors.

    See Also
    --------
    plasmid_report
    """
    LOGGER.info("Validate template.")
    errors = validation.validate_template(template)
    if len(errors) > 0:
        log_errors(errors)
        raise AssertionError("Invalid analysis template.")
    LOGGER.info("Validate plasmids.")
    for record in plasmids.values():
        validation.validate_plasmid(record, [])
    LOGGER.info("Validate samples.")
    for record in samples.values():
        validation.validate_sample(record)
    template = validation.drop_missing_records(template, plasmids, samples)
    LOGGER.info("Generate reports.")
    reports = []
    for plasmid_id, group in template.groupby(
            "plasmid", as_index=False, sort=False):
        reports.append(
            plasmid_report(plasmid_id, plasmids[plasmid_id], group, samples))
    return reports
def plasmid_report(plasmid_id: str,
                   sequence: SeqRecord,
                   template: DataFrame,
                   samples: Dict[str, SeqRecord]) -> Dict:
    """
    Analyze a single plasmid against one or more sample reads.

    A detailed report is produced per sample read, then each alignment
    conflict is classified using information aggregated across all reads.

    Parameters
    ----------
    plasmid_id : str
        The plasmid identifier.
    sequence : Bio.SeqRecord.SeqRecord
        The plasmid's sequence record.
    template : pandas.DataFrame
        The slice of the template table concerning this plasmid only.
    samples : dict
        Maps sample identifiers to sequence records.

    Returns
    -------
    dict
        An individual plasmid report.
    """
    LOGGER.info("Analyze plasmid '%s'.", plasmid_id)
    sample_reports = []
    for row in template.itertuples(index=False):
        sample_reports.append(
            sample_report(row.sample, samples[row.sample], row.primer,
                          plasmid_id, sequence))
    report = {
        "id": plasmid_id,
        "name": sequence.name,
        "samples": sample_reports
    }
    # Post-processing: classify each read's conflicts in the context of the
    # concatenated evidence from all reads.
    LOGGER.debug("Concatenate the detailed sample reports.")
    total = analysis.concatenate_sample_reports(report["samples"])
    for rep in report["samples"]:
        rep["conflicts"] = analysis.summarize_plasmid_conflicts(
            rep["details"], total, sequence)
    return report
def sample_report(sample_id: str,
                  sample_sequence: SeqRecord,
                  primer_id: str,
                  plasmid_id: str,
                  plasmid_sequence: SeqRecord) -> Dict:
    """
    Analyze a single sample read against its plasmid.

    Parameters
    ----------
    sample_id : str
        The sample identifier.
    sample_sequence : Bio.SeqRecord.SeqRecord
        The sample's sequence record.
    primer_id : str
        The primer identifier.
    plasmid_id : str
        The plasmid identifier.
    plasmid_sequence : Bio.SeqRecord.SeqRecord
        The plasmid's sequence record.

    Returns
    -------
    dict
        An individual sample report; if trimming fails, the report carries
        the error message and no alignment details.
    """
    LOGGER.info("Analyze sample '%s'.", sample_id)
    report = {
        "id": sample_id,
        "primer": primer_id,
        "readLength": len(sample_sequence),
        "errors": []
    }
    try:
        start, trimmed_seq, quality_scores, end, median = analysis.trim_sample(
            sample_sequence)
    except ValueError as err:
        # A read that cannot be trimmed is reported with its error only.
        report["errors"].append(str(err))
        return report
    report["medianQuality"] = median
    # Cut positions go through int() for JSON serializability.
    report["cutBeginning"] = int(start)
    report["cutEnd"] = int(end)
    alignment = analysis.emboss_alignment(
        sample_id, trimmed_seq, plasmid_id, plasmid_sequence)
    report["details"] = analysis.alignment_to_table(
        alignment, quality_scores, start)
    return report
|
"""
Crea un programa que calcule la media de todos los elementos de una lista
"""
lista=[3, 4, 5 ,6, 12, 98, 4]
contador=0
suma=0
for item in lista:
contador+=1
suma+=item
resultado=suma/contador
print("La media es: {}".format(resultado))
|
def square(num):
    """Return the square of *num*.

    Bug fix: the original returned ``num ** num`` (num raised to its own
    power), which contradicts the function's name; squaring is ``num ** 2``.
    """
    return num ** 2


def square_print(num):
    """Print the square of *num* (returns None).

    Same ``num ** num`` -> ``num ** 2`` fix as ``square`` for consistency.
    """
    score = num ** 2
    print(score)


square_print(3)
a = square(10) * 3
print(a)
import numpy as np
import skimage.morphology as morth
class Morphology:
    """Clean up binary person silhouettes with morphological operations.

    Fixes relative to the original:
    * ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
      builtin ``bool`` is the documented replacement.
    * ``del_empty`` had an off-by-one when trimming trailing rows/columns:
      it deleted from the *last non-empty* index instead of the one after
      it, discarding an occupied row/column (and crashing with an
      IndexError when a figure had trailing empty rows).
    * Commented-out experimental structuring-element code removed.
    """

    def __init__(self, person):
        # person is a project object carrying figures/widths/areas lists.
        self.person = person
        self.figures = self.person.figures
        self.morph_figures = []
        # Zero padding (pixels) added on each side before closing, so the
        # structuring element can act on shapes touching the border.
        self.elem_x = 5
        self.elem_y = 5

    def morthing(self):
        """Close/open each figure, trim its empty border, and update the
        person's widths, areas and figures in place.  Returns the person."""
        for i in range(len(self.person.figures)):
            figure = np.array(self.person.figures[i], dtype=bool)
            # Pad elem_y empty rows above/below and elem_x empty columns
            # left/right.
            row = np.zeros((figure.shape[1],), dtype=bool)
            for k in range(self.elem_y):
                figure = np.vstack([row, figure])
                figure = np.vstack([figure, row])
            y_shape = figure.shape[0]
            column = np.zeros((y_shape, 1), dtype=bool)
            for k in range(self.elem_x):
                figure = np.hstack([column, figure])
                figure = np.hstack([figure, column])
            # Upper half: rectangular closing; lower half: disk closing then
            # opening (different elements for torso vs. legs region).
            selem = morth.rectangle(5, 3)
            figure[0:int(len(figure) / 2)] = morth.closing(figure[0:int(len(figure) / 2)], selem)
            selem = morth.disk(1)
            figure[int(len(figure) / 2):len(figure)] = morth.closing(figure[int(len(figure) / 2):len(figure)], selem)
            figure[int(len(figure) / 2):len(figure)] = morth.opening(figure[int(len(figure) / 2):len(figure)], selem)
            figure = morth.remove_small_holes(figure)
            figure = morth.remove_small_objects(figure, min_size=100)
            figure = self.del_empty(figure)
            self.person.widths[i] = len(figure[0])
            self.person.areas[i] = len(figure[0]) * len(figure)
            self.person.figures[i] = figure
        return self.person

    def del_empty(self, figure):
        """Strip all-zero border rows and columns from a 2-D array."""
        del_num = 0
        # Leading empty rows.
        for i in range(len(figure)):
            if np.sum(figure[i]) != 0:
                del_num = i
                break
        figure = np.delete(figure, np.s_[0:del_num], 0)
        # Trailing empty rows: delete AFTER the last non-empty row
        # (del_num + 1); the original started at del_num and threw away the
        # last occupied row.
        for i in reversed(range(len(figure))):
            if np.sum(figure[i]) != 0:
                del_num = i
                break
        figure = np.delete(figure, np.s_[del_num + 1:len(figure)], 0)
        # Leading empty columns.
        for i in range(len(figure[0])):
            if np.sum(figure[:, i]) != 0:
                del_num = i
                break
        figure = np.delete(figure, np.s_[0:del_num], 1)
        # Trailing empty columns (same off-by-one fix).
        for i in reversed(range(len(figure[0]))):
            if np.sum(figure[:, i]) != 0:
                del_num = i
                break
        figure = np.delete(figure, np.s_[del_num + 1:len(figure[0])], 1)
        return figure
|
# LEVEL 12
# http://www.pythonchallenge.com/pc/return/evil.html
import string

with open('data/evil2.gfx', 'rb') as f:
    first_bytes = f.read(100)

# The picture shows cards being dealt: each byte of the file belongs to a
# different "player" (output stream), but the number of players is unknown.
# Deal the first 100 bytes out to 1..10 players and inspect the result.
printable = string.digits + string.ascii_letters + string.punctuation + ' '
num_players = 10
for players in range(1, num_players + 1):
    print('players: ' + str(players))
    hands = [''] * players
    for pos, value in enumerate(first_bytes):
        shown = chr(value) if chr(value) in printable else '.'
        hands[pos % players] += shown
    for p, hand in enumerate(hands):
        print('{}: {}'.format(p, hand))

# 5 players show a certain symmetry; comparing the interleaved headers with
# https://en.wikipedia.org/wiki/List_of_file_signatures they coincide with
# (JPEG, PNG, GIF, PNG, JPEG).  Split the whole file into 5 streams.
num_players = 5
streams = [bytes()] * num_players
with open('data/evil2.gfx', 'rb') as f:
    payload = f.read()
for pos, value in enumerate(payload):
    streams[pos % num_players] += bytes([value])
ext = ['jpeg', 'png', 'gif', 'png', 'jpeg']
for n in range(num_players):
    print(len(streams[n]))
    with open('data/evil2.{}.{}'.format(n, ext[n]), 'wb') as f:
        f.write(streams[n])
import pymysql
from routes.passwords import host, username, password, db
def insert(programming_laungage, idea, by):
    """Insert one row into the ``ideas`` table.

    Parameters: the programming language, the idea text and the author
    (stored in the ``by_person`` column; column name typo preserved --
    it is part of the schema).

    Returns "Done" on success, "Problame" on failure (return strings kept
    byte-identical, including the typo, since callers may compare them).

    Fixes relative to the original:
    * connect() now uses keyword arguments -- PyMySQL 1.0+ rejects
      positional connection arguments.
    * the connection is closed in a ``finally`` block; the original leaked
      it whenever execute/commit raised.
    """
    connection = pymysql.connect(host=host, user=username, password=password, db=db)
    try:
        cursor = connection.cursor()
        sql = "INSERT INTO ideas(programming_laungage,idea,by_person) VALUES(%s,%s,%s)"
        cursor.execute(sql, (programming_laungage, idea, by))
        connection.commit()
        return "Done"
    except Exception as e:
        print(e)
        return "Problame"
    finally:
        connection.close()
def get_data():
    """Fetch all rows from the ``ideas`` table.

    Returns the fetched tuples on success, or the string "Problame" on
    failure (kept byte-identical, including the typo, for caller
    compatibility).

    Fixes relative to the original:
    * connect() now uses keyword arguments (PyMySQL 1.0+ rejects positional
      connection arguments).
    * the connection is closed in a ``finally`` block; the original leaked
      it whenever the query raised.

    NOTE(review): unlike insert(), this function hard-codes the
    "localhost"/"phpmyadmin" credentials instead of using the imported
    host/username/password/db settings -- kept as-is for safety; confirm
    and unify with routes.passwords.
    """
    connection = pymysql.connect(host="localhost", user="phpmyadmin", password="gitik", db="idealab")
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM ideas")
        return cursor.fetchall()
    except Exception as e:
        print(e)
        return "Problame"
    finally:
        connection.close()
#!/usr/bin/python
# -*- coding: utf-8 -*-
from subprocess import call, check_output
from sys import argv
from os import path
import syslog
from redmine import Redmine, exceptions as e
# Script name (basename of argv[0]); used in the USAGE text below.
FILENAME = str(argv[0]).split('/')[len(str(argv[0]).split('/')) - 1]
# Directory the script actually lives in (symlinks resolved by readlink -e).
APPLICATION_PATH = str(path.dirname(check_output(['readlink', '-e', argv[0]])))
API_KEY = ''  # Redmine API key (see your profile in RM)
RM_URL = ''  # Redmine URL
ICON = APPLICATION_PATH + '/rm.png'  # RM icon
RM = Redmine(RM_URL, key=API_KEY)
TASKS = RM.issue  # Redmine.Issue object - the issue list
USAGE = """
USAGE: %s [OPTIONS] ...
\t-n\tget today tasks assigned to you
\t-g\tget specified task
Example calls:
\t%s -n
\t%s -g 6667
""" % (FILENAME, FILENAME, FILENAME)
# All syslog entries from this script are tagged 'rm_tasks' with the PID.
syslog.openlog(ident='rm_tasks', logoption=syslog.LOG_PID)
def notify(title, body):
    """Show a desktop notification via ``notify-send`` and return the
    command's exit status."""
    cmd = ['notify-send', '-i', ICON, title, body]
    return call(cmd)
def today_tasks():
    """Desktop-notify every task from the saved 'today' query; shows a
    smiley when there are none.  Returns True, or notify()'s exit status
    for the empty case."""
    # project_id='-' -- root project identifier (to see tasks across all subprojects)
    # query_id -- identifier of a query saved in RM
    my_tasks = TASKS.filter(project_id='-', query_id=69)
    if len(my_tasks) == 0:
        return notify(":)", "Задач на сегодня нет")
    else:
        for task in my_tasks:
            # Title: "<id> <start date> <status>"
            title = str(task['id']) + " " + \
                str(task['start_date']) + " " + \
                str(task['status'])
            # Body: subject, description and a direct issue URL.
            body = task['subject'] + "\n" + \
                unicode(task['description']) + \
                "\n\nURL: " + RM_URL + "/issues/" + \
                str(task['id'])
            notify(title, body)
        return True
def get_task():
    """Notify with the details of the task whose number is currently in the
    X selection (read via ``xsel -o``).

    Fixes relative to the original:
    * the last-comment lookup had its try/except branches swapped: the
      success path assigned the "-" placeholder while the handler re-read
      the very attribute whose absence raised -- now the read is attempted
      and "-" is the fallback.
    * ``syslog.syslog()`` takes (priority, message); the original passed
      three arguments, which raises TypeError.  The task number is now
      concatenated into the message.
    """
    task_num = str(check_output(['xsel', '-o']))
    try:
        task = TASKS.get(task_num, include='journals')
    except e.ResourceNotFoundError:
        syslog.syslog(syslog.LOG_ERR, "#" + task_num + " " + u'Задача не существует или удалена')
        return notify("#" + task_num, u'Задача не существует или удалена')
    except e.ForbiddenError:
        syslog.syslog(syslog.LOG_ERR, "#" + task_num + " " + u'У вас нет прав на просмотр данной страницы')
        return notify("#" + task_num, u'У вас нет прав на просмотр данной страницы')
    notes = task['journals']
    # Validate properties that may be absent on the resource.
    try:
        last_comment = unicode(notes[(len(task['journals']) - 1)]['notes'])
    except e.ResourceAttrError:
        last_comment = u"-"
    try:
        last_comment_author = unicode(task['journals'][(len(task['journals']) - 1)]['user']['name'])
    except e.ResourceAttrError:
        syslog.syslog(syslog.LOG_INFO, "#" + task_num + " " + u"Без автора О_о")
        last_comment_author = u"Без автора О_о"
    try:
        start_date = str(task['start_date'])
    except e.ResourceAttrError:
        syslog.syslog(syslog.LOG_INFO, "#" + task_num + " " + u'Дата начала не была установлена')
        start_date = u'Дата начала не была установлена'
    # Compose the notification title and body.
    title = "#" + str(task['id']) + ' ' + str(task['created_on']) + " " + str(task['status'])
    body = task['subject'] + "\n" +\
        u'Дата начала: ' + start_date + "\n" + u'Описание: ' +\
        unicode(task['description']) +\
        "\n\nURL: " + RM_URL + "/issues/" +\
        str(task['id']) + u'\n\nПоследний комментарий:\n' +\
        u'Автор: ' + last_comment_author + "\n" + last_comment
    return notify(title, body)
# Command-line dispatch: exactly one option is required.
# NOTE(review): USAGE advertises "-g 6667", but a third argument makes
# len(argv) == 3 so USAGE is printed and the script exits; get_task()
# actually reads the task number from the X selection -- confirm which
# behavior is intended.
if len(argv) != 2:
    print(USAGE)
    exit(1)
if argv[1] == '-n':
    today_tasks()
elif argv[1] == '-g':
    get_task()
exit(0)
|
#!/usr/bin/python
# Compare two numbers and report which one is smaller.
x, y = 2, 8
if x < y:
    # Typo fix in the user-facing message: "less then" -> "less than".
    st = "x is less than y"
else:
    # Same fix: "more then" -> "more than".
    st = "x is more than y"
print(st)
|
from user_input import prompt
# Messages to be imported for 'Oh Great Knight' locations
# NPC dialogue and narration for 'Oh Great Knight' locations.  Each tuple
# holds one string per remark/paragraph, shown in order by the game.
# NOTE: backslash-continued lines are INSIDE the string literals, so their
# left margin is part of the text and they stay unindented.

# Shmucksburg village chatter (includes the armory and caravan quest hooks).
shmucksburg = ("Aye! The brigands robbed me again! What this village needs is a guardian!",
    "Our armory is pitiful. Donations of weapons, shields and armor would help us defend ourselves.",
    "What I need is a guardian for my caravan! Without that I can't deliver my goods to Cow-Hip. You wouldn't happen to be travelin' to Cow-Hip, would you sir?",
    "Nice mask fella! You're not a robber are you? Well I guess you'd have robbed me by now!",
    "Someone should go to King Vinny up north at Oldendrab Castle to request donations for our armory.",
    "Mommy, can I go to the Valley of Ferdidden Objects?",
    "No, dear! That place is dangerous!",
    "But they say that there's treasure there!",
    "Yes. And treasure attracts robbers. Besides, one would only bury something there if they thought it was cursed!"
    )
# Cow-Hip village chatter.
cow_hip = ("Welcome to Cow-Hip! Sure, we seem to be the butt of every joke. But in my opinion we have the best fresh dairy air!",
    "It sure is great to have commerce back in Cow-Hip. In fact, I heard there's a new item at the shop."
    )
# Fiddlestick chatter (progresses with the Silverrock storyline).
fiddlestick = ("Are you heading northeast to Castle Oldendrab? Do you have an audience with the King?",
    "There is much wealth up north. Both in Silverrock and in Oldendrab.",
    "Have you heard the terrible news? Silverrock has been invaded by an army of orcs!",
    "Hooray! Silverrock is free! Hooray! We surely would have been taken next!",
    "There he is! That guy with the mask saved Silverrock!"
    )
# Silverrock chatter after liberation.
silverrock = ("We are saved! Long live King Vinny!",
    "We're safe once again! At least until Monster Season :("
    )
# First audience with King Vinny at Oldendrab Castle.
oldendrab1 = ("You introduce yourself as an ambassador in order to gain audience with the King.",
    "King: Good morrow, young man. What is it you seek?",
    "Hero: Good King Vinny, I come on behalf of Shmucksburg. We are \
robbed incessantly. And since our armory was ransacked last year, we are \
utterly unable to defend ourselves. I request guards or perhaps armaments \
with which to defend our poor village.",
    "King: My good man, this entire kingdom is poor! At least it \
will be if we cannot regain Silverrock.... As of now, nearly all of my \
guards are preparing to march west to rid Silverrock of its orcish pests. \
How, then, could I supply your village with guards or armaments? I am \
sorry good sir, but I must send you away empty-handed.",
    "Hero: I will fight along side your men if you promise to help me.",
    "King: Are you offering me an ultimatem?! If you are indeed \
a warrior, then I order you to fight for me! In fact, I have a specific \
task for you:",
    "Go to the Valley of Forbidden Objects. There is treasure \
there with which we could bribe the orcs. I'm afraid \
they have grown dangerously armed and organized. I would rather give \
them buried treasure than waste my men on them. Here is a map. Dig \
up the chest at the end of the valley. Give it to the orcs as payment.\
Since you fancy yourself an ambassador, I'll leave convincing them to \
take it up to you.",
    "I am sending trusted servants with you. They will watch \
and make sure you do not steal the treasure. If you accomplish \
what I ask, I will see what I can do for Shmucksburg. Now make haste."
    )
# Second boss encounter: the parley with Guillek outside Silverrock.
boss2 = ("\nAs you approach Silverrock, you see two encampments facing each other \
with the helpless village close by. One of the King's servants hoists a \
negotiator's flag as you approach the orc general's tent. As you draw near, \
you see a half-dozen nasty orcs surrounding one who is nearly a foot taller \
than the rest. He is known as Guillek the Mighty, for he has never been \
harmed in combat. He is the legend who has four arms, the upper of \
which each holds a sword and the lower of which hold shields. He looks toward the box of \
treasure you had set down about fifteen yards from the tent. The King's servants \
explain to him King Vinny's offer. Guillek and two servants come out to examine \
the box.",
    "Orc 1: Is it a bait-switch?",
    "Orc 2: What's a bait-switch?",
    "Guillek: (Sigh) We won't know if it's a bait-and-switch unless they try \
to switch it.... Knuckleheads.",
    "King's Servant: As you can see, the King wishes to return these \
orc-made treasures to you in order to make peace....",
    "Orc 1: What's that smell.... It smells like Valley of Cursed Objects!",
    "Guillek: You try to bring us cursed treasure? This is King Vinny's \
offer?! Cursed treasure!?",
    "\nPrepare for battle!\n",
    "The orcs were definately not expecting that! With there general \
and greatest warrior dead and having all of King Vinny's army to deal with, \
the orcs take to flight, pursued by the King's men.",
    "King's Servant: I will tell the King what you have done! Please rest \
here in Silverrock and let the King's men handle the rest of the orcs. When \
you have regained your strength, come see King Vinny!"
    )
# Alternate route: fighting Guillek directly without the parley.
shortcut = ("You approach Silverrock. You see that an army of orcs has \
encamped just outside the village. Before you can make it to the gate, \
a very tall orc makes a move to intercept you.",
    "\nPrepare for battle!\n",
    "Your victory over the orcs' general is an unpleasant surprise \
for the rest of the army. But an even greater surprise had marched up to Silverrock \
during the course of your battle. Now with their general dead and having King \
Vinny's army to deal with, the orcs panic and take to flight. The King's \
army pursues the orcs as you enter Silverrock to a chorus of cheers.")
# Running the module directly just identifies it.
if __name__ == "__main__":
    print("This is a module for 'Oh Great Knight'.")
    prompt("Press enter to exit.")
|
#!/usr/bin/env python
__version__ = "0.6"
__author__ = "xrado"
import sys,os,re,urllib2
## python 2.5 compatibility
version = int(''.join(str(x) for x in sys.version_info[:2]))
try:
if version < 26: import simplejson as json
else: import json
except:
print "need json module"
os._exit(1)
## Windows
if sys.platform == 'win32':
import dbhash
import _winreg
import msvcrt
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "Software\\GTK\\2.0")
except EnvironmentError:
print "You must install the Gtk+ 2 Runtime Environment to run this program"
while not msvcrt.kbhit():
pass
sys.exit(1)
else:
gtkdir = _winreg.QueryValueEx(k, "Path")
os.environ['PATH'] += ";%s/lib;%s/bin" % (gtkdir[0], gtkdir[0])
## Linux
if sys.platform.find('linux') != -1:
path = os.path.expanduser('~')+'/.pygtranslator'
if not os.path.isdir(path):
os.mkdir(path)
file = open(path+'/pygtranslator.ini', 'w')
file.write("[general]\nfrom = 7\nto = 9")
file.close()
_ini = os.path.expanduser('~')+'/.pygtranslator/pygtranslator.ini'
if not os.path.exists(_ini): _ini = "pygtranslator.ini"
## Other paths
_glade = "/usr/share/pygtranslator/pygtranslator.glade"
if not os.path.exists(_glade): _glade = "pygtranslator.glade"
_icon = "/usr/share/pixmaps/pygtranslator.png"
if not os.path.exists(_icon): _icon = "pygtranslator.png"
try:
import gtk
import gtk.glade
import pygtk
pygtk.require("2.0")
except:
print "GTK2 runtime required"
sys.exit(1)
import gobject,gc,httplib,ConfigParser
from urlparse import urlparse
from urllib import urlencode
import socket
socket.setdefaulttimeout(10)
config = ConfigParser.ConfigParser()
config.read(_ini)
#httplib.HTTPConnection.debuglevel = 1
class TrayIcon(gtk.StatusIcon):
    """System-tray icon with an About/Exit popup menu.

    Left click toggles the main window; right click opens the menu.
    *parent* is the PyGTranslator instance owning the window.
    """
    def __init__(self, parent):
        global _icon
        self.parent = parent
        gtk.StatusIcon.__init__(self)
        # UIManager menu definition (parsed as XML, so the string's margin
        # is irrelevant to GTK).
        menu = '''
<ui>
<menubar name="Menubar">
<menu action="Menu">
<menuitem action="About"/>
<separator/>
<menuitem action="Quit"/>
</menu>
</menubar>
</ui>
'''
        # (name, stock id, label, accelerator, tooltip, callback)
        actions = [
            ('Menu', None, 'Menu'),
            ('About', gtk.STOCK_ABOUT, '_About', None, 'About pyGtranslator', self.on_about),
            ('Quit', gtk.STOCK_QUIT, '_Exit', None, 'Exit', self.parent.on_exit)]
        ag = gtk.ActionGroup('Actions')
        ag.add_actions(actions)
        self.manager = gtk.UIManager()
        self.manager.insert_action_group(ag, 0)
        self.manager.add_ui_from_string(menu)
        # The popup menu is the parent widget of any of its items.
        self.menu = self.manager.get_widget('/Menubar/Menu/Quit').props.parent
        self.set_from_file(_icon)
        self.set_tooltip('pyGtranslator')
        self.set_visible(True)
        self.connect('popup-menu', self.on_popup_menu)
        self.connect('activate', self.on_active)
    def on_popup_menu(self, status, button, time):
        # Right click: show the About/Exit menu.
        self.menu.popup(None, None, None, button, time)
    def on_active(self, data):
        # Left click: toggle the main window's visibility.
        if self.parent.window.get_property('visible'): self.parent.window.hide()
        else: self.parent.window.show_all()
    def on_about(self, data):
        # Modal About dialog.
        global __version__
        dialog = gtk.AboutDialog()
        dialog.set_name('pyGtranslator')
        dialog.set_version(__version__)
        dialog.set_comments('GUI tool for Google translate')
        dialog.set_website('xrado.hopto.org')
        dialog.run()
        dialog.destroy()
class PyGTranslator:
    """Main application: two text panes plus source/target language combo
    boxes, translating through the Google Translate v2 REST API."""
    def __init__(self):
        # Build the UI from the glade file and populate both combo boxes
        # with the language lists scraped from translate.google.com.
        global _glade,_icon,config
        self.config = config;
        self.xml = gtk.glade.XML(_glade, 'window')
        self.window = self.xml.get_widget('window')
        self.window.set_icon_from_file(_icon)
        # Source-language selector ("sl"); rows are (label, code).
        self.combo = self.xml.get_widget('combobox1')
        f_list = gtk.ListStore(str,str)
        self.FROM = self.get_langs("sl");
        self.FROM = sorted( self.FROM.items(), key=lambda x: x[1] )
        for a in self.FROM: f_list.append([a[1],a[0]])
        cell = gtk.CellRendererText()
        self.combo.pack_start(cell)
        self.combo.add_attribute(cell, 'text', 0)
        self.combo.set_model(f_list)
        # Restore the last selection saved in the ini file.
        self.combo.set_active(int(config.get("general", "from")))
        # Target-language selector ("tl").
        self.combo2 = self.xml.get_widget('combobox2')
        f_list2 = gtk.ListStore(str,str)
        self.TO = self.get_langs("tl");
        self.TO = sorted( self.TO.items(), key=lambda x: x[1] )
        for a in self.TO: f_list2.append([a[1],a[0]])
        cell2 = gtk.CellRendererText()
        self.combo2.pack_start(cell2)
        self.combo2.add_attribute(cell2, 'text', 0)
        self.combo2.set_model(f_list2)
        self.combo2.set_active(int(config.get("general", "to")))
        # Input (textview1) and output (textview2) buffers.
        self.textview1 = self.xml.get_widget('textview1')
        self.textviewbuff1=gtk.TextBuffer(None)
        self.textview1.set_buffer(self.textviewbuff1)
        self.textview2 = self.xml.get_widget('textview2')
        self.textviewbuff2=gtk.TextBuffer(None)
        self.textview2.set_buffer(self.textviewbuff2)
        self.xml.signal_autoconnect(self)
        self.window.show_all()
        self.tray = TrayIcon(self)
    def get_translation(self,params):
        # Call the Translate v2 API; returns the translated text, or an
        # error string that the caller displays as-is.
        try:
            req = urllib2.Request('https://www.googleapis.com/language/translate/v2?'+urlencode(params))
            resp = urllib2.urlopen(req)
        except:
            return "can't connect to google translate"
        if resp.code == 200:
            d = json.loads(resp.read())
            return d['data']['translations'][0]['translatedText']
        else: return "error occurred"
    def get_langs(self,start):
        # Scrape the language <select> named *start* ("sl" or "tl") from
        # the translate.google.com HTML; returns {code: label}.
        u = urlparse('http://translate.google.com')
        data = {}
        try:
            conn = httplib.HTTPConnection(u[1])
            conn.request("GET", u[2])
            resp = conn.getresponse()
        except:
            # No network: report and quit, the UI cannot be populated.
            dialog = gtk.AboutDialog()
            dialog.set_name('error')
            dialog.set_comments("can't connect to google translate")
            dialog.run()
            dialog.destroy()
            sys.exit(1)
        if resp.status == 200:
            result = resp.read()
            # Cut the HTML down to the <option> list of the wanted <select>.
            result = result[result.index(re.search("<select.*?name="+start+".*?>",result).group(0)):result.index("</select>")]
            result = result[result.index("<option"):]
            rows=result.split("</option>")
            for row in rows:
                if "disabled" in row: continue
                try:
                    # value="code">label  ->  data[code] = label
                    data[row[row.index('"')+1:row.rindex('"')]] = row[row.index('>')+1:]
                except:
                    pass
        return data
    def do_translate(self, widget):
        # Translate-button handler: read both combos and the input buffer,
        # query the API, and show the result in the output buffer.
        model = self.combo.get_model()
        index = self.combo.get_active()
        model2 = self.combo2.get_model()
        index2 = self.combo2.get_active()
        sl = model[index][1]
        if sl == 'auto': sl = ''  # empty source => Google auto-detects
        post = {
            'key' : 'AIzaSyDkh-a1alEv25TUloDIcQlDopp4ME5ZG1Y',
            'q': self.textviewbuff1.get_text(*self.textviewbuff1.get_bounds()),
            'source': sl,
            'target': model2[index2][1]
        }
        trans = self.get_translation(post)
        if trans: self.textviewbuff2.set_text(trans)
    def on_switch(self, widged):
        # Swap source and target languages (when both exist in the opposite
        # list) and swap the two text panes.
        model = self.combo.get_model()
        index = self.combo.get_active()
        model2 = self.combo2.get_model()
        index2 = self.combo2.get_active()
        if not model[index][1]=='auto':
            if dict(self.TO)[model[index][1]] and dict(self.FROM)[model2[index2][1]]:
                p=0
                for i in self.TO:
                    if i[0]==model[index][1]: self.combo2.set_active(p)
                    p+=1
                p=0
                for i in self.FROM:
                    if i[0]==model2[index2][1]: self.combo.set_active(p)
                    p+=1
        # NOTE(review): original indentation was lost; the text swap is
        # taken to happen unconditionally -- confirm.
        text = self.textviewbuff1.get_text(*self.textviewbuff1.get_bounds())
        text2 = self.textviewbuff2.get_text(*self.textviewbuff2.get_bounds())
        self.textviewbuff1.set_text(text2)
        self.textviewbuff2.set_text(text)
    def on_clear(self, widged):
        # Clear both panes.
        self.textviewbuff1.set_text('')
        self.textviewbuff2.set_text('')
    def on_hide(self, widget, event):
        # Window delete event: hide to tray instead of quitting.
        self.window.hide()
        return True;
    def on_exit(self, widget):
        # Persist the current combo selections, then quit the main loop.
        global _ini
        self.config.set('general', 'from', self.combo.get_active())
        self.config.set('general', 'to', self.combo2.get_active())
        self.config.write(open(_ini, 'w'))
        gc.collect()
        gtk.main_quit()
    def main(self):
        # Enter the GTK main loop.
        gtk.main()
# Entry point: run the GUI; Ctrl-C exits quietly.
if __name__ == '__main__':
    try:
        PyGTranslator().main()
    except KeyboardInterrupt:
        pass
|
import time
import numpy as np
import neworder as no
from markov_chain import MarkovChain
import visualisation
# Logging and checking options
# no.verbose()
no.checked()
# Population size and time horizon; dt scales the transition intensities.
npeople = 100000
tmax = 100
dt = 1.0
# params of poisson process transitions (p=lambda.exp(-lambda.x) where lambda=1/mean)
# mu_ab is the mean sojourn time before the a -> b transition.
mu_01 = 13.0
mu_02 = 23.0
mu_12 = 29.0
mu_20 = 17.0
# Transition intensities: lambda = 1 / mean.
lambda_01 = 1.0 / mu_01
lambda_02 = 1.0 / mu_02
lambda_12 = 1.0 / mu_12
lambda_20 = 1.0 / mu_20
states = np.array([0, 1, 2])
# possible transitions:
# 0 -> 1
# \ \
# <-> 2
# Per-step transition matrix: off-diagonal entries are lambda*dt, and each
# diagonal absorbs the remainder so every row sums to 1.
transition_matrix = np.array([
    [1.0 - lambda_01 * dt - lambda_02 * dt, lambda_01 * dt, lambda_02 * dt ],
    [0.0, 1.0 - lambda_12 * dt, lambda_12 * dt ],
    [lambda_20 * dt, 0.0, 1.0 - lambda_20 * dt]
])
# tmax steps spanning [0, tmax] (i.e. step size dt = 1).
timeline = no.LinearTimeline(0, tmax, tmax)
model = MarkovChain(timeline, npeople, states, transition_matrix)
# Time the simulation run and report it through neworder's logger.
start = time.time()
no.run(model)
no.log("run time = %.2fs" % (time.time() - start))
visualisation.show(model)
|
#-*- coding: utf-8 -*-
### List of updates to settings.py tuples
### (to be applied by "update_settings" function below).
updates = dict(
    INSTALLED_APPS = ['feedjack', 'feedjack_wp_export', 'djcelery'],
    CELERY_IMPORTS = ['feedjack_wp_export.models'] )
## "south" app is not strictly required, unless you need migrations.
try: import south
except ImportError: pass
else: updates['INSTALLED_APPS'].append('south')
### Provide some Django-1.4 defaults, so that module
### can be imported into DJANGO_SETTINGS_MODULE, like this:
###
###   from feedjack_wp_export.settings_base import *
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader' )
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz' )
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware' )
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles' )
### Init Celery
# djcelery must hook the Celery loader before tasks are imported.
import djcelery
djcelery.setup_loader()
### Optinal "smart" settings module updater.
### Use it like this (at the end of settings.py):
###
### from feedjack_wp_export.settings_base import update_settings
### update_settings(__name__)
import sys, types, functools
def _update_module(mod, updates, only=None, ignore=None):
def _smart_extend(base, update, update_filter=None):
if not isinstance(update, list):
return update if base is None else base
base = list(base or list())
if update_filter: base = filter(update_filter, base)
for cls in update:
if cls not in base: base.append(cls)
return tuple(base) # django uses tuples
if isinstance(mod, types.StringTypes): mod = sys.modules[mod]
for k, v in updates.viewitems():
if (only and k not in only) or (ignore and k in ignore): continue
update_filter, update = v if isinstance(v, tuple) else (None, v)
update = _smart_extend(getattr(mod, k, None), update, update_filter)
setattr(mod, k, update)
update_settings = functools.partial(_update_module, updates=updates)
## Update django defaults (listed above) for direct imports
update_settings(__name__)
|
import os
import webbrowser
import sys
import interact.display as disp
from googlesearch import *
def _1_(arglen,command,com_arg,path,historydir,origin,func,var,browserPath,arg0,arg1,arg2,arg3,invalidnames,commandlist):
    """Execute one single-argument shell command and return ``(origin, path)``.

    ``command[0]`` is the command name and ``com_arg[0]`` its argument.
    ``arg0``..``arg3`` hold the command names taking 0..3 arguments,
    ``var`` maps user-declared variable names to their values,
    ``invalidnames`` lists substrings forbidden in variable names, and
    ``commandlist`` holds all reserved command names.  ``historydir`` is
    the history-file path, ``path`` the current directory, ``origin`` the
    home directory, ``func`` a dotted sub-path accumulator.
    NOTE(review): ``arglen`` is never used, and ``browserPath`` is only
    copied into an unused local — candidates for removal at the call site.
    """
    # Wrong-arity guard: anything not in the 1-argument group is rejected
    # with a hint at its real arity.
    if command[0] not in arg1:
        if command[0] in arg0:
            print("rf5>>Command '{}' doesn't take arguments. Enter 'help' for more info.".format(command[0]))
        elif command[0] in arg2:
            print("rf5>>Command '{}' takes 2 arguments. Enter 'help' for more info.".format(command[0]))
        elif command[0] in arg3:
            print("rf5>>Command '{}' takes 3 arguments. Enter 'help' for more info.".format(command[0]))
        return origin,path
    # Variable substitution: replace the argument by its stored value,
    # except for declaration commands, where the argument IS the name.
    types = ['int','string','float','list','tuple']
    if com_arg[0] in var and command[0] not in types:
        com_arg[0] = var[com_arg[0]]
    if command[0]=='type':
        try:
            print(type(com_arg[0]))
        except:
            print("rf5>>Invalid value")
        else:
            pass
        return origin,path
    # --- display helpers: delegate formatting to interact.display -------
    if command[0]=="error":
        disp.error(com_arg[0])
        return origin,path
    if command[0]=="box":
        disp.box(m=com_arg[0])
        return origin,path
    if command[0]=="info":
        disp.info(com_arg[0])
        return origin,path
    if command[0]=="warn":
        disp.warn(com_arg[0])
        return origin,path
    if command[0]=="tip":
        disp.tip(com_arg[0])
        return origin,path
    if command[0]=='msg':
        disp.msg(m=com_arg[0])
        print()
        return origin,path
    if command[0]=='star':
        disp.star(m=com_arg[0])
        print()
        return origin,path
    # --- variable declarations ------------------------------------------
    # The five blocks below are identical except for the initial value:
    # reject reserved command names and names containing any forbidden
    # substring, otherwise register the variable in `var`.
    if command[0]=='int':
        if com_arg[0] in commandlist:
            print("rf5>>Invalid variable name.")
        else:
            flagname=1
            for i in invalidnames:
                if i in com_arg[0]:
                    print("rf5>>Invalid variable name.")
                    flagname=0
                    break
                else:
                    flagname=1
                    pass
            if flagname==1:
                var[com_arg[0]] = 0
            else:
                pass
        return origin,path
    if command[0]=='string':
        if com_arg[0] in commandlist:
            print("rf5>>Invalid variable name.")
        else:
            flagname=1
            for i in invalidnames:
                if i in com_arg[0]:
                    print("rf5>>Invalid variable name.")
                    flagname=0
                    break
                else:
                    flagname=1
                    pass
            if flagname==1:
                var[com_arg[0]] = ""
            else:
                pass
        return origin,path
    if command[0]=='float':
        if com_arg[0] in commandlist:
            print("rf5>>Invalid variable name.")
        else:
            flagname=1
            for i in invalidnames:
                if i in com_arg[0]:
                    print("rf5>>Invalid variable name.")
                    flagname=0
                    break
                else:
                    flagname=1
                    pass
            if flagname==1:
                var[com_arg[0]] = 0.0
            else:
                pass
        return origin,path
    if command[0]=='list':
        if com_arg[0] in commandlist:
            print("rf5>>Invalid variable name.")
        else:
            flagname=1
            for i in invalidnames:
                if i in com_arg[0]:
                    print("rf5>>Invalid variable name.")
                    flagname=0
                    break
                else:
                    flagname=1
                    pass
            if flagname==1:
                var[com_arg[0]] = []
            else:
                pass
        return origin,path
    if command[0]=='tuple':
        if com_arg[0] in commandlist:
            print("rf5>>Invalid variable name.")
        else:
            flagname=1
            for i in invalidnames:
                if i in com_arg[0]:
                    print("rf5>>Invalid variable name.")
                    flagname=0
                    break
                else:
                    flagname=1
                    pass
            if flagname==1:
                var[com_arg[0]] = ()
            else:
                pass
        return origin,path
    # --- history search: print every history line containing the query --
    if command[0]=="searchhistory":
        if os.path.exists(historydir)==False:
            print("rf5>>No history found.")
            return origin,path
        filee = open(historydir,'r')
        histdata = filee.readlines()
        filee.close()
        if histdata==[]:
            print("rf5>>No history found.")
            return origin,path
        print()
        for x in histdata:
            x = x.strip()
            x = x.strip("\n")
            if com_arg[0] in x:
                print(x)
        print()
        return origin,path
    # --- web search: open a Google query in the default browser ---------
    # NOTE(review): the googlesearch loop only gates the webbrowser call on
    # the search succeeding; its `url` result is ignored, and `browse_path`
    # is never used.
    if command[0]=='browse':
        print("\nSearch:{}\n".format(com_arg[0]))
        browse_path = browserPath
        for url in search(com_arg[0], tld="co.in", num=1, stop = 1, pause = 2):
            webbrowser.open("https://google.com/search?q=%s" % com_arg[0])
        return origin,path
    # --- split the argument into file stem + extension for the file
    # commands below; '/' and '\' in the stem are normalised to '.'. ------
    try:
        lif = com_arg[0].split('.')
        ext=""
        file = lif[0]
        file = file.strip()
        if len(lif)>=2:
            ext = lif[1]
            ext = ext.strip()
        file = file.replace("/",".")
        file = file.replace("\\",".")
    except:
        print("rf5>>An error occured. Check your input and try again.")
    else:
        pass
    if ext!="py" and command[0]=='funclist':
        print("rf5>>Command '{}' works with '.py' files only.".format(command[0]))
    else:
        pass
    # --- addpath: descend into a subdirectory of the current path -------
    if command[0]=='addpath':
        if "." in com_arg[0]:
            print("rf5>>Cannot add file to path.")
            return origin,path
        if os.path.exists(pt:=(path+"\\"+com_arg[0]))==False:
            print(f"rf5>>Path '{pt}' doesn't exist.")
            return origin,path
        path = path+"\\"+com_arg[0]
        com_arg[0] = com_arg[0].replace("/",".")
        com_arg[0] = com_arg[0].replace("\\",".")
        if func.strip()=="":
            func=com_arg[0]
        else:
            func = func+"."+com_arg[0]
        func=func.strip()
        if "-" in func:
            print("rf5>>Invalid character '-' found. Files may or may not open and 'runfunc' will not work.")
        print("\nNewPath:{}".format(path))
        # NOTE(review): both sides of this `or` are the same test for '.';
        # after the replace() calls above a leading '/' or '\' both become
        # '.', so one test suffices — but the duplication looks like a typo.
        if com_arg[0][0]=='.' or com_arg[0][0]=='.':
            print("rf5>>Dir/subdir request beginning with '/' or '\\' may result in wrong path.")
        return origin,path
    # --- file creation / deletion ---------------------------------------
    if command[0]=='createfile':
        if os.path.exists(path+"\\"+com_arg[0])==True:
            print("rf5>>File '{}' already exists.".format(com_arg[0]))
        else:
            try:
                filee = open(path+"\\"+com_arg[0],'x')
                filee.close()
            except:
                print("rf5>>An error occured. Check your input and try again.")
            else:
                print("rf5>>File '{}' has been created.".format(com_arg[0]))
        return origin,path
    if command[0]=='delfile':
        try:
            os.remove(path+"\\"+com_arg[0])
        except:
            print("rf5>>File '{}' doesn't exist.".format(com_arg[0]))
        else:
            print("rf5>>File '{}' has been deleted.".format(com_arg[0]))
        return origin,path
    # --- findpath: case-insensitive substring search over the tree ------
    if command[0]=='findpath':
        print()
        for root, dirs, files in os.walk(path, topdown=False):
            for name in files:
                if com_arg[0].lower() in name.lower():
                    print(os.path.join(root, name))
            for name in dirs:
                if com_arg[0].lower() in name.lower():
                    print(os.path.join(root, name))
        print()
        return origin,path
    # --- newhomepath: change home dir; fall back to cwd if missing ------
    if command[0]=='newhomepath':
        origin=com_arg[0].replace("/","\\")
        if os.path.exists(pt:=origin)==False:
            print(f"rf5>>Path '{pt}' doesn't exist.")
            origin = os.getcwd()
            path=origin
            func=""
            print("\nHomePath:{}".format(origin))
        else:
            path=origin
            func=""
            print("\nNewHomePath:{}".format(origin))
        return origin,path
    # Every command below operates on an existing file.
    if os.path.exists(path+"\\"+com_arg[0])==False:
        print("rf5>>File '{}' doesn't exist.".format(com_arg[0]))
        return origin,path
    else:
        pass
    # --- RunFile: tag a file with the '#RunFile' marker -----------------
    if command[0]=='RunFile':
        filee = open(path+"\\"+com_arg[0],'a')
        filee.write("#RunFile\n")
        filee.close()
        if ext=="py":
            print("rf5>>The output of this file will now be displayed on the terminal.")
        return origin,path
    # --- content: dump file contents ------------------------------------
    # NOTE(review): if open() itself raises, `filee` is unbound and the
    # `finally` close raises NameError.
    if command[0]=='content':
        try:
            filee = open(path+"\\"+com_arg[0],'r')
            data = filee.read()
            print("\n{}\n".format(data.strip("\n")))
        except:
            print("rf5>>Couldn't read file.")
        finally:
            filee.close()
        return origin,path
    if command[0]=='clearcontent':
        filee = open(path+"\\"+com_arg[0],'w')
        filee.write("")
        filee.close()
        print("rf5>>File content has been deleted.")
        return origin,path
    # --- addcontent: interactive append loop ----------------------------
    # '[sub]'/'[endsub]' adjust a 4-space indent level, '[RunFile]' writes
    # the marker, '$$name' interpolates declared variables, '[endfile]'
    # ends the session.  The file is reopened in append mode each turn.
    if command[0]=='addcontent':
        sub=0
        print("\nFile:{}\n".format(com_arg[0]))
        while True:
            try:
                file = open(path+"\\"+com_arg[0],'a')
                con = input(" "*sub+">")
                if con=='[sub]':
                    sub+=4
                    continue
                if con=='[RunFile]':
                    con = "#RunFile"
                if con=='[endsub]':
                    sub=sub-4
                    if sub<=0:
                        sub=0
                    continue
                if "$$" in con:
                    conli = con.split("$$")
                    consubli = []
                    consubsubli = []
                    for cl in conli:
                        cl = cl.strip()
                        clli = cl.split(" ")
                        consubli.append(clli)
                    for clx in consubli:
                        consubsubli.append(clx[0])
                    for cli in consubsubli:
                        if cli in var:
                            con = con.replace("$$"+cli,var[cli])
                if con=='[endfile]':
                    file.close()
                    print("\nrf5>>New content has been added.")
                    break
                file.write(" "*sub+con+"\n")
            except:
                print("\nrf5>>An unexpected error occured. File will be closed.")
                file.close()
                break
            else:
                pass
            file.close()
        return origin,path
    # --- runfile / funclist for '.py' files -----------------------------
    if ext=='py':
        filee = open(path+"\\"+com_arg[0],'r')
        datar = filee.read()
        filee.close()
        if command[0]=='runfile':
            print("\nStart:{}".format(com_arg[0]))
            # Marked files run in this terminal; unmarked ones are handed
            # to the OS default handler via webbrowser.
            if "#RunFile" in datar:
                os.system(r'{}'.format(path+"\\"+com_arg[0]))
            else:
                webbrowser.open(r'{}'.format(path+"\\"+com_arg[0]))
            print("Stop:{}\n".format(com_arg[0]))
        elif command[0]=='funclist':
            filee = open(path+"\\"+com_arg[0],'r')
            data = filee.readlines()
            filee.close()
            print()
            # Crude scan: any line containing 'def ' / 'class ' is listed
            # with the keyword stripped out.
            for lines in data:
                funcname=""
                lines = lines.strip()
                if 'def ' in lines or 'class ' in lines:
                    lines = lines.strip(":")
                    lifunc = lines.split(" ")
                    lifunc[0]=""
                    for x in lifunc:
                        funcname = funcname+x
                    if 'def ' in lines:
                        print("Function:"+funcname)
                    elif 'class ' in lines:
                        print("Class:"+funcname)
            print()
        return origin,path
    else:
        # Non-.py files are always opened via the OS default handler.
        if command[0] == 'runfile':
            print("\nStart:{}".format(com_arg[0]))
            webbrowser.open(r"{}".format(path+"\\"+com_arg[0]))
            print("Stop:{}\n".format(com_arg[0]))
        return origin,path
|
import pymongo

# Connect to MongoDB.
# NOTE(review): host and credentials are hard-coded — move them to
# configuration / environment variables before wider use.
mon = pymongo.MongoClient("mongodb://root:123456@192.168.211.118:27017")
# Select the database and collection to read.
mydb = mon["devops"]
col = mydb["goods"]
# Fetch all documents once and iterate that cursor.  (The original built
# this cursor and then issued a second, redundant find() for the loop.)
data = col.find()
for d in data:
    print(type(d))
    print(d)
|
#!/usr/bin/env python
#coding=utf-8
import ahocorasick
import pymongo
import jieba
import json
def get_collection(name):
    """Return the collection *name* from the ``weibo`` database on localhost."""
    client = pymongo.Connection("localhost", 27017)
    return client["weibo"][name]
def run():
    """Tag each weibo text with Aho-Corasick keyword hits and jieba tokens.

    Builds a keyword automaton from the 'keywords' collection, then for
    each document in 'text' prints '<_id>\\t<json>' where the JSON holds
    the matched keywords ('ac') and the jieba segmentation ('cutwords').
    Python 2 only: relies on the `print` statement and py2 str/bytes.
    """
    collection = get_collection("keywords")
    textcollection = get_collection("text")
    # Build the Aho-Corasick automaton over UTF-8 encoded keywords.
    tree = ahocorasick.KeywordTree()
    for i in collection.find():
        tree.add(i["word"].encode("utf-8"))
    tree.make()
    for i in textcollection.find():
        item = i["text"].encode("utf-8")
        try:
            words = jieba.cut(item, cut_all=False)
        except:
            # Skip texts jieba cannot segment.
            continue
        ac = list()
        cutwords = list()
        # findall yields (start, end) byte offsets into `item`.
        for match in tree.findall(item):
            word = item[match[0]:match[1]]
            ac.append(word.decode("utf-8"))
        # Only emit documents that matched at least one keyword.
        if(len(ac)==0):
            continue
        for word in words:
            cutwords.append(word)
        string = str(i["_id"])+"\t"+json.dumps({"ac": ac, "cutwords": cutwords})
        print string
        # if flag==0:continue
        # ac = list(set(ac))
        # i["kwords"] = ac
        # textcollection.save(i)
# Script entry point.
if __name__ == '__main__':
    run()
|
# -*- coding: utf-8 -*-
# @Time : 2018/8/27 23:04
# @File : auto_trader.py
import config
from easytrader import helpers
import easytrader
# Attach to a running TongHuaShun ('ths') standalone order client of
# Sinolink Securities and drive it through easytrader.
user = easytrader.use('ths')
user.connect(r'C:\Tool\gjzq\国金证券同花顺独立下单\xiadan.exe')
# user.prepare(user=config.username, password=config.password)
print('余额')
print(user.balance)
print('持仓')
# print(user.position)
for item in user.position:
    print(item)
# Trades executed today.
print(user.today_trades)
ipo_data = helpers.get_today_ipo_data()
# New IPO shares open for subscription today.
print(ipo_data)
# Demo order; real trades are placed here.
# NOTE(review): symbol/price/amount are hard-coded — confirm intended.
try:
    user.buy('123009', price=110, amount=100)
except Exception as e:
    print(e)
__author__ = 'Sanjay Narayana'
import numpy as np
from numpy import linalg as la
from k_means import KMeans
#from Test import k_means
# Fix the global RNG seed so the k-means run below is reproducible.
np.random.seed(10)
class PCA(object):
    """Principal-component analysis via the eigendecomposition of the
    sample covariance matrix.
    """

    def __init__(self):
        # Number of principal components to retain.
        self.number_of_principal_components = 2

    def compute_principal_components(self, data_path='audioData.csv'):
        """Load the data set and return its top principal components.

        ``data_path``: CSV file to load (default preserves the original
        hard-coded behaviour).  Also caches the raw data on
        ``self.audio_data`` for the projection step.
        Returns an array of shape (k, n_features): one component per ROW,
        ordered by decreasing eigenvalue.
        """
        self.audio_data = np.genfromtxt(data_path, delimiter=',')
        mean_of_audio_data = np.mean(self.audio_data, axis=0)
        centered_data = self.audio_data - mean_of_audio_data
        covariance_matrix = np.cov(centered_data, rowvar=False)
        # la.eigh returns eigenvalues in ASCENDING order with the
        # eigenvectors as COLUMNS of the second result.
        eigen_values, eigen_vectors = la.eigh(covariance_matrix)
        # Select the columns belonging to the k largest eigenvalues.
        # (The original indexed ROWS via np.where, which are not the
        # eigenvectors, and broke under repeated eigenvalues.)
        order = np.argsort(eigen_values)[::-1]
        top = order[:self.number_of_principal_components]
        # Transpose so components are rows, the convention
        # project_data_along_principal_components expects.
        top_eigen_vectors = eigen_vectors[:, top].T
        return top_eigen_vectors

    def project_data_along_principal_components(self, eigen_vectors):
        """Project the cached data onto the given components (rows).

        Returns an (n_samples, k) array of projections.
        """
        projected_data = np.dot(self.audio_data, eigen_vectors.T)
        return projected_data
if __name__ == '__main__':
    # Reduce the data to its principal components, then sweep k-means
    # over 2..10 clusters on the projected data and plot the losses.
    pca = PCA()
    eigen_vectors = pca.compute_principal_components()
    projected_data = pca.project_data_along_principal_components(eigen_vectors)
    clusters = range(2, 11)
    k_means = KMeans()
    #k_means(2,projected_data)
    losses = k_means.cluster(clusters,projected_data)
    k_means.plot_objective_function(clusters, losses)
# Generated by Django 3.0.2 on 2020-01-25 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Task table.
    # NOTE: auto-generated by Django — keep the schema operations unchanged;
    # make schema changes in a new migration instead.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                # Auto-incrementing primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Eisenhower-matrix flags.
                ('ez_important', models.BooleanField(default=False)),
                ('ez_urgent', models.BooleanField(default=False)),
                # Manual display ordering.
                ('order', models.IntegerField(default=0)),
                # Timestamps stored as raw integers (epoch-style).
                ('time_create', models.BigIntegerField(default=0)),
                ('time_update', models.BigIntegerField(default=0)),
                ('short_description', models.CharField(max_length=1000)),
                ('long_description', models.CharField(max_length=2500)),
            ],
        ),
    ]
|
def hi(name):
    """Print a greeting for *name* (no separator between 'Hi' and the name)."""
    greeting = 'Hi' + name + '!'
    print(greeting)


hi('Rachel')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.