text
stringlengths
8
6.05M
# -*- coding: utf-8 -*-
# -------------------------- Summary
# by william xia, supported by 北风万 video
# Chapter 4: selection structures (if/elif/else)
# ------------------------------

# Simple if/else demo.
x = 2
if x > 2:
    print("dayu 2")
else:
    print('budayu 2')


def ticket_price(people, zhan):
    """Return the total ticket price for *people* passengers and *zhan* stops.

    Pricing mirrors the original exercise: 3 per person for 1-4 stops,
    4 per person when there are 5-9 people, otherwise 5 per person.
    NOTE(review): the middle branch tests `people`, not `zhan` — preserved
    from the original; confirm whether that was intended.
    """
    if 1 <= zhan <= 4:
        return people * 3
    elif 5 <= people <= 9:
        return people * 4
    return people * 5


# Exercise: read the counts and print the price.
# BUG FIX: the original used Python 2 `print` statements alongside Python 3
# `input()`, and compared the raw input *strings* against ints (a TypeError
# on Python 3).  Convert the inputs to int and use the print() function.
if __name__ == '__main__':
    people = int(input('请输入人数\n'))
    zhan = int(input("请输入站数\n"))
    print('price', ticket_price(people, zhan))
from core import data


def get_data(logger, redis_host, redis_port, redis_db):
    """Factory wrapper: build a ``core.data.Data`` wired to the given
    logger and Redis connection parameters (host, port, db index)."""
    return data.Data(logger, redis_host, redis_port, redis_db)
import locale
from yandex import Translater
from update2text import update2text
from telegram import ReplyKeyboardMarkup as rkm
from activity import Activity
from config import BOT_API_TOKEN
from config import YANDEX_API_KEY


class Translator(Activity):
    """Telegram bot activity that translates messages with the Yandex API.

    Conversation flow: first_query asks for a direction (EN-->RU / RU-->EN);
    process() then either records the chosen direction or translates the
    incoming message and replies with the result.
    """

    def __init__(self):
        locale.setlocale(locale.LC_ALL, '')
        self.translator = Translater()
        self.translator.set_key(YANDEX_API_KEY)
        self.mode = 'EN-->RU'
        self.default_markup = rkm([['Exit']])

    def first_query(self, bot, update):
        """Start the activity: offer the two translation directions."""
        self.__init__()  # reset per-conversation state
        self.mode = rkm([['EN-->RU'], ['RU-->EN']], one_time_keyboard=True)
        bot.sendMessage(
            chat_id=update.message.chat.id,
            text="Select languages:",
            reply_markup=self.mode
        )

    def process(self, query, bot, update):
        """Handle a direction selection or translate an incoming message."""
        result = ""
        if query == 'RU-->EN':
            self.translator.set_from_lang('ru')
            self.translator.set_to_lang('en')
            self.locale = "ru-RU"
        elif query == 'EN-->RU':
            self.translator.set_from_lang('en')
            self.translator.set_to_lang('ru')
            self.locale = "en-US"
        else:
            ans = update2text(update, self.locale)
            if ans is not None:
                self.translator.set_text(ans)
                result = self.translator.translate()
        # BUG FIX: the fallback text used to be assigned *after*
        # bot.sendMessage, so it was dead code and an unrecognized or empty
        # message was answered with empty text.  Assign it before sending.
        if len(result) == 0:
            result = "What? try again, keep calm speak slowly and clearly."
        bot.sendMessage(
            chat_id=update.message.chat.id,
            text=result,
            reply_markup=self.default_markup
        )
import pytest
import os

if __name__ == '__main__':
    # pytest CLI arguments: run one workbench test module with output
    # capture disabled (-s), dump raw allure results to ./report/xml, plus
    # project-specific --host/--account/--db options (presumably consumed
    # by a conftest.py — verify there).
    run_config = [
        "-s",
        "./script/ji_yun_ying_app/workbench/test_myclue.py",
        "--alluredir", "./report/xml",
        "--host", "http://192.168.32.31",
        "--account", "A",
        "--db", "36"
    ]
    # Exit status of the pytest run (0 == all tests passed).
    x = pytest.main(run_config)
    # Use the os module to run the allure command and build the HTML report
    # from the raw results generated above.
    # os.system("/Users/huangqiang/WorkPlace/allure-2.13.8/bin/allure generate ./report/xml -o ./report/html --clean")
import unittest
from charm.toolbox.symcrypto import SymmetricCryptoAbstraction,AuthenticatedCryptoAbstraction, MessageAuthenticator
from charm.toolbox.pairinggroup import PairingGroup,GT
from charm.core.math.pairing import hashPair as sha1


class SymmetricCryptoAbstractionTest(unittest.TestCase):
    # Round-trip tests for the unauthenticated AES-CBC wrapper: keys are
    # derived by hashing a random element of the SS512 pairing group.

    def testAESCBC(self):
        self.MsgtestAESCBC(b"hello world")

    def testAESCBCLong(self):
        # Message longer than a single AES block.
        self.MsgtestAESCBC(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def testAESCBC_Seperate(self):
        self.MsgTestAESCBCSeperate(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def MsgtestAESCBC(self,msg):
        # Encrypt and decrypt with the *same* object.
        groupObj = PairingGroup('SS512')
        a = SymmetricCryptoAbstraction(sha1(groupObj.random(GT)))
        ct = a.encrypt(msg)
        dmsg = a.decrypt(ct);
        assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)

    def MsgTestAESCBCSeperate(self,msg):
        # Decrypt with a *different* object built from the same key: only
        # the key, not object state, must matter.
        groupObj = PairingGroup('SS512')
        ran = groupObj.random(GT)
        a = SymmetricCryptoAbstraction(sha1(ran))
        ct = a.encrypt(msg)
        b = SymmetricCryptoAbstraction(sha1(ran))
        dmsg = b.decrypt(ct);
        assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)


class AuthenticatedCryptoAbstractionTest(unittest.TestCase):
    # Same round-trip tests for the authenticated wrapper.

    def testAESCBC(self):
        self.MsgtestAESCBC(b"hello world")

    def testAESCBCLong(self):
        self.MsgtestAESCBC(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def testAESCBC_Seperate(self):
        self.MsgTestAESCBCSeperate(b"Lots of people working in cryptography have no deep \
concern with real application issues. They are trying to discover things \
clever enough to write papers about -- Whitfield Diffie.")

    def MsgtestAESCBC(self,msg):
        # Same-object round trip.
        groupObj = PairingGroup('SS512')
        a = AuthenticatedCryptoAbstraction(sha1(groupObj.random(GT)))
        ct = a.encrypt(msg)
        dmsg = a.decrypt(ct);
        assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)

    def MsgTestAESCBCSeperate(self,msg):
        # Separate-object round trip with a shared key.
        groupObj = PairingGroup('SS512')
        ran = groupObj.random(GT)
        a = AuthenticatedCryptoAbstraction(sha1(ran))
        ct = a.encrypt(msg)
        b = AuthenticatedCryptoAbstraction(sha1(ran))
        dmsg = b.decrypt(ct);
        assert msg == dmsg , 'o: =>%s\nm: =>%s' % (msg, dmsg)


class MessageAuthenticatorTest(unittest.TestCase):
    # MAC creation/verification, including tamper detection on each field
    # of the mac record ("msg", "digest", "alg").

    def testSelfVerify(self):
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        assert m.verify(a), "expected message to verify";

    def testSeperateVerify(self):
        # Verify with a fresh authenticator built from the same key.
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        assert m1.verify(a), "expected message to verify";

    def testTamperData(self):
        # Changing the message body must break verification.
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        a["msg"]= "tampered"
        assert not m1.verify(a), "expected message to verify";

    def testTamperMac(self):
        # Changing the digest must break verification.
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        a["digest"]= "tampered"
        assert not m1.verify(a), "expected message to verify";

    def testTamperAlg(self):
        # Downgrading the algorithm field must break verification: the MAC
        # covers alg + data.
        key = sha1(PairingGroup('SS512').random(GT))
        m = MessageAuthenticator(key)
        a = m.mac('hello world')
        m1 = MessageAuthenticator(key)
        m1._algorithm = "alg" # bypassing the algorithm check to verify the mac is over the alg + data
        a["alg"]= "alg"
        assert not m1.verify(a), "expected message to verify";


if __name__ == "__main__":
    unittest.main()
def total_letters(name):
    """Number of non-space characters in *name*."""
    return len(name) - name.count(' ')


def first_name_letters(name):
    """Number of letters in the first word of *name* (0 for empty input)."""
    parts = name.split()
    return len(parts[0]) if parts else 0


if __name__ == '__main__':
    nome = str(input('Digite seu nome:')).strip()
    print('Em maiusculo fica {}'.format(nome.upper()))
    print('Em minusculo fica {}'.format(nome.lower()))
    # BUG FIX: this line used to print the *word* count (len(nome.split()))
    # while the message claims letters; count the letters instead.
    print('Ele tem {} letras'.format(total_letters(nome)))
    # BUG FIX: this line used to print the total letter count of the whole
    # name; count only the first name, as the message says.
    print('O primeiro nome tem {} letras'.format(first_name_letters(nome)))
from django.apps import AppConfig


class PrezolaConfig(AppConfig):
    """App configuration for the ``prezola`` Django app."""
    name = 'prezola'

    def ready(self):
        # Import for side effects only: registers the app's signal
        # receivers once the app registry is fully loaded.
        import prezola.signals
# Generated by Django 3.0.7 on 2020-06-19 08:43 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ("foodcartapp", "0003_auto_20200619_0838"), ] operations = [ migrations.RenameField( model_name="product", old_name="availabilty", new_name="availability", ), ]
def count_primes(p, q):
    """Count the primes in the inclusive range [p, q].

    PERF FIX: the original counted *all* divisors of every x (O(n) per
    number, O(n^2) overall); trial-divide by 2 and odd d up to sqrt(x)
    instead.  Same results for every input.
    """
    total = 0
    for x in range(max(p, 2), q + 1):
        if x % 2 == 0:
            # 2 is the only even prime.
            total += (x == 2)
            continue
        is_prime = True
        d = 3
        while d * d <= x:
            if x % d == 0:
                is_prime = False
                break
            d += 2
        if is_prime:
            total += 1
    return total


if __name__ == '__main__':
    p, q = map(int, input().split())
    print(count_primes(p, q))
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from tssite.models import TalmudStudy


class RSSTalmudFeed(Feed):
    # RSS podcast feed of the 500 most recent Talmud Study classes.
    title = "Talmud Study"
    link = "/feeds/rss/talmud-study"
    description = "Talmud Study is an initiative to promote the independent study of the Talmud Bavli through a web-based platform and weekly class. Our program aims to provide the Jewish community with immediate access to Torah knowledge in the form of a daily e-mail/podcast to complete and understand the Talmud with the Daf Yomi cycle. Talmud Study hopes to broaden Torah learning, to increase knowledge of our Jewish history, heighten our Yirat Shamayim, Ahavat Hashem, and strengthen our personal as well as our national identity."
    item_author_name = 'Talmud Study'
    item_author_email = 'info@tanachstudy.com'

    def items(self):
        # Newest first; ties on date broken by teacher (descending).
        return TalmudStudy.objects.all().order_by('-date', '-teacher')[:500]

    def item_title(self, item):
        return str(item)

    def item_description(self, item):
        """Build the HTML item body: class info plus one sponsorship line
        per level (seder, masechet, daf), each falling back to
        'Sponsorship available' when unsponsored."""
        title = str(item)
        seder = item.seder.title()
        masechet = item.masechet.title()
        teacher = str(item.teacher)
        link = item.get_location()  # NOTE(review): unused in this method — verify before removing
        seder_sponsor = '' if not item.seder_sponsor else item.seder_sponsor
        masechet_sponsor = '' if not item.masechet_sponsor else item.masechet_sponsor
        daf_sponsor = '' if not item.daf_sponsor else item.daf_sponsor
        description = f'{title}<br /><br />This daf in {seder} is taught by {teacher}.'
        if seder_sponsor:
            description = f'{description}<br /><br />Seder {seder}<br />{seder_sponsor}'
        else:
            description = f'{description}<br /><br />Seder {seder}<br /><i>Sponsorship available</i>'
        if masechet_sponsor:
            description = f'{description}<br /><br />Masechet {masechet}<br />{masechet_sponsor}'
        else:
            description = f'{description}<br /><br />Masechet {masechet}<br /><i>Sponsorship available</i>'
        if daf_sponsor:
            description = f'{description}<br /><br />Daf {item.daf}<br />{daf_sponsor}'
        else:
            description = f'{description}<br /><br />Daf {item.daf}<br /><i>Sponsorship available</i>'
        return description

    def item_link(self, item):
        host = 'https://tanachstudy.com'
        return f'{host}{item.get_location()}'

    def item_enclosure_url(self, item):
        # Audio recording of the class, served from the CDN.
        return f'https://cdn.tanachstudy.com/{item.audio}'

    item_enclosure_mime_type = 'audio/mpeg'


class AtomTalmudFeed(RSSTalmudFeed):
    # Atom flavor of the same feed; reuses the RSS description as subtitle.
    feed_type = Atom1Feed
    subtitle = RSSTalmudFeed.description
    link = "/feeds/atom/talmud-study"
# 题目:将一个列表的数据复制到另一个列表中 # 程序分析:使用列表[:]。 a = [1, 2, 3] b = a[:] print(b) """ 切片 li = [1,2,3,4,5,6,7] print li[1:] #输出[2,3,4,5,6,7],省略终止索引,表示取起始索引之后的所有值,等效于li[1:len(li)] print li[:3] #输出[1,2,3],省略起始索引,表示从0开始取,等效于li[0:3] print li[:] #输出[1,2,3,4,5,6,7],省略起始索引、终止索引、步长值表示取全部,等效于li[0:len(li):1] print li[::] #输出[1,2,3,4,5,6,7],省略起始索引、终止索引、步长值表示取全部,等效于li[0:len(li):1] print li[::-1] #输出[7,6,5,4,3,2,1],省略起始索引、终止索引,步长值为-1,表示反向获取 """
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations from typing import Iterable from pants.backend.cc.target_types import CCSourcesGeneratorTarget, CCSourceTarget from pants.engine.rules import Rule from pants.engine.target import BoolField from pants.engine.unions import UnionRule class SkipClangFormatField(BoolField): alias = "skip_clang_format" default = False help = "If true, don't run clang-format on this target's code." def rules() -> Iterable[Rule | UnionRule]: return ( CCSourcesGeneratorTarget.register_plugin_field(SkipClangFormatField), CCSourceTarget.register_plugin_field(SkipClangFormatField), )
import socketserver


class Myserver(socketserver.BaseRequestHandler):
    """Echo handler: replies to each received chunk with its uppercase form."""

    def handle(self):
        print('conn is: ', self.request)
        print('addr is:', self.client_address)
        while True:
            # receive a message
            data = self.request.recv(1024)
            # BUG FIX: recv() returns b'' once the peer closes the
            # connection; the original loop then spun forever sending empty
            # replies.  Exit the handler instead.
            if not data:
                break
            print('收到客户端的消息:', data)
            # send the reply
            self.request.sendall(data.upper())


if __name__ == '__main__':
    # One thread per connection; the Forking variant is the alternative.
    s = socketserver.ThreadingTCPServer(('127.0.0.1', 8080), Myserver)
    # s = socketserver.ForkingTCPServer(('127.0.0.1',8080),Myserver)
    s.serve_forever()
class Solution(object):
    # LeetCode 1457: count root-to-leaf paths whose digit multiset can be
    # rearranged into a palindrome (i.e. at most one digit occurs an odd
    # number of times).  Parity of each digit is tracked as one bit.
    def pseudoPalindromicPaths (self, root):
        def dfs(root, count=0):
            if not root:
                return 0
            # Toggle the parity bit for this digit (vals assumed 1..9).
            count ^= 1 << (root.val - 1)
            res = dfs(root.left, count) + dfs(root.right, count)
            if root.left == root.right:
                # Both children None => leaf.  Pseudo-palindromic iff at
                # most one bit is set: count is 0 or a power of two.
                if count & (count - 1) == 0:
                    res += 1
            return res
        return dfs(root)


class Solution(object):
    # Second, equivalent take on the same problem.  NOTE(review): this
    # definition shadows the class above at import time.
    def pseudoPalindromicPaths (self, root):
        self.count = 0
        def search(node, digitcount):
            # Bit node.val (not node.val-1 as above) — the parity test is
            # unaffected by which bit position each digit uses.
            digitcount = digitcount ^ (1 << node.val)
            if node.left:
                search(node.left, digitcount)
            if node.right:
                search(node.right, digitcount)
            if not node.left and not node.right:
                # Leaf: at most one odd-count digit allowed.
                if digitcount & (digitcount - 1) == 0:
                    self.count += 1
            return
        # NOTE(review): assumes root is not None (unlike the version
        # above) — confirm against callers.
        search(root, 0)
        return self.count
# A simple dice simulator
import random


def main():
    """Interactive loop: roll a die N times, print each roll and the average.

    BUG FIXES vs. the original: converted from Python 2 (print statements,
    raw_input) to Python 3; the average now uses true division instead of
    Python 2's truncating integer division; a count of 0 no longer raises
    ZeroDivisionError; the builtin name `sum` is no longer shadowed.
    """
    while True:
        print("Press X to exit.")
        print("How many times do you want to roll a dice?")
        value = 0
        total = 0
        count = input("> ")
        if count == 'X' or count == 'x':
            print('Exiting...')
            break
        try:
            count = int(count)
        except ValueError:
            print("Wrong value. Try again.\n")
            continue
        while value < count:
            dice = random.randint(1, 6)
            print("Roll: #%d, result: %d" % (value + 1, dice))
            value += 1
            total += dice
        if count > 0:  # guard: avoid dividing by zero for "0" input
            print("Average: ", total / count)
        print("\n")


if __name__ == '__main__':
    main()
# Generated by Django 3.2 on 2021-04-30 07:42 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('Tour_app', '0024_auto_20210428_1845'), ] operations = [ migrations.AddField( model_name='hotel', name='checkin_time', field=models.TimeField(default=django.utils.timezone.now), ), migrations.AddField( model_name='hotel', name='checkout_time', field=models.TimeField(default=django.utils.timezone.now), ), ]
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-05-23 18:00 from __future__ import unicode_literals from django.db import migrations import multiselectfield.db.fields class Migration(migrations.Migration): dependencies = [ ('drone', '0004_auto_20170523_0117'), ] operations = [ migrations.AddField( model_name='delivery', name='status', field=multiselectfield.db.fields.MultiSelectField(choices=[(1, 'NOT STARTED'), (2, 'STARTED'), (3, 'ABORTED'), (4, 'FINISHED')], max_length=7, null=True), ), migrations.AlterField( model_name='drone', name='status', field=multiselectfield.db.fields.MultiSelectField(choices=[(1, 'UNINIT'), (2, 'BOOT'), (3, 'CALIBRATING'), (4, 'STANDBY'), (5, 'ACTIVE'), (6, 'CRITICAL'), (7, 'EMERGENCY'), (8, 'POWEROFF')], max_length=15, null=True), ), ]
# Author: Claudio Moises Valiense de Andrade.  Licence: MIT.
# Objective: news recommendation system for the technology domain.
import claudio_funcoes as cv
import matplotlib.pyplot as plt
from datetime import datetime
import sklearn.metrics.pairwise as cos  # cosine similarity
import torch
import numpy as np
import statistics
import sys
import scipy.stats as stats  # confidence intervals


def arredonda(number, precisao=2):
    """ Arredonda number in precision. Example: arredonda(2.1234, 2); Return='2.12'"""
    return float(f'%.{precisao}f' % (number))


def ic(tamanho, std, confianca, type='t', lado=2):
    """Confidence-interval half-width for a sample.

    :param tamanho: sample size
    :param std: sample standard deviation
    :param confianca: confidence level, e.g. 0.95
    :param type: 't' for Student-t (default), 'normal' for the normal law
    :param lado: 1 for one-sided, anything else for two-sided
    """
    # BUG FIX: the original used identity comparisons against literals
    # (`lado is 1`, `type is 'normal'`), which is fragile and a
    # SyntaxWarning on modern CPython; use equality.
    if lado == 1:
        lado = (1 - confianca)  # one-sided: the interval is narrower
    else:
        lado = (1 - confianca) / 2
    if type == 'normal':
        return stats.norm.ppf(1 - (lado)) * (std / (tamanho ** (1 / 2)))
    return stats.t.ppf(1 - (lado), tamanho - 1) * (std / (tamanho ** (1 / 2)))


def assign_GPU(Tokenizer_output):
    """Move a HuggingFace tokenizer output dict onto the first GPU."""
    tokens_tensor = Tokenizer_output['input_ids'].to('cuda:0')
    attention_mask = Tokenizer_output['attention_mask'].to('cuda:0')
    return {'input_ids': tokens_tensor, 'attention_mask': attention_mask}


def representation_bert(x, pooling=None):
    """Replace each document of x in-place with its BERT embedding.

    *pooling* selects both the token limit (a substring "16".."512") and
    the pooling strategy: bert_concat / bert_sum / bert_last_avg /
    bert_cls (hidden-state based) or bert_avg / bert_max (last layer).
    """
    import numpy
    from transformers import BertModel, BertTokenizer
    # BUG FIX: the original assigned limit_token = 512 unconditionally
    # *after* the if/elif chain, clobbering every branch; 512 is now only
    # the fallback when no limit substring is present.
    limit_token = 512
    for lim in (16, 32, 64, 128, 256, 512):
        if str(lim) in pooling:
            limit_token = lim
            break
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)
    model = model.to('cuda:0')  # gpu
    for index_doc in range(len(x)):
        inputs = tokenizer(x[index_doc], return_tensors="pt", max_length=limit_token, truncation=True)
        inputs = assign_GPU(inputs)
        outputs = model(**inputs)
        if 'bert_concat' in pooling or 'bert_sum' in pooling or 'bert_last_avg' in pooling or 'bert_cls' in pooling:
            hidden_states = outputs[2]
            token_embeddings = torch.stack(hidden_states, dim=0)
            token_embeddings = torch.squeeze(token_embeddings, dim=1)  # drop the batch dimension
            token_embeddings = token_embeddings.permute(1, 0, 2)  # one row per token
            vets = []
            for token in token_embeddings:
                if 'bert_concat' == pooling:
                    # concatenate the last 4 layers
                    vets.append(torch.cat((token[-1], token[-2], token[-3], token[-4]), dim=0).cpu().detach().numpy())
                elif 'bert_sum' == pooling:
                    vets.append(torch.sum(token[-4:], dim=0).cpu().detach().numpy())
                elif 'bert_last_avg' == pooling:
                    vets.append(torch.mean(token[-4:], dim=0).cpu().detach().numpy())
                elif 'bert_cls' == pooling:
                    # the first token is [CLS]; take its last layer and stop
                    x[index_doc] = token[-1].cpu().detach().numpy()
                    break
            if 'bert_cls' != pooling:
                x[index_doc] = numpy.mean(vets, axis=0)
        else:
            tokens = outputs[0].cpu().detach().numpy()[0]
            if 'bert_avg' in pooling:
                x[index_doc] = numpy.mean(tokens, axis=0)  # average pooling
            elif 'bert_max' in pooling:
                x[index_doc] = numpy.amax(tokens, axis=0)  # max pooling
    return x


def print_box_plot(data, interval_y=None):
    """Save a boxplot of *data* to fig/fig.png."""
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.boxplot(data)
    ax.set_ylim(interval_y)
    plt.savefig('fig/fig.png', format='png')


def mean_reciprocal_rank_claudio(rs):
    """MRR and its 95% confidence interval over binary relevance lists.

    A list without a relevant item is penalized as if the item sat at rank
    130 (the top-k cut used by read_data).
    """
    rr = []
    for r in rs:
        try:
            r = r.index(1) + 1
        except ValueError:  # was a bare except; index() raises ValueError
            r = 130  # relevant item missing from the top-k
        rr.append(1 / r)
    return arredonda(statistics.mean(rr)), arredonda(ic(len(rr), statistics.stdev(rr), 0.95))


def mean_reciprocal_rank(rs):
    """Standard MRR: a list with no relevant item contributes 0."""
    rs = (np.asarray(r).nonzero()[0] for r in rs)
    return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])


def precision_at_k(r, k):
    """Precision over the first k entries of relevance list r."""
    assert k >= 1
    r = np.asarray(r)[:k] != 0
    if r.size != k:
        raise ValueError('Relevance score length < k')
    return np.mean(r)


def average_precision(r):
    """Average precision of a single binary relevance list."""
    r = np.asarray(r) != 0
    out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
    if not out:
        return 0.
    return np.mean(out)


def mean_average_precision(rs):
    """MAP over several relevance lists."""
    return np.mean([average_precision(r) for r in rs])


def read_data():
    """Build per-author train/test title sets from the Deskdrop CSVs, rank
    the held-out items by cosine similarity and report MRR.

    NOTE: as in the original, the title/text pipeline below is computed and
    then *overwritten* by precomputed BERT matrices loaded from temp/.
    (Dead commented-out experiment variants were removed for clarity.)
    """
    # columns: 0 timestamp, 2 contentId, 3 authorPersonId, 10 title, -1 lang
    articles = cv.arquivo_para_corpus_delimiter(f'dataset/archive/shared_articles.csv', ',')
    user = cv.arquivo_para_corpus_delimiter(f'dataset/archive/users_interactions.csv', ',')
    articles_dict = dict()  # author id -> list of article records
    for index_row in range(1, len(articles)):  # skip the header row
        if articles[index_row][-1] != 'en':  # keep English articles only
            continue
        if articles_dict.get(articles[index_row][3]) == None:
            articles_dict[articles[index_row][3]] = []
        articles_dict[articles[index_row][3]].append(
            {'timestamp': datetime.fromtimestamp(int(articles[index_row][0])),
             'contenId': articles[index_row][2],
             'title': articles[index_row][10]})
    # keep only authors with at least two articles (need train + test)
    chave_2 = []
    for k, v in articles_dict.items():
        if len(v) >= 2:
            chave_2.append(k)
    print(len(chave_2))
    x_train = []; x_test = []; ground_truth = []
    for k in chave_2:
        qtd_noticias_user = len(articles_dict[k])
        titles = []
        limit_k = 2; cont = 0
        for index_noticia_user in range(qtd_noticias_user - 1):  # the last article is the test item
            if cont == limit_k:
                break
            cont += 1
            # take the most recent articles, excluding the very last one
            titles.append(articles_dict[k][qtd_noticias_user - 2 - index_noticia_user]['title'])
        x_train.append(" ".join(titles))
        x_test.append(articles_dict[k][-1]['title'])
    x_train = [cv.preprocessor(x) for x in x_train]
    x_test = [cv.preprocessor(x) for x in x_test]
    # precomputed BERT representations (see representation_bert) replace
    # the text features built above
    x_train = np.load('temp/x_train_all_bert_ultimas.npy')
    x_test = np.load('temp/x_test_all_bert_ultimas.npy')
    predicoes_all = []
    k = 130  # only the top-k ranked positions are inspected
    for avaliar in range(len(x_train)):
        escore = []
        for i in range(len(x_test)):
            escore.append(cv.arredonda(100 * (cos.cosine_similarity([x_train[avaliar]], [x_test[i]]))[0][0]))
        topk = cv.k_max_index2(escore, k)
        # binary relevance vector: 1 at the position of the user's own item
        predicao = []
        index_certo = -1
        for index_top in range(len(topk)):
            if topk[index_top] == avaliar:
                index_certo = index_top
                predicao.append(1)
            else:
                predicao.append(0)
        predicoes_all.append(predicao)
        print(avaliar, topk, index_certo)
    print(len(predicoes_all))
    print(f'MRR: {mean_reciprocal_rank_claudio(predicoes_all)[0]} +/- {mean_reciprocal_rank_claudio(predicoes_all)[1]}')
    print(f'MRR: {mean_reciprocal_rank(predicoes_all)} ')


if __name__ == "__main__":
    read_data()
import pandas as pd
import matplotlib.pyplot as plt

# BUG FIX: the Windows path was a plain string with unescaped backslashes
# ('\c', '\s' ...), which emits DeprecationWarning today and will become a
# SyntaxError; a raw string keeps the exact same characters safely.
df = pd.read_csv(r'E:\csvdhf5xlsxurlallfiles\stocks.csv')
print(df.head())
print(df.columns)

# Per-ticker price series.
aapl = df['AAPL']
ibm = df['IBM']
print(type(aapl))
print(type(ibm))
print(aapl.shape)
print(ibm.shape)

# Scatter of AAPL vs IBM prices.
plt.scatter(aapl, ibm, marker='o', color='red', label='stock1')
plt.legend(loc='upper right')
plt.show()
# -*-coding:utf-8 -*- from setuptools import setup, find_packages __author__ = "lqs" setup( name="AESHelper", version="0.1", description="encrypt plain text or decrypt encrypted text using AES", author="lqs", author_email="li_qinshan@126.com", url="https://github.com/davechina/AESHelper", license="MIT", packages=find_packages(), classifiers=['Programming Language :: Python :: 3.4'], keywords="aes", install_requires=['pycrypto>=2.6.1'], zip_safe=True, include_package_data=True )
from django.shortcuts import render, redirect, get_object_or_404
from django.views import generic
# from django.urls import reverse_lazy
# from django.views.generic.edit import UpdateView, DeleteView
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.core.exceptions import PermissionDenied
# from django.http import HttpResponse
from .models import Question, Answer  # , RateQuestion, RateAnswer
from .forms import QuestionForm, AnswerForm


class IndexView(generic.ListView):
    # Paginated list of all questions.
    model = Question
    template_name = 'qa/index.html'
    context_object_name = 'question_list'
    paginate_by = 6


class DetailView(generic.DetailView):
    # Single-question page.
    model = Question
    template_name = 'qa/detail.html'

    def get_context_data(self, **kwargs):
        # Currently a pass-through; kept as an extension point.
        context = super(DetailView, self).get_context_data(**kwargs)
        return context


def register(request):
    """Create a user account via Django's stock UserCreationForm."""
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, "Account Created successfully.")
            return redirect('login')
    else:
        form = UserCreationForm()
    return render(request, 'registration/register.html', {'form': form})


@login_required
def question_new(request):
    """Create a question; the logged-in user becomes its contributor."""
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            question = form.save(commit=False)
            question.contributor = request.user
            question.date = timezone.now()
            question.save()
            return redirect('qa:answers', pk=question.pk)
    else:
        form = QuestionForm()
    return render(request, 'qa/question_form.html', {'form': form})


@login_required
def question_edit(request, pk):
    """Edit a question; only its contributor may do so."""
    # TODO: add contributor condition
    question = get_object_or_404(Question, pk=pk)
    print(question)  # NOTE(review): leftover debug print — consider removing
    if question.contributor != request.user:
        raise PermissionDenied(u"You don't have permission to edit this.")
    if request.method == 'POST':
        form = QuestionForm(request.POST, instance=question)
        if form.is_valid():
            question = form.save(commit=False)
            question.contributor = request.user
            question.date = timezone.now()
            question.save()
            return redirect('qa:answers', pk=question.pk)
    else:
        form = QuestionForm(instance=question)
    return render(request, 'qa/question_form.html', {'form': form})


@login_required
def answer_edit(request, pk):
    """Edit an answer; only its contributor may do so."""
    answer = get_object_or_404(Answer, pk=pk)
    if answer.contributor != request.user:
        raise PermissionDenied(u"You don't have permission to edit this.")
    if request.method == 'POST':
        form = AnswerForm(request.POST, instance=answer)
        if form.is_valid():
            answer = form.save(commit=False)
            # TODO: updating question id
            # NOTE(review): pk here is the *answer* pk, yet it is assigned
            # to question_id — confirm (the TODO above suggests a known issue).
            answer.question_id = pk
            answer.contributor = request.user
            answer.date = timezone.now()
            answer.save()
            return redirect('qa:answers', pk=answer.question.id)
    else:
        form = AnswerForm(instance=answer)
    return render(request, 'qa/answer_form.html', {'form': form})


@login_required
def question_delete(request, pk):
    """Delete a question owned by the current user.

    NOTE(review): deletes on any method, including GET — no POST/CSRF
    confirmation step; confirm this is acceptable.
    """
    question = get_object_or_404(Question, pk=pk)
    if question.contributor != request.user:
        raise PermissionDenied(u"You don't have permission to delete this.")
    question.delete()
    return redirect('qa:index')


@login_required
def answer_delete(request, pk):
    """Delete an answer owned by the current user (same GET caveat as above
    for this view: no POST/CSRF confirmation step)."""
    answer = get_object_or_404(Answer, pk=pk)
    if answer.contributor != request.user:
        raise PermissionDenied(u"You don't have permission to delete this.")
    answer.delete()
    return redirect('qa:answers', pk=answer.question.id)


@login_required
def new_answer(request, pk):
    """Post a new answer to question *pk*."""
    question = Question.objects.get(pk=pk)
    if request.method == 'POST':
        form = AnswerForm(request.POST)
        if form.is_valid():
            answer = form.save(commit=False)
            answer.question_id = pk
            answer.contributor = request.user
            answer.date = timezone.now()
            answer.save()
            return redirect('qa:answers', pk=question.pk)
    else:
        form = AnswerForm()
    return render(request, 'qa/answer_form.html', {'form': form, 'question': question})


# TODO move these methods to helpers.py or utils.py
'''
upvote check if user has upvoted previously. if he didn't
'''


def remove_vote(object_name, upvote, user_obj):
    """Retract an existing vote (upvote=True/False) from a Question/Answer."""
    # print("--remove--", object_name, upvote, user_obj)
    if upvote:
        object_name.total_upvotes -= 1
        object_name.upvoted_by_users.remove(user_obj)
    else:
        object_name.total_downvotes -= 1
        object_name.downvoted_by_users.remove(user_obj)
    object_name.save()


def add_vote(object_name, upvote, user_obj):
    """Cast a vote (upvote=True/False) on a Question/Answer."""
    # print("--add--", object_name, upvote, user_obj)
    if upvote:
        object_name.total_upvotes += 1
        object_name.upvoted_by_users.add(user_obj)
    else:
        object_name.total_downvotes += 1
        object_name.downvoted_by_users.add(user_obj)
    object_name.save()


'''
case 1: upvote
case 2: downvote
case 3: previously upvoted and now you want to downvote.
case 4: previously downvoted and now you want to upvote.
'''
'''
question: upvote downvote upvote -> disabling upvote enable downvote downvote ->
'''


# TODO: refactor this.
@login_required
def question_vote(request, pk):
    """Vote on a question.

    NOTE(review): reset_user_question_votes_to_zero and downvote_question
    are neither defined nor imported in this file — a POST to this view
    looks like it raises NameError; confirm where these helpers live.
    """
    question = get_object_or_404(Question, pk=pk)
    if request.method == 'POST':
        upvote = True if "upvote" in request.POST else False
        user = request.user
        reset_user_question_votes_to_zero(question, user)
        if upvote:
            # just remove both votes
            # reset to zero
            # upvote_question(question, user)
            question.upvote()
            # if user in question.downvoted_by_users.all():
            #     remove_vote(question, False, user)
            # if user in question.upvoted_by_users.all():
            #     remove_vote(question, True, user)
            # else:
            #     add_vote(question, True, user)
        else:
            question.downvote()
            downvote_question(question, user)
            # if user in question.upvoted_by_users.all():
            #     remove_vote(question, True, user)
            # if user in question.downvoted_by_users.all():
            #     remove_vote(question, False, user)
            # else:
            #     add_vote(question, False, user)
    return redirect('qa:answers', pk=pk)


@login_required
def answer_vote(request, pk):
    """Toggle-style voting on an answer (see the case table above):
    the opposite vote is always retracted; an identical existing vote is
    toggled off, otherwise the new vote is added."""
    answer = get_object_or_404(Answer, pk=pk)
    if request.method == 'POST':
        vote = True if "upvote" in request.POST else False
        user = request.user
        if vote:
            if user in answer.downvoted_by_users.all():
                remove_vote(answer, False, user)
            if user in answer.upvoted_by_users.all():
                remove_vote(answer, True, user)
            else:
                add_vote(answer, True, user)
        else:
            if user in answer.upvoted_by_users.all():
                remove_vote(answer, True, user)
            if user in answer.downvoted_by_users.all():
                remove_vote(answer, False, user)
            else:
                add_vote(answer, False, user)
    return redirect('qa:answers', pk=answer.question.id)
from datetime import datetime, timedelta
import time

START_DATE = datetime.strptime('2015-04-04', "%Y-%m-%d")
LAT, LON = 52.425050, -7.099290


def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) to end_date (exclusive)."""
    for n in range(int((end_date - start_date).days)):
        yield start_date + timedelta(n)


def get_operating_hours():
    """Operating hours since START_DATE, with a 5% overhead factor.

    NOTE(review): operating_hours is never incremented inside the loop, so
    this always returns 0 — the per-day accumulation appears to be missing
    from the original.  TODO: confirm the intended hours-per-day source and
    accumulate it here; behavior preserved until then.
    """
    today = datetime.now()
    operating_hours = 0
    for single_date in daterange(START_DATE, today):
        day = datetime.fromtimestamp(time.mktime(single_date.timetuple()))
    return 1.05 * operating_hours  # adding 5% overhead


def get_current_overall_watts():
    """ returns the lates entry from the current table of latest watts used in the building from the main incoming feed """
    wattsNow = Weather.query.with_entities(Weather.temp, Weather.weather_id, Weather.description).filter(
        Weather.created_at >= (datetime.now() - timedelta(days=2))).order_by(
        Weather.id.desc()).first()
    # BUG FIX: the original returned the undefined name WeatherNow, which
    # raised NameError; return the row queried above instead.
    return wattsNow
from django.shortcuts import render
from phoneuser.forms import RegistrationForm


def user_register(request):
    """ Simple View for user registration """
    if request.method == 'POST':
        # Bound form; request.FILES supports any file-upload fields.
        formset = RegistrationForm(request.POST, request.FILES)
        if formset.is_valid():
            formset.save()
            # NOTE(review): no redirect after a successful save — the bound
            # form is re-rendered, so a browser refresh re-submits.
            # Consider the POST/redirect/GET pattern; confirm intent.
    else:
        formset = RegistrationForm()
    # Invalid POSTs fall through here with the bound form, showing errors.
    return render(request, 'registration.html', {'formset': formset})
import shutil import platform import os version = '1.14.0' destdir = os.path.join(version, '{0}-{1}'.format(platform.system(), platform.architecture()[0])) os.makedirs(destdir) print('moving NFSIm to {0}\n'.format(destdir)) if platform.system() != 'Windows': shutil.move('NFsim', os.path.join(destdir, 'NFsim')) else: shutil.move('NFsim.exe', os.path.join(destdir, 'NFsim.exe'))
# -*-coding:utf-8 -*- """AES/CBC/PKCS5Padding加解密的工具 关于AES: 私钥aes_key的长度, 必须是16、24、或32个字节长度的一种; 初始向量aes_iv, ecb、cfb模式下不需要该参数, cbc模式下, 是一堆长度为AES.block_size的十六进制的0; aes同时要求被加密的文本长度必须为16的倍数, 不足则补位填充。具体的填充方法由填充模式决定。 加解密顺序: 加密: 明文密码 --> 补位填充 --> (CBC)加密 --> base64编码 --> 加密字符串 解密: 加密字符串 --> base64解码 --> (CBC)解密 --> 去除填充 --> 明文密码 注意事项: 要注意的是, 加密/解密不能使用同一个AES对象, 即使他们的初始向量相同, 所以不能再初始化中生成aes对象。这个地方折腾了很久。。 Refer to: https://reality0ne.com/a-tip-of-pycrypto/ """ import base64 from Crypto.Cipher import AES from Crypto.Hash import MD5 __author__ = "lqs" class AesHelper: def __init__(self, aes_key, aes_iv, mode=AES.MODE_CBC): """ :param aes_key: 私钥 :param aes_iv: 初始向量 :param mode: aes加密模式, 默认cbc """ self.aes_key = aes_key self.aes_iv = aes_iv self.aes_mode = mode @classmethod def _md5(cls, content): """生成初始向量""" m = MD5.new() try: m.update(content) except TypeError: m.update(content.encode()) finally: return m.digest() @classmethod def padding(cls, data): """采用pkcs5/7的补位方式""" bs = AES.block_size pad = bs - len(data) % bs return data + pad * chr(pad) @classmethod def unpadding(cls, data): return data[0:-data[-1]] def _gen(self): """强制转换aes_key, aes_iv aes算法对密钥的长度有要求: 16、24、或32字节中的一种。我们使用的是32个字节长度。 """ if self.aes_key is None or len(self.aes_key) != 32: raise RuntimeError("invalid key") if self.aes_iv is None: self.aes_iv = self._md5(self.aes_key) return AES.new(self.aes_key, self.aes_mode, self.aes_iv) def encrypt(self, plaintext): cipher = self._gen() text = self.padding(plaintext) return cipher.encrypt(text) def decrypt(self, encrypted): cipher = self._gen() text = cipher.decrypt(encrypted) return self.unpadding(text)
""" Given number (num), return index value of Fibonacci sequence where the sequence is: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 ... The pattern of the sequence is that each value is the sum of the previous two values EG. IN => 8 || OUT => 21 """ # O(2^n) # adds readability, but call stack memory is higher (no tail call optimisation) def fibonacciRecursive(num): if num < 2: return num return fibonacciRecursive(num-1) + fibonacciRecursive(num-2) # O(n) def fibonacciIter(num): sequence = [0,1] for x in range(2,num+1): sequence.append(sequence[x-2] + sequence[x-1]) return sequence[num] print(fibonacciRecursive(8)) print(fibonacciIter(8))
trunk_dict = {'FastEthernet0/1':[10,20], 'FastEthernet0/2':[11,30], 'FastEthernet0/4':[17]} def generate_trunk_config(trunk): ''' trunk - словарь trunk-портов для которых необходимо сгенерировать конфигурацию. Возвращает список всез команд, которые были сгенерированы на основе шаблона ''' trunk_template = ['switchport trunk encapsulation dot1q', 'switchport mode trunk', 'switchport trunk native vlan 999', 'switchport trunk allowed vlan'] trunk_config = {} for interface,vlans in trunk.items(): interface = 'interface {}'.format(interface) trunk_config[interface] = [] for line in trunk_template: if line.endswith('allowed vlan'): temp = [] for vlan in vlans: vlan = temp.append(str(vlan)) trunk_config[interface].append('{} {}'.format(line, ','.join(temp))) continue trunk_config[interface].append(line) return trunk_config config = generate_trunk_config(trunk_dict) print(config)
from Bagger import Bagger # import Bagger # 3 test cases classified as customer(x) customer1 = [ {"name": "bread", "container_type": "plastic bag", "size": "medium", "frozen": False}, {"name": "glop", "container_type": "jar", "size": "small", "frozen": False}, {"name": "granola", "container_type": "cardboard box", "size": "large", "frozen": False}, {"name": "ice cream", "container_type": "cardboard carton", "size": "medium", "frozen": True}, {"name": "potato chips", "container_type": "plastic bag", "size": "medium", "frozen": False} ] customer2 = [ {"name": "waffles", "container_type": "carboard box", "size": "large", "frozen": False}, {"name": "mac and cheese", "container_type": "cardboard box", "size": "medium", "frozen": False}, {"name": "orange juice", "container_type": "plastic bottle", "size": "large", "frozen": False}, {"name": "ice cream", "container_type": "cardboard carton", "size": "medium", "frozen": True}, {"name": "oreo cookies", "container_type": "plastic bag", "size": "medium", "frozen": False}, {"name": "bread", "container_type": "plastic bag", "size": "medium", "frozen": False}, {"name": "granola", "container_type": "cardboard box", "size": "large", "frozen": False}, {"name": "potato chips", "container_type": "plastic bag", "size": "medium", "frozen": False} ] customer3 = [ {"name": "waffles", "container_type": "carboard box", "size": "large", "frozen": False}, {"name": "granola", "container_type": "cardboard box", "size": "large", "frozen": False}, {"name": "orange juice", "container_type": "plastic bottle", "size": "large", "frozen": False}, ] # driving agent def agent(customer): bagger = Bagger(customer) # create new bagger bagger.bag_items() # bag items print(bagger) # print bagger output # use agent to process each "customer" agent(customer1) agent(customer2) agent(customer3)
#!/usr/bin/env python # -*- coding:utf-8 -*- import string # -------------------- 字符串与数字之间的转换 -------------------- print(repr(42)) print(str(42)) print(float('1.2e5')) print(repr('spam')) print(str('spam')) print("int('1101', 2):\n\t", int('1101', 2)) # -------------------- 字符与ASCII码之间的转换 -------------------- # ord() 将单个字符转换为ASCII码 # chr() 将ASCII码转换为单个字符 print('\n获取所有字母的ASCII码!\n') # for i in string.ascii_letters: # for x in range(0, 3): # print('%s\t%d' % (i, ord(i))) for i in range(0, len(string.ascii_letters), 4): print('%s\t%d\t\t\t%s\t%d\t\t\t%s\t%d\t\t\t%s\t%d' % (string.ascii_letters[i], ord(string.ascii_letters[i]), string.ascii_letters[i + 1], ord(string.ascii_letters[i + 1]), string.ascii_letters[i + 2], ord(string.ascii_letters[i + 2]), string.ascii_letters[i + 3], ord(string.ascii_letters[i + 3]), ))
import peewee
from playhouse.shortcuts import RetryOperationalError
from flask_admin.contrib.peewee import ModelView
from flask_admin.actions import action
from flask_admin import expose
from flask import flash
from flask_admin.form import rules
from wtforms.validators import ValidationError

# see: flask-admin/examples/peewee as source example


class UserInfoRetryDB(RetryOperationalError, peewee.SqliteDatabase):
    # SQLite database class that retries queries on OperationalError.
    pass


class BaseModel(peewee.Model):
    class Meta:
        # Bound to a real database at runtime by init_db().
        database = None


class User(BaseModel):
    name = peewee.TextField(unique=True)

    def __unicode__(self):
        return self.name


class MessageInfo(BaseModel):
    # related_name is the back reference for the foreign key
    user_name = peewee.ForeignKeyField(User, related_name='user_name')
    subject = peewee.TextField()
    description = peewee.TextField()

    # NOTE(review): missing trailing underscore -- probably meant __unicode__.
    def __unicode_(self):
        return self.description


# Meta class cannot be accessed directly; use _meta
# see :
# http://docs.peewee-orm.com/en/latest/peewee/models.html#model-options-and-table-metadata
def init_db(db_name):
    """Open the SQLite db and bind the models plus the admin views to it."""
    db = UserInfoRetryDB(db_name, check_same_thread=False)
    User._meta.database = db
    MessageInfo._meta.database = db
    UserAdminView._model = User
    MessageInfoAdminView._model = MessageInfo


def get_user_messages():
    """Return {user name: message dict} built from all MessageInfo rows.

    NOTE(review): keyed by user name, so a user with several messages keeps
    only the last one iterated -- confirm that is intended.
    """
    user_messages = {}
    for e in MessageInfo.select():
        message = {
            'user_name': e.user_name.name,
            'subject': e.subject,
            'description': e.description
        }
        user_messages[e.user_name.name] = message
    return user_messages


class UserAdminView(ModelView):
    """Flask-Admin view for User with name validation and an approve action."""
    _model = None

    @action('approve', 'Approve', 'Are you sure you want to approve selected users?')
    def action_approve(self, ids):
        flash('Approving users!')

    form_create_rules = [
        rules.Field('name')
    ]

    def no_root_allowed(form, field):
        # Form validator: reject the reserved user name "root".
        if field.data == 'root':
            raise ValidationError('"root" is not allowed')

    def no_duplicates(form, field):
        # Form validator: reject names that already exist in the table.
        name = field.data
        if UserAdminView.check_duplicate(name) is True:
            raise ValidationError('"duplicates" is not allowed')

    @classmethod
    def check_duplicate(cls, name):
        """Return True (and flash a message) when the name already exists."""
        try:
            cls._model.get(cls._model.name == name)
        except peewee.DoesNotExist:
            return False
        else:
            msg = str(name) + ' already exist'
            flash(msg)
            return True

    form_args = dict(
        name=dict(validators=[no_root_allowed, no_duplicates])
    )

    @expose('/hello')
    def index(self):
        return 'Hello World!'

    def on_model_change(self, form, model, is_created):
        return super(UserAdminView, self).on_model_change(form, model, is_created)

    def update_model(self, form, model):
        return super(UserAdminView, self).update_model(form, model)

    def create_model(self, form):
        return super(UserAdminView, self).create_model(form)

    def delete_model(self, model):
        return super(UserAdminView, self).delete_model(model)


class MessageInfoAdminView(ModelView):
    _model = None
    # TODO: create Column filters
    pass
from sklearn.mixture import GaussianMixture import matplotlib.pyplot as plt import numpy as np import sys data = np.genfromtxt(sys.argv[1]) data = data.reshape(-1,1) num_clusters = int(sys.argv[2]) best_comp = 0 best_bic = 1000000 best_model = [] n_comp = np.arange(1,num_clusters + 1) for i in n_comp : gm = GaussianMixture(n_components=i) gm.fit(data) bic = gm.bic(data) print(str(bic)) if bic < best_bic : best_bic = bic best_comp = i best_model = gm print('Best BIC = ' + str(best_bic) + ' using model with ' + str(best_comp) + ' components.') samples = np.arange(np.floor(min(data)),np.ceil(max(data))) probs = best_model.score_samples(samples[:,np.newaxis]) plt.plot(list(data), list(np.zeros_like(data)),'x') plt.plot(list(samples),list(np.e**probs),'.') plt.show()
def get_average(marks): num_marks = marks_sum = 0 for mark in marks: num_marks += 1 marks_sum += mark return marks_sum / num_marks
#!/usr/bin/python3 L=[4,3,8,1,7] def rechercher(n): for val in L: if (val==n): return True return False print(rechercher(5)) # on recherche 5 doit renvoyer False print(rechercher(7)) # on recherche 7 doit renvoyer True
# 6. Classes and object orientation

# 1 -- the plainest possible class
class C1(object):
    spam = 42  # class-level data attribute

    def __init__(self, name):
        self.name = name

    def __del__(self):
        # Runs when the instance is reclaimed.
        print("good bye", self.name)

# I1 = C1('Bob')

# 2 -- Python classes have no parameter-based function overloading
# class FirstClass(object):
#     def test(self, string):  # overridden by the later definition
#         print(string)
#
#     def test(self):  # only this test(self) survives in the class: it
#                      # replaces the earlier test that took an argument
#         print("hello world")

# 3 -- subclass extending a superclass: prefer calling the superclass method
# class Manager(Person):
#     def giveRaise(self, percent, bonus = .10):
#         self.pay = int(self.pay*(1+percent + bonus))  # bad: copy-pasted superclass code
#         Person.giveRaise(self, percent+bonus)  # good: delegate to the superclass
#         # (the superclass is simply the parent class named in the class header)

# 3 -- class namespaces
# class Member(object):
#     count = 0
#     def init(self):
#         Member.count += 1
#         print(Member.count)
# t1 = Member()
# t1.init()
# print(t1.count)
# print(Member.count)
# t2 = Member()
# t2.init()
# print(t2.count)
# print(Member.count)

# 4 -- class introspection tools
class Person:
    pass


bob = Person()
# bob.__class__            # <class '__main__.Person'>
# bob.__class__.__name__   # Person
# bob.__dict__

# 5 -- in section 1 above, spam is a class data attribute, not per-instance state

# 6 -- two ways to call a class method
# instance.method(arg...)            # instance.method(args)
# class.method(instance, arg...)     # ClassName.method(instance, args)
# class A:
#     def test(self, *args):
#         print("A-test", self, args)
# a = A()
# a.test(1234)
# A.test(a, 1234, 5678)
# A.test(A(), 123, 123, 454)

# 7 -- ways to implement an abstract superclass:
# superclass: super()
# abstract class: generally, a class that cannot be instantiated; its job is
#   to declare the abstract methods subclasses must implement
# abstract superclass: part of the class behaviour is provided by subclasses.
#   If the expected method is not defined in the subclass, an undefined-name
#   exception is raised;
# (1) call an undefined method; the subclass defines it
# def delegate(self):
#     self.action()  # action is not defined in this class, so any use of
#                    # delegate fails until a subclass provides action
# (2) define action, but raise
# def action(self):
#     raise NotImplementedError('action must be defined')
# (3) both options above still allow instantiation; decorator syntax can
#     build a truly non-instantiable abstract superclass
# from abc import ABCMeta, abstractclassmethod
# class Super(metaclass=ABCMeta):
#     @abstractclassmethod  # this class can no longer be instantiated
#     def action(self):
#         pass
# x = Super()  # TypeError: Can't instantiate abstract class Super with abstract methods action

# 8 -- OOP: object-oriented programming
# (a) OOP and inheritance: the 'is-a' relation
class B:pass


class A(B): pass


a = A()
isinstance(a, B)  # returns True
# A is a subclass of B, so an A instance is also a kind of B
# type() does not treat a subclass instance as the parent type; isinstance() does
# print(type(a) == A)  # True
# print(type(a) == B)  # False
# print(type(a))
# print(a)


# (b) OOP and composition: the "has-a" relation
class A(object):
    def __init__(self,name):
        self.name = name


class B(object):
    def __init__(self,phone):
        self.phone = phone


class C(object):
    def __init__(self, name, phone):
        self.name = A(name)  # composition
        self.phone = B(phone)  # composition


# (c) OOP and delegation: "wrapper" objects. In Python delegation is usually
# implemented with the __getattr__ hook, which intercepts reads of attributes
# that do not exist on the wrapper itself.
# A wrapper (proxy) class can use __getattr__ to forward any attribute read
# to the wrapped object.
class wrapper(object):
    def __init__(self, object):
        self.wrapped = object

    def __getattr__(self, attrname):
        print('Trace', attrname)
        # getattr(X, N) fetches the attribute named by string N from the
        # wrapped object X; similar to X.__dict__[N]
        return getattr(self.wrapped, attrname)


x = wrapper([1, 2, 3])
# x.append(4)  # prints "Trace: append"; the wrapped list becomes [1, 2, 3, 4]
# x = wrapper({'a':1, 'b':2})
# print(list(x.keys()))  # prints "Trace: keys" then ['a', 'b']

# todo https://zhuanlan.zhihu.com/p/40446047
# todo https://zhuanlan.zhihu.com/p/29747657


# 9 -- pseudo-private class attributes: the __attr name-mangling convention
class C1(object):
    def __init__(self, name):
        self.__name = name  # mangled to _C1__name

    def __str__(self):
        return "self.name = %s" %self.__name


I = C1('tom')
# print(I)  # prints: self.name = tom
# I.__name = 'jeey'  # no access this way: __name is pseudo-private
I._C1__name = 'jeey'  # the mangled name works: printing now gives self.name = jeey

# 10 -- class methods are objects: unbound class method objects / bound instance method objects
# class Spam(object):
#     def doit(self, message):
#         print(message)
#     # static-style method
#     def selfless(message):
#         print(message)
# obj = Spam()
# x = obj.doit  # bound method object: instance + function
# x('hello world')
# x = Spam.doit  # unbound method object: class name + function
# x(obj, 'hello world')
# x = Spam.selfless  # unbound method function; invalid before 3.0
# x('hello world')


# 11 -- inspecting an object: attributes and methods
class Myobject: pass


a = Myobject()
dir(a)
hasattr(a, 'x')  # test whether attribute/method x exists, i.e. whether a.x is defined
setattr(a, 'y', 19)  # set an attribute/method; equivalent to a.y = 19
# Neat trick: setattr can create an attribute unreachable by normal syntax,
# readable only via getattr
setattr(a, "can't touch", 100)  # the name contains a space: not directly accessible
# print(getattr(a, "can't touch"))  # but getattr can fetch it


class A(object):
    def __init__(self):
        self.__name = "Luke"  # mangled to _A__name
        self.name = "luke"

    def get_name(self):
        return self.__name


a = A()
# print(a.get_name())
# print(hasattr(a,'get_name'))
# print(hasattr(a,'__name')
# print(getattr(a, '__name'))  # raises AttributeError
# print(setattr(a, '__name', "zhangsan"))
# print(getattr(a, '__name'))  # after the setattr, this fetches zhangsan

# 12 -- dynamically binding attributes or methods to a class: types.MethodType
# In general, once a class instance exists you may bind any attribute or
# method to it -- that is the flexibility of a dynamic language.
# class Student(object):
#     pass
# s = Student()
# s.name = 'Michael'  # dynamically bind an attribute to the instance
# def set_age(self, age):  # define a function to serve as an instance method
#     self.age = age
# from types import MethodType
# s.set_age = MethodType(set_age, s)  # bind the method to one instance only;
#                                     # the class and other instances are unaffected
# s.set_age(25)  # call the instance method
# print(s.age)
# Student.set_age = MethodType(set_age, Student)  # bind the method on the class:
#                                                 # every instance gets it
# w = Student()
# w.set_age(10)
# print(w.age)
from django.shortcuts import render, redirect, get_object_or_404
from django.http import JsonResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import *
import json

from .models import Order, OrderItem
from products.models import Product
from ecommerce_website.utils import get_total, get_items


def user_register_view(request):
    """Show the registration form; create the user and go home on valid POST."""
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/')
    else:
        # If form not submitted, render blank form
        form = UserRegisterForm()
    context = {'form': form}
    return render(request, 'users/register.html', context)


def user_login_view(request):
    """Authenticate by email/password and log the user in.

    The auth form exposes the email under its 'username' key.
    """
    if request.method == 'POST':
        form = UserLoginForm(request=request, data=request.POST)
        if form.is_valid():
            email = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(email=email, password=password)
            if user is not None:
                login(request, user)
                messages.success(request, 'Logged in as %s' % MyUser.objects.get(email=email))
                # Redirect to a success page.
                return redirect('/')
            else:
                # Return an 'invalid login' error message.
                messages.error(request, 'Invalid username or password')
        else:
            messages.error(request, 'Invalid username or password')
    else:
        # If form not submitted, render blank form
        form = UserLoginForm()
    context = {'form': form}
    return render(request, 'users/login.html', context)


def user_logout_view(request):
    """Log the user out and show the goodbye page."""
    logout(request)
    return render(request, 'users/logout.html')


def order_view(request):
    """Show the cart: DB-backed order for logged-in users, session otherwise.

    NOTE(review): for an authenticated user order_items is a QuerySet, yet
    the session-cart loop below calls .append() on it -- that raises
    AttributeError whenever a logged-in user also has a session cart.
    Confirm intended behaviour.
    """
    if request.user.is_authenticated:
        order = request.user.order
        order_items = order.item.all()
    else:
        order_items = []
    if not 'cart' in request.session:
        request.session['cart'] = {}
    for key, item in request.session['cart'].items():
        order_items.append(item)
    context = {
        'order_items': order_items,
        'items': get_items(request),
        'total': get_total(request)
    }
    return render(request, 'users/order.html', context)


def add_to_cart(request, pk):
    """Add one unit of product pk to the DB order or to the session cart."""
    user = request.user
    product = get_object_or_404(Product, pk=pk)
    if user.is_authenticated:
        order, created = Order.objects.get_or_create(user=user)
        order_item, created = OrderItem.objects.get_or_create(order=order, product=product)
        order_item.quantity += 1
        order_item.save()
    else:
        if not 'cart' in request.session:
            request.session['cart'] = {}
        if str(product.id) in request.session['cart']:
            request.session['cart'][str(product.id)]['quantity'] += 1
        else:
            # Store a JSON-friendly snapshot of the product in the session.
            request.session['cart'][str(product.id)] = {
                'product': {
                    'pk': product.pk,
                    'name': product.name,
                    'imageURL': product.imageURL,
                    'price': product.serialized_price
                },
                'quantity': 1
            }
        print('Cart: %s' % request.session['cart'])
    messages.success(request, 'Item added to cart!', extra_tags='primary')
    # Bounce back to wherever the user clicked "add".
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))


def remove_from_cart(request, pk):
    """Decrement the quantity of product pk; drop the line when it hits zero."""
    user = request.user
    product = get_object_or_404(Product, pk=pk)
    if user.is_authenticated:
        order = Order.objects.get(user=user)
        order_item = OrderItem.objects.get(order=order, product=product)
        order_item.quantity -= 1
        order_item.save()
        if order_item.quantity <= 0:
            order_item.delete()
    else:
        request.session['cart'][str(product.id)]['quantity'] -= 1
        if request.session['cart'][str(product.id)]['quantity'] <= 0:
            del request.session['cart'][str(product.id)]
    messages.warning(request, 'Item removed from cart!')
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))


def remove_item_from_cart(request, pk):
    """Remove the whole line for product pk regardless of its quantity."""
    user = request.user
    product = get_object_or_404(Product, pk=pk)
    if user.is_authenticated:
        order = Order.objects.get(user=user)
        order_item = OrderItem.objects.get(order=order, product=product)
        order_item.delete()
    else:
        del request.session['cart'][str(product.id)]
    messages.warning(request, 'Item(s) removed from cart!', extra_tags='danger')
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
import shelve from contextlib import closing import datetime # with closing(shelve.open('./data/blog')) as shelf: # shelf['wocao'] = 'hahaha' class Blog: def __init__(self, title, *posts): self.title = title def as_dict(self): return dict( title=self.title, underline="=" * len(self.title) ) class Post: def __init__(self, date, title, rst_txt, tags): self.date = date self.title = title self.rst_txt = rst_txt self.tags = tags def as_dict(self): return dict( date=str(self.date), title=self.title, rst_txt=self.rst_txt, tag_text=" ".join(self.tags) ) # b1 = Blog(title='Travel Blog') # shelf = shelve.open('./data/blog') # b1._id = 'Blog:1' # shelf[b1._id] = b1 p2 = Post(date=datetime.datetime(2013, 11, 14, 17, 25), title="Hard Aground", rst_txt="""Some embarrassing revelation....""", tags=("#RedRanger", "#Whitby42", "#ICW") ) p3 = Post(date=datetime.datetime(2013, 11, 18, 17, 25), title="Anchor Follies", rst_txt="""Some witty revelation....""", tags=("#RedRanger", "#Whitby42", "#Mistakes") ) b1 = Blog(title='Travel Blog') shelf = shelve.open('./data/blog') owner = shelf['Blog:1'] p2._parent = owner._id p2._id = p2._parent + ":Post:2" shelf[p2._id] = p2 p3._parent = owner._id p3._id = p3._parent + ":Post:3" shelf[p3._id] = p3 print(list(shelf.keys())) class BlogFactory: @staticmethod def version(self, version): self.version = version @staticmethod def blog(self, *args, **kw): if self.version == 1: return Blog(*args, **kw) blog = BlogFactory.version(2).blog(title='this', other_attribute='that')
# _*_ coding utf-8 _*_
"""
@File  : geographical_coordination.py
@Author: yxwang
@Date  : 2020/4/29
@Desc  : Sample a 2D inhomogeneous Poisson point process (PPP) by thinning.
"""

from scipy.optimize import minimize
from scipy import integrate
import numpy as np
import matplotlib.pyplot as plt

# 1. 2D inhomogeneous PPP distribution.
# 2. To simulate an inhomogeneous Poisson point process, first simulate a
#    homogeneous one, then transform the points with a deterministic function.
# 3. The standard way to simulate jointly distributed random variables is
#    Markov chain Monte Carlo; applying MCMC simply means repeatedly applying
#    the random point operation to all points.
#    Here we use the generic but simpler thinning-based approach (thinning is
#    the simplest, most general way to simulate an inhomogeneous PPP).

# plt.close('all')


class geographical_coordination:
    """Thinning-based sampler for an inhomogeneous PPP on [xMin,xMax]x[yMin,yMax]."""

    def __init__(self, xMin, xMax, yMin, yMax, num_Sim=1, s=0.5):
        self.xMin = xMin
        self.xMax = xMax
        self.yMin = yMin
        self.yMax = yMax
        self.xDelta = xMax - xMin  # window width
        self.yDelta = yMax - yMin  # window height
        self.areaTotal = self.xDelta * self.yDelta
        self.num_Sim = num_Sim  # number of simulation runs
        self.s = s  # scale parameter of the intensity function
        self.resultsOpt = None
        self.lambdaNegMin = None
        self.lambdaMax = None
        self.numbPointsRetained = None
        self.numbPoints = None
        self.xxRetained = []
        self.yyRetained = []
        self.xxThinned = []
        self.yyThinned = []

    # point process params
    def fun_lambda(self, x, y):
        # intensity function
        return 100 * np.exp(-(x ** 2 + y ** 2) / self.s ** 2)

    # define thinning probability function
    def fun_p(self, x, y):
        # retention probability: intensity scaled by its maximum
        return self.fun_lambda(x, y) / self.lambdaMax

    def fun_neg(self, x):
        # negative of lambda
        # fun_neg = lambda x: -fun_lambda(x[0], x[1])
        return -self.fun_lambda(x[0], x[1])

    def geographical_coordinates(self):
        """Run the thinning simulation; returns (retained xs, retained ys, s)."""
        # initial value(ie center)
        xy0 = [(self.xMin + self.xMax) / 2, (self.yMin + self.yMax) / 2]
        # Find largest lambda value
        self.resultsOpt = minimize(self.fun_neg, xy0,
                                   bounds=((self.xMin, self.xMax), (self.yMin, self.yMax)))
        self.lambdaNegMin = self.resultsOpt.fun  # retrieve minimum value found by minimize
        self.lambdaMax = -self.lambdaNegMin

        # for collecting statistics -- set num_Sim=1 for one simulation
        self.numbPointsRetained = np.zeros(self.num_Sim)
        for ii in range(self.num_Sim):
            # Simulate a Poisson point process
            # Poisson number of points
            self.numbPoints = np.random.poisson(self.areaTotal * self.lambdaMax)
            # x coordinates of Poisson points
            # y coordinates of Poisson points
            xx = np.random.uniform(0, self.xDelta, (self.numbPoints, 1)) + self.xMin
            yy = np.random.uniform(0, self.yDelta, (self.numbPoints, 1)) + self.yMin

            # calculate spatially-dependent thinning probabilities
            p = self.fun_p(xx, yy)

            # Generate Bernoulli variables (ie coin flips) for thinning
            # points to be retained
            # Spatially independent thinning
            booleRetained = np.random.uniform(0, 1, (self.numbPoints, 1)) < p
            booleThinned = ~booleRetained

            # x/y locations of retained points
            self.xxRetained = xx[booleRetained]
            self.yyRetained = yy[booleRetained]
            self.xxThinned = xx[booleThinned]
            self.yyThinned = yy[booleThinned]
            self.numbPointsRetained[ii] = self.xxRetained.size

        return self.xxRetained, self.yyRetained, self.s
#!/usr/bin/env python
# NOTE(review): Python 2 code (iteritems, xrange, list-returning map); do not
# run under Python 3 without porting.

import argparse
import numpy as np
import logging
from collections import Counter

from rankedsim import utfopen
from nicemodel import load_labels
from aesir import itersplit


def tocdf(nonnormalized_dist):
    """Turn unnormalised weights into a CDF as [(1-based id, cum. prob), ...],
    ordered from most to least probable item."""
    normalized = np.array(nonnormalized_dist) / float(np.sum(nonnormalized_dist))
    ids = normalized.argsort()[::-1]
    revsorted = normalized[ids]
    return zip(ids + 1, revsorted.cumsum())


def stochastic_choice(cdf):
    """Draw one id at random from a CDF produced by tocdf()."""
    x = np.random.rand()
    for i, p in cdf:
        if x <= p:
            break
    return i


def load_features(file):
    """Read 'word<TAB>v1 v2 ...' lines; return {word: feature CDF}."""
    with utfopen(file) as f:
        feature_vectors = [l.rstrip().split("\t") for l in f.readlines()]
        feature_vectors = [(w, map(float, v.split())) for w, v in feature_vectors]
        feature_cdfs = { w : tocdf(v) for w, v in feature_vectors }
        return feature_cdfs


def word_ids_to_features(vocab_dist, feature_dists):
    """Map vocab word id (as str) -> feature CDF, for /NN nouns with features."""
    output = {}
    for wid, w in vocab_dist.iteritems():
        if not w.endswith('/NN'):
            continue
        wn = w[:w.rindex('/')]  # strip the POS tag suffix
        if wn in feature_dists:
            output[str(wid)] = feature_dists[wn]
    logging.info("%d words have features." % len(output))
    return output


def main():
    """Rewrite the corpus, replacing counts of featured nouns with
    stochastically sampled (word, feature) pair counts."""
    parser = argparse.ArgumentParser(description='Stochastically adds features to a corpus.')
    parser.add_argument('--vocab', '-v', metavar='FILE', help='The vocab labels.')
    parser.add_argument('--input', '-i', metavar='FILE', help='The input corpus (in Andrews format).')
    parser.add_argument('--output', '-o', metavar='FILE', help='The output corpus (in Andrews format).')
    parser.add_argument('--features', '-f', metavar='FILE', help='The (dense) vector space of features.')
    args = parser.parse_args()

    vocab_labels = load_labels(args.vocab)
    features = load_features(args.features)
    feature_map = word_ids_to_features(vocab_labels, features)

    logging.info("First pass; gathering statistics.")
    inpt = utfopen(args.input)
    numlines = len(inpt.readlines())
    inpt.close()

    logging.info("Starting second pass; actually writing output.")
    output = open(args.output, 'w', 1024*1024)  # 1MB write buffer
    inpt = utfopen(args.input)
    for lno, line in enumerate(inpt.readlines(), 1):
        if lno % 1000 == 0:
            logging.info("Processing doc# %d/%d (%4.1f%%)" % (lno, numlines, 100*float(lno)/numlines))
        for chunk in itersplit(line, ' '):
            chunk = chunk.rstrip()
            if not chunk:
                continue
            # Each chunk is "wordid:count".
            idx = chunk.rindex(":")
            wid, cnt = chunk[:idx], chunk[idx+1:]
            if wid not in feature_map:
                output.write(chunk + ' ')
            else:
                # Sample one feature per occurrence; emit "wid,fid:count".
                cnt = int(cnt)
                dist = feature_map[wid]
                cnts = Counter(stochastic_choice(dist) for i in xrange(cnt))
                for fid, cnt in cnts.iteritems():
                    output.write('%s,%d:%d ' % (wid, fid, cnt))
        output.write('\n')
    inpt.close()
    output.close()


if __name__ == '__main__':
    main()
a=input().split(" ") b=int(a[0]) c=int(a[1]) k=1 def sai(): print(".|.",end="") for i in range((b-1)//2): for j in range((c-3)//2): print("-",end="") for l in range(k): sai() for j in range((c-3)//2): print("-",end="") print() c=c-6 k=k+2 for i in range((int(a[1])-7)//2): print("-",end="") print("WELCOME",end="") for i in range((int(a[1])-7)//2): print("-",end="") print() k=k-2 c=c+6 for i in range((b-1)//2): for j in range((c-3)//2): print("-",end="") for l in range(k): sai() for j in range((c-3)//2): print("-",end="") print() c=c+6 k=k-2
# Python Standard Libraries # N/A # Third-Party Libraries from rest_framework.viewsets import ModelViewSet from rest_framework.permissions import IsAuthenticated # Custom Libraries from .item_model import Item from .item_serializer import ItemSerializer class ItemViewSet(ModelViewSet): serializer_class = ItemSerializer permission_classes = [IsAuthenticated] queryset = Item.objects.all()
import os import csv import matplotlib.pyplot as graph from numpy import mean def get_csv_files(): csv_files = "C:/Users/nicle/PycharmProjects/week 13/csv" csv_file_list = [] for file in os.listdir(csv_files): csv_file_list.append(csv_files + "/" + file) return csv_file_list def get_ticker_from_file(file): CompanyName = file.split("csv/")[-1][:-4] return CompanyName def get_date_and_price_list_from_csv(file_name): date_list, close_list = [], [] with open(file_name) as csv_file: for row in list(csv.reader(csv_file))[1:]: date_list.append(row[0]) close_list.append(float(row[4])) return date_list, close_list def graph_xlist_and_ylist(x, y, CompanyName): graph.plot(x, y, label=CompanyName) def show_graph(): graph.xlabel("Date") graph.ylabel("Closing Price") graph.xticks(rotation=90) graph.title("Closing Prices of Tech Sector Stocks Throughout The Pandemic") graph.legend(loc = "upper left") graph.grid() graph.show() def main(): print("Which technology sector stocks have been the best performers throughout the Covid-19 Pandemic?") print("Let's analyze the weekly stock performance of 15 tech sector stocks from November 2019 to November 2020...") NASDAQ = [] with open("C:/Users/nicle/PycharmProjects/week 13/ndaq/NDAQ.csv", "r") as csvfile: readCSV = csv.reader(csvfile) for row in list(readCSV)[1:]: NASDAQ.append(float(row[4])) csvfile.close() csv_file_list = get_csv_files() for csv_file in csv_file_list: ticker = get_ticker_from_file(csv_file) date_list, close_list = get_date_and_price_list_from_csv(csv_file) graph_xlist_and_ylist(date_list, close_list, ticker) for i in close_list: if i > mean(NASDAQ) : print(ticker,": This stock's share price beat the NASDAQ average, so it did well during the Covid Pandemic") break show_graph() main()
import json import requests from rest_framework import viewsets, permissions from rest_framework.generics import GenericAPIView from ..models import EmailOrUsernameModelBackend from ..serializers.login_serializer import LoginSerializer from ..model.users import User from rest_framework.response import Response from utility.response import ApiResponse from rest_framework.permissions import IsAuthenticated from datetime import datetime, timedelta from django.conf import settings from utility.utils import generate_token, get_login_response, generate_oauth_token from utility.constants import STATUS_INACTIVE, STATUS_ACTIVE class LoginViewSet(GenericAPIView, ApiResponse, EmailOrUsernameModelBackend): serializer_class = LoginSerializer def post(self, request, *args, **kwargs): try: host = request.get_host() username = request.data.get('email') password = request.data.get('password') if not username or not password: return ApiResponse.response_bad_request(self, message='Mobile and Password are required') ''' authenticate user and generate token ''' user = EmailOrUsernameModelBackend.authenticate(self, username=username, password=password) if user and user.status == STATUS_INACTIVE: return ApiResponse.response_bad_request(self, message='User is inactive please contact to admin') if user: ''' Authorize to user ''' # token = generate_token(request, user) token = generate_oauth_token(host, username, password) if token.status_code == 200: resp_dict = get_login_response(user, token) resp_dict['token'] = token.json() return ApiResponse.response_ok(self, data=resp_dict, message='Login successful') else: return ApiResponse.response_bad_request(self, message='User Not Authorized') else: return ApiResponse.response_unauthorized(self, message='Invalid username or password. Please try again.') except Exception as e: return ApiResponse.response_internal_server_error(self, message=[str(e.args)])
# -*- coding: utf-8 -*-
'''
    periodic system functions.
'''
# This file is part of luna.

# Distributed under the terms of the last AGPL License.
# The full license is in the file LICENCE, distributed as part of this software.

__author__ = 'Team Machine'

import os
import glob
import binascii
from hashlib import md5
import logging
import ujson as json
from tornado import gen
from tornado import httpclient
from subprocess import call, Popen, PIPE
from sloth.tools import clean_response, get_search_list

# Use the curl-based async client for every fetch in this module.
httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')

http_client = httpclient.AsyncHTTPClient()


@gen.coroutine
def process_uuid_parts(payload_uuid, file_uuid):
    '''
        process split parts

        POSTs each /tmp/<payload_uuid>.part* chunk, hex-encoded, to the
        chunk service and logs the response.
    '''
    # hopefully asynchronous handle function request
    def handle_request(response):
        '''
            Request Async Handler
        '''
        if response.error:
            logging.error(response.error)
            got_response.append({'error':True, 'message': response.error})
        else:
            got_response.append(json.loads(response.body))

    for filename in glob.iglob('/tmp/{0}.part*'.format(payload_uuid), recursive=True):
        got_response = []
        message = {
            'count': 0,
            'results': []
        }
        url = 'https://nonsense.ws/chunks/'  # Don't hardcode shit like this!
        with open(filename, 'rb') as f:
            content = f.read()
        try:
            http_client.fetch(
                url,
                method="POST",
                body=json.dumps({
                    'payload': binascii.hexlify(content),
                    'payload_uuid': payload_uuid,
                    'uuid': file_uuid
                }),
                headers={'Content-Type':'application/json'},
                callback=handle_request
            )
            # NOTE(review): busy-polls the callback flag instead of yielding
            # the fetch future directly.
            while len(got_response) == 0:
                # Yo, don't be careless with the time!
                yield gen.sleep(0.0020)
            # get stuff from response
            response = got_response[0]
            logging.warning("its time to finish this payload {0} from file id {1}".format(payload_uuid, file_uuid))
            # stuff and stuff (=
            logging.warning('next uuid returning from Erlang')
            logging.warning(response['uuid'])
            # just for now log the shit out of what we're processing
        except Exception as error:
            logging.warning(error)
    logging.warning(message.get('count'))


@gen.coroutine
def process_payload_uuid(uuid):
    '''
        process new upload uuid

        Identifies the uploaded file's type, then splits /tmp/<uuid> into
        256K .part chunks for process_uuid_parts.
    '''
    process = Popen(['file', '/tmp/{0}'.format(uuid), "."], stdout=PIPE)
    (output, err) = process.communicate()
    # Yo, wait for some shit.
    exit_code = process.wait()
    try:
        logging.warning(output)
        command = 'split -a 5 -b 256K -d /tmp/{0} /tmp/{1}.part'.format(uuid, uuid)
        status = call(command, shell=True)
    except FileNotFoundError:
        logging.warning('there is not file {0}'.format(uuid))


@gen.coroutine
def check_new_uploads(account="pebkac", status="upload", page_num=1, page_size=100):
    '''
        Check new uploads

        Queries the file search index for this account's uploads in the given
        status and feeds each hit through process_payload_uuid and
        process_uuid_parts.
    '''
    domain = 'nonsense.ws'
    search_index = 'sloth_file_index'
    query = 'uuid_register:*'
    filter_query = 'account_register:{0}&fq=status_register:{1}'.format(account,status)
    start_num = page_size * (page_num - 1)
    url = get_search_list(domain, search_index, query, filter_query, start_num, page_size)
    got_response = []
    message = {
        'count': 0,
        'page': page_num,
        'results': []
    }
    # ignore riak fields
    IGNORE_ME = ["_yz_id","_yz_rk","_yz_rt","_yz_rb"]

    # hopefully asynchronous handle function request
    def handle_request(response):
        '''
            Request Async Handler
        '''
        if response.error:
            logging.error(response.error)
            got_response.append({'error':True, 'message': response.error})
        else:
            got_response.append(json.loads(response.body))

    try:
        http_client.fetch(
            url,
            callback=handle_request
        )
        while len(got_response) == 0:
            # Yo, don't be careless with the time!
            yield gen.sleep(0.0020)
        # get stuff from response
        stuff = got_response[0]
        if stuff['response']['numFound']:
            message['count'] += stuff['response']['numFound']
            for doc in stuff['response']['docs']:
                message['results'].append(clean_response(doc, IGNORE_ME))
        else:
            logging.error('there is probably something wrong!')
    except Exception as error:
        logging.warning(error)
    logging.warning(message.get('count'))
    for yo in message.get('results'):
        # Split the payload into parts, then ship the parts.
        ooo = yield process_payload_uuid(yo.get('payload'))
        aaa = yield process_uuid_parts(yo.get('payload'), yo.get('uuid'))


@gen.coroutine
def check_erlang_node(
        home='/opt/sloth',
        erlang_release='/opt/sloth/_rel/sloth_release/bin/sloth_release',
        circusd="/etc/init.d/circusd",
        max_count=5):
    '''
        Checks for an active Erlang/OTP node

        Pings the release, starts it when unresponsive, and stops circusd
        after too many failures.
    '''
    os.environ['HOME'] = home
    von_count = 0
    running = False
    process = Popen([erlang_release, "ping", "."], stdout=PIPE)
    (output, err) = process.communicate()
    exit_code = process.wait()
    if b'not responding to pings' in output:
        logging.error(output)
        process = Popen([erlang_release, "start", "."], stdout=PIPE)
        (output, err) = process.communicate()
        exit_code = process.wait()
        logging.error(output)
    elif b'pong' in output:
        if not running:
            logging.warning('pong!')
            running = True
    else:
        von_count += 1
        if von_count > max_count:
            # NOTE(review): von_count is local and restarts at 0 each call,
            # so this branch can never trigger within one invocation --
            # confirm whether the counter was meant to persist across calls.
            circus = Popen([circusd, "stop", "."], stdout=PIPE)
            (output, err) = circus.communicate()
            logging.error('Crash after trying {0} times!'.format(max_count))
# (Commented-out gradebook/scrabble practice exercises that previously lived
# here were removed as dead code.)

def censor(text, word):
    """Return *text* with every whitespace-separated token equal to *word*
    replaced by asterisks of the same length.

    The original body was left unfinished (it converted *text* to a list and
    implicitly returned None); this implements the intended behavior.

    :param text: sentence to censor
    :param word: exact token to replace (punctuation-attached tokens such as
                 ``"freak!"`` do NOT match ``"freak"``)
    :return: the censored sentence as a string
    """
    censored = '*' * len(word)
    return ' '.join(censored if token == word else token
                    for token in text.split(' '))


censor('Hey you sun of a freak!', 'freak')
class Turn:
    """A single turn, identified by the coordinates it targets.

    ``coordinates``' shape/type is defined by the caller — TODO confirm.
    """

    def __init__(self, coordinates):
        self.coordinates = coordinates

    def __repr__(self):
        # Added for debuggability; purely additive, no callers affected.
        return '{}({!r})'.format(type(self).__name__, self.coordinates)
#!/usr/bin/python
"""Read a sentence and replace every occurrence of the word "red" with "xxx".

The original version had three defects: it tested ``"red" in word`` against
an undefined name (``word`` vs ``words``), tried to assign to a string
literal (``"red" = "xxx"``, a SyntaxError), and used Python 2 only
``raw_input``/``print`` statements.
"""


def replace_word(words, target="red", replacement="xxx"):
    """Return a copy of *words* with each exact *target* token replaced."""
    return [replacement if w == target else w for w in words]


if __name__ == "__main__":
    line = input("please input a sentence:")
    words = replace_word(line.split(" "))
    print(" ".join(words))
""" train_mnist_classifier.py Trains MNIST/FMNIST classifier for use in figure-generating scripts. """ from load_cifar import * from load_mnist import * import numpy as np import torch.nn as nn from torch.optim.lr_scheduler import StepLR import torch import time import scipy.io as sio import os # --- options --- dataset = 'cifar' # 'mnist' or 'fmnist' class_use = np.array([3,5]) # classes to select from dataset batch_size = 64 # training batch size c_dim = 3 # number of channels in the input image lr = 0.0005 # sgd learning rate momentum = 0.9 # sgd momentum term img_size = 32 # size of each image dimension gamma = 0.999 # adam momentum term epochs = 100 # number of training epochs save_folder_root = './pretrained_models' class_use_str = np.array2string(class_use) y_dim = class_use.shape[0] newClass = range(0,y_dim) test_size = 100 save_folder = os.path.join(save_folder_root, dataset + '_' + class_use_str[1:(len(class_use_str)-1):2] + '_classifier') # --- load data --- if not os.path.exists(save_folder): os.makedirs(save_folder) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if dataset == 'mnist': trX, trY, tridx = load_mnist_classSelect('train',class_use,newClass) vaX, vaY, vaidx = load_mnist_classSelect('val',class_use,newClass) teX, teY, teidx = load_mnist_classSelect('test',class_use,newClass) elif dataset == 'fmnist': trX, trY, tridx = load_fashion_mnist_classSelect('train',class_use,newClass) vaX, vaY, vaidx = load_fashion_mnist_classSelect('val',class_use,newClass) teX, teY, teidx = load_fashion_mnist_classSelect('test',class_use,newClass) elif dataset == 'cifar': trX, trY, tridx = load_cifar_classSelect('train', class_use, newClass) vaX, vaY, vaidx = load_cifar_classSelect('val', class_use, newClass) teX, teY, teidx = load_cifar_classSelect('test', class_use, newClass) else: print('dataset must be ''mnist'' or ''fmnist''!') # --- train --- batch_idxs = len(trX) // batch_size batch_idxs_val = len(vaX) // test_size ce_loss = 
nn.CrossEntropyLoss() # from models.CNN_classifier import CNN classifier = CNN(y_dim, c_dim).to(device) optimizer = torch.optim.SGD(classifier.parameters(), lr=lr, momentum=momentum) scheduler = StepLR(optimizer, step_size=1, gamma=gamma) # loss_total = np.zeros((epochs*batch_idxs)) test_loss_total = np.zeros((epochs)) percent_correct = np.zeros((epochs)) start_time = time.time() counter = 0 for epoch in range(0,epochs): for idx in range(0, batch_idxs): batch_labels = torch.from_numpy(trY[idx*batch_size:(idx+1)*batch_size]).long().to(device) batch_images = trX[idx*batch_size:(idx+1)*batch_size] batch_images_torch = torch.from_numpy(batch_images) batch_images_torch = batch_images_torch.permute(0,3,1,2).float() batch_images_torch = batch_images_torch.to(device) optimizer.zero_grad() prob_output,output = classifier(batch_images_torch) loss = ce_loss(output,batch_labels) loss.backward() optimizer.step() loss_total[counter] = loss.item() counter = counter+1 print ("[Train Epoch %d/%d] [Batch %d/%d] time: %4.4f [loss: %f]" % (epoch, epochs, idx, batch_idxs,time.time() - start_time, loss.item())) # compute validation loss test_loss = 0.0 correct = 0 for idx in range(0, batch_idxs_val): val_labels = torch.from_numpy(vaY[idx*test_size:(idx+1)*test_size]).long().to(device) val_images = vaX[idx*test_size:(idx+1)*test_size] val_images_torch = torch.from_numpy(val_images) val_images_torch = val_images_torch.permute(0,3,1,2).float() val_images_torch = val_images_torch.to(device) prob_output_val,output_val = classifier(val_images_torch) pred = prob_output_val.argmax(dim=1) test_loss += ce_loss(output_val,val_labels) correct += pred.eq(val_labels.view_as(pred)).sum().item()/float(test_size) test_loss = test_loss/batch_idxs_val percent_correct[epoch] = 100.0*correct/batch_idxs_val print ("[Test Epoch %d/%d] [loss: %f] [corr: %f]" % (epoch, epochs, test_loss.item(), percent_correct[epoch])) test_loss_total[epoch] = test_loss.item() scheduler.step() torch.save({ 'step': counter, 
'epoch': epoch, 'batch': idx, 'model_state_dict_classifier': classifier.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss_total, }, os.path.join(save_folder, 'model.pt')) sio.savemat(os.path.join(save_folder, 'training-info.mat'), {'loss_total' : loss_total[:counter], 'percent_correct' : percent_correct[:epoch], 'test_loss_total' : test_loss_total[:epoch], 'class_use' : class_use, 'batch_size' : batch_size, 'c_dim' : c_dim, 'lr' : lr, 'momentum' : momentum, 'img_size' : img_size, 'gamma' : gamma, 'epochs' : epochs})
import os
import re

from googleapiclient.discovery import build

# Directory used both for writing and for executing/cleaning up the scripts.
# (The original wrote to "outputfiles" but read/removed from "outputFiles",
# which breaks on case-sensitive filesystems.)
OUTPUT_DIR = "outputFiles"

api_key = '<API KEY>'

yt_service = build('youtube', 'v3', developerKey=api_key)

# Fetch up to 100 top-level comment threads for the video.
request = yt_service.commentThreads().list(
    part='snippet',
    videoId='<videoID>',
    maxResults=100
)
response = request.execute()

# Make sure the output directory exists before writing into it.
os.makedirs(OUTPUT_DIR, exist_ok=True)

num = 0
for comment in response['items']:
    filepath = os.path.join(OUTPUT_DIR, "output" + str(num) + ".py")
    with open(filepath, "a", encoding="utf-8") as output_file:
        comment_text = comment['snippet']['topLevelComment']['snippet']['textDisplay']
        # Undo the HTML escaping/markup YouTube applies to comment text.
        comment_text = comment_text.replace("&quot;", "\"").replace("<br />","\n").replace("&gt;", ">").replace("&lt;", "<").replace("&#39;", "'").replace("<i>", "_").replace("</i>", "_").replace("”","\"").replace("“","\"")
        comment_text = re.sub('<.*?>','', comment_text)
        # Wrap the comment in try/except so invalid "code" doesn't abort the run.
        output_file.write("try:\n")
        lines = str(comment_text).splitlines()
        for line in lines:
            if line != '\n':
                output_file.write("\t" + line + "\n")
        output_file.write("except:" + "\n \tprint(\"Not valid python code\")")
    num = num + 1

# SECURITY NOTE: exec() of untrusted YouTube comments runs arbitrary code on
# this machine — only ever run this in a throwaway sandbox.
for script in os.listdir(OUTPUT_DIR):
    try:
        exec(open(os.path.join(OUTPUT_DIR, script)).read())
    except:
        print("Invalid python code or syntax")
    os.remove(os.path.join(OUTPUT_DIR, script))
import os, re
from arcnagios.jobplugin import JobPlugin
from arcnagios.nagutils import OK, CRITICAL, UNKNOWN

# Shell fragment used when `required_programs` is configured: first verify
# each required program is on PATH, reporting "__missing <progs>" if not;
# otherwise run the script line and record a nonzero exit as "__exit <err>".
# (Internal whitespace of these templates reconstructed — TODO confirm.)
_presence_script = """\
missing=
for prog in %(required_programs)s; do
    type -p $prog >/dev/null 2>&1 || missing="$missing $prog"
done
if test -n "$missing"; then
    echo "__missing$missing" >%(output_file)s
else
    %(script_line)s
    err=$?
    [ $err -eq 0 ] || echo "__exit $err" >>%(output_file)s
fi
"""

# Shell fragment used when there are no required programs to check.
_plain_script = """\
%(script_line)s
err=$?
[ $err -eq 0 ] || echo "__exit $err" >>%(output_file)s
"""


class ScriptedJobPlugin(JobPlugin):
    """Job plugin that runs a configured shell line on the worker and grades
    the result by parsing "__missing"/"__exit"/"__status"/"__log" markers
    (and an optional success pattern) out of the staged output file."""

    # Marker lines the generated shell script (or the script itself) may
    # write into the output file.
    _missing_re = re.compile(r'__missing\s+(.*)')
    _exit_re = re.compile(r'__exit\s+(\d+)')
    _status_re = re.compile(r'__status\s+(\d+)\s+(.*)')
    _log_re = re.compile(r'__log\s+(\d+)\s+(.*)')

    def write_script(self, fh):
        """Emit the shell snippet for this test into the job script `fh`."""
        script_line = self.getconf('script_line')
        fh.write('# Scripted test %s\n'%self.service_description)
        output_file = self.getconf('output_file')
        env = {
            'script_line': script_line,
            'output_file': output_file
        }
        if self.hasconf('required_programs'):
            # Prepend the PATH presence check for the listed programs.
            env['required_programs'] = self.getconf('required_programs')
            fh.write(_presence_script % env)
        else:
            fh.write(_plain_script % env)
        fh.write('\n')

    def staged_inputs(self):
        # Extra files to upload with the job (none by default).
        return self.getconf_strlist('staged_inputs', default = [])

    def staged_outputs(self):
        # The marker/output file is the only staged output.
        return [(self.getconf('output_file'), None, [])]

    def check(self, report, jobdir, stored_urls):
        """Parse the fetched output file and update `report` accordingly.

        Precedence per line: success pattern (returns OK immediately),
        then __missing (CRITICAL, stop), __exit, __status, __log.
        If a success pattern was configured but never matched, the final
        status is CRITICAL.
        """
        output_file = os.path.join(jobdir, self.getconf('output_file'))
        if self.hasconf('output_pattern'):
            pattern_re = re.compile(self.getconf('output_pattern'))
        else:
            pattern_re = None
        try:
            fh = open(output_file)
        except IOError:
            report.update_status(UNKNOWN, 'Did not receive output file %s.'%output_file)
            return
        for ln in fh:
            if pattern_re:
                mo = re.match(pattern_re, ln)
                if mo:
                    # Success pattern matched: named groups feed the OK text.
                    msg = self.getconf('status_ok', vars = mo.groupdict())
                    report.update_status(OK, msg)
                    fh.close()
                    return
            mo = re.match(self._missing_re, ln)
            if mo:
                msg = 'Missing program(s) %s'%mo.group(1)
                report.update_status(CRITICAL, msg)
                break
            mo = re.match(self._exit_re, ln)
            if mo:
                code = int(mo.group(1))
                if code:
                    msg = 'Script exited with code %d.'%code
                    report.update_status(CRITICAL, msg)
                continue
            mo = re.match(self._status_re, ln)
            if mo:
                report.update_status(int(mo.group(1)), mo.group(2))
                continue
            mo = re.match(self._log_re, ln)
            if mo:
                report.log.log(int(mo.group(1)), mo.group(2))
                continue
        fh.close()
        if pattern_re:
            # Pattern configured but never seen: the test failed.
            report.update_status(CRITICAL,
                self.getconf('status_critical', default = 'Pattern not found.'))
import sys

# Root folder holding the REU input files.
reu_path = "C:/Users/Daway Chou-Ren/Documents/REU/"


def count_lines(lines):
    """Return ``(total_lines, blank_lines)`` for an iterable of text lines.

    A line counts as blank when it is exactly '\\n' or '\\r\\n' (same rule
    as the original script).
    """
    total = 0
    blank = 0
    for line in lines:
        if line in ('\n', '\r\n'):
            blank += 1
        total += 1
    return total, blank


if __name__ == "__main__":
    input_file = sys.argv[1]
    # `with` closes the file (the original leaked the handle); the per-line
    # Py2 debug print was dropped.
    with open(reu_path + input_file, 'r') as fh:
        num_lines, num_blank_lines = count_lines(fh)

    stats_path = reu_path + input_file[:-4] + "_file_stats.txt"
    with open(stats_path, "w") as myfile:
        myfile.write("lines:\t" + str(num_lines) + "\n")
        myfile.write("blank lines:\t" + str(num_blank_lines) + "\n")
import os paths = { "convolution": [ "../convolution/xor_convolution.hpp", "../convolution/and_or_convolution.hpp", "../convolution/gcd_lcm_convolution.hpp", "../convolution/fft.hpp", ], "data-structure": [ "../data-structure/segtree/segment_tree.cpp", "../data-structure/segtree/lazy_segment_tree.cpp", "../data-structure/fenwick_tree.cpp", "../data-structure/range_tree.cpp", "../data-structure/unionfind/union_find.cpp", "../data-structure/unionfind/weighted_union_find.cpp", "../data-structure/cht/convex_hull_trick.cpp", # "../data-structure/bst/treap.cpp", # "../data-structure/kd_tree.cpp", # "../data-structure/bit_vector.cpp", # "../data-structure/wavelet_matrix.cpp", # "../data-structure/binary_trie.cpp", # "../data-structure/disjoint_sparse_table.cpp", # "../data-structure/kd_tree.cpp", # "../data-structure/persistent_array.cpp", # "../data-structure/sparse_table.cpp", # "../data-structure/slope_trick.cpp", # "../data-structure/persistent_stack.cpp", # "../data-structure/persistent_queue.cpp", # "../data-structure/segtree/segment_tree_2d.hpp", # "../data-structure/segtree/segment_tree_beats.cpp", # "../data-structure/segtree/persistent_segment_tree.cpp", # "../data-structure/segtree/dual_segment_tree.cpp", # "../data-structure/unionfind/undoable_union_find.cpp", # "../data-structure/unionfind/partially_persistent_union_find.cpp", # "../data-structure/bst/rbst.cpp", # "../data-structure/bst/lazy_treap.cpp", # "../data-structure/bst/red_black_tree.cpp", # "../data-structure/bst/splay_tree.cpp", # "../data-structure/cht/convex_hull_trick_binsearchtree.hpp", # "../data-structure/cht/li_chao_tree.cpp", ], "flow": [ "../flow/ford_fulkerson.cpp", "../flow/dinic.cpp", "../flow/min_cost_flow.cpp", ], "geometry": [ "../geometry/geometry.hpp", "../geometry/intersection.hpp", "../geometry/dist.hpp", "../geometry/intersect.hpp", "../geometry/tangent.hpp", "../geometry/polygon.hpp", "../geometry/triangle.hpp", "../geometry/convex_hull.hpp", "../geometry/bisector.hpp", 
"../geometry/geometry3d.hpp", ], "graph": [ "../graph/scc.cpp", "../graph/lowlink.cpp", # "../graph/topological_sort.cpp", # "../graph/mst.cpp", # "../graph/range_edge_graph.cpp", # "../graph/enumerate_cliques.cpp", # "../graph/chromatic_number.cpp", # "../graph/enumerate_triangles.cpp", # "../graph/general_matching.hpp", # "../graph/two_edge_connected_components.cpp", # "../graph/bipartite_matching.hpp", # "../graph/bipartite_edge_coloring.hpp", # "../graph/shortest_path.cpp", # "../graph/edge.cpp", # "../graph/eulerian_path.hpp", # "../graph/maximum_weight_independent_set.hpp", # "../graph/dominator_tree.cpp", # "../graph/biconnected_components.cpp", # "../graph/maximum_independent_set.cpp", # "../graph/dm_decomposition.cpp", # "../graph/pseudotree_cycle.hpp", # "../graph/offline_dynamic_connectivity.cpp", # "../graph/minimum_steiner_tree.hpp", # "../graph/assignment.hpp", ], "math": [ "../math/garner.cpp", "../math/euler_totient.cpp", "../math/mod_arithmetic.cpp", "../math/floor_sum.cpp", "../math/polynomial.cpp", # "../math/bostan_mori.hpp", # "../math/berlekamp_massey.cpp", ], "misc": [ "../misc/mo.cpp", "../misc/interval_set.hpp", "../sat/twosat.hpp", # "../misc/memo.hpp", ], "string": [ "../string/rolling_hash.cpp", "../string/suffix_array.cpp", "../string/lcp_array.cpp", "../string/z_array.cpp", "../string/trie.cpp", "../string/kmp.cpp", "../string/aho_corasick.cpp", ], "tree": [ "../tree/lca.cpp", "../tree/tree_isomorphism.hpp", "../tree/centroid_decomposition.hpp", "../tree/hld.cpp", "../tree/tree_diameter.cpp", "../tree/rerooting.cpp", "../tree/link_cut_tree.cpp", # "../tree/auxiliary_tree.cpp", ], } def list_all_files(): sections = ["convolution", "data-structure", "dp", "flow", "geometry", "graph", "math", "misc", "sat", "string", "tree"] # sections = ["data-structure"] # sections = ["convolution"] res = ["paths = {"] for sec in sections: res.append(f" \"{sec}\": [") for root, dirs, files in os.walk(os.path.join("..", sec)): for f in files: if ".cpp" 
in f or ".hpp" in f: res.append(f" \"{os.path.join(root, f)}\",") res.append(" ],") res.append("}") print("\n".join(res)) if __name__ == "__main__": list_all_files()
from sklearn.tree import DecisionTreeClassifier from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sknn.mlp import Classifier, Layer from sklearn.metrics import confusion_matrix import pandas as pd import os.path import time from sklearn.externals import joblib __author__ = 'timothymiko' class DataHandler: def __init__(self): if os.path.exists('data_models/input/x_all.pkl'): self.X_all = joblib.load('data_models/input/x_all.pkl') self.Y_all = joblib.load('data_models/input/y_all.pkl') self.X_train = joblib.load('data_models/input/x_train.pkl') self.Y_train = joblib.load('data_models/input/y_train.pkl') self.X_test = joblib.load('data_models/input/x_test.pkl') self.Y_test = joblib.load('data_models/input/y_test.pkl') else: # Import the data df = pd.read_csv('data_models/input/train.csv') # Clean the data up df['Transmission'] = df['Transmission'].map({"AUTO": "AUTO", "MANUAL": "MANUAL", "Manual": "MANUAL"}) for column in ['Size', 'Make', 'Color', 'Model', 'Transmission', 'WheelType']: df[column] = df[column].astype('category') # Select the most influential columns determined in analysis.py feature_cols = [col for col in df.columns if col in ['VehOdo', 'VehBCost', 'Color', 'WheelType', 'WarrantyCost', 'Model', 'SubModel', 'VehicleAge']] df_filtered = df[feature_cols] # Drop rows that contain any nans #df_filtered = df_filtered.dropna(axis=1, how='any') # transform categorical attributes into numerical attributes df_filtered = pd.get_dummies(df_filtered) df_filtered_good = df_filtered[(df.IsBadBuy == 0)] df_filtered_bad = df_filtered[(df.IsBadBuy == 1)] y = df['IsBadBuy'] # Separate into training and test data train_num_good_samples = 42500 train_num_bad_samples = 7500 self.X_all = df_filtered self.Y_all = y self.X_train = pd.concat([df_filtered_good.sample(n=train_num_good_samples), df_filtered_bad.sample(n=train_num_bad_samples)], axis=0) self.Y_train = 
y[self.X_train.index.values.tolist()] self.X_test = df_filtered.drop(self.X_train.index.values.tolist()) self.Y_test = y[self.X_test.index.values.tolist()] joblib.dump(self.X_all, 'data_models/input/x_all.pkl') joblib.dump(self.Y_all, 'data_models/input/y_all.pkl') joblib.dump(self.X_train, 'data_models/input/x_train.pkl') joblib.dump(self.Y_train, 'data_models/input/y_train.pkl') joblib.dump(self.X_test, 'data_models/input/x_test.pkl') joblib.dump(self.Y_test, 'data_models/input/y_test.pkl') class BaseEstimator: def __init__(self, clf, name, recreate_model=False): self.classifier = clf self.name = name self.model_path = 'data_models/{0}.pkl'.format(self.name) self.recreate_model = recreate_model def run(self): data = DataHandler() x_train = data.X_train y_train = data.Y_train x_test = data.X_test y_test = data.Y_test if os.path.exists(self.model_path) and not self.recreate_model: self.classifier = joblib.load(self.model_path) else: self.classifier.fit(x_train, y_train) joblib.dump(self.classifier, self.model_path) result = self.classifier.predict(x_test) print '{0} Results:'.format(self.name) print 'Overall Accuracy: {0:3f}%'.format(self.classifier.score(x_test, y_test) * 100) x_test_bad = x_test[y_test == 1] y_test_bad = y_test[x_test_bad.index.values.tolist()] print 'Bad Accuracy: {0:3f}%'.format(self.classifier.score(x_test_bad, y_test_bad) * 100) x_test_good = x_test[y_test == 0] y_test_good = y_test[x_test_good.index.values.tolist()] print 'Good Accuracy: {0:3f}%'.format(self.classifier.score(x_test_good, y_test_good) * 100) print 'Confusion matrix:' print confusion_matrix(result, y_test, labels=[0, 1]) start_time = time.time() RandomForest = False NaiveBayes = False NeuralNetwork = False DecisionTree = False Adaboost = True if RandomForest: BaseEstimator( RandomForestClassifier(), "Random Forest" ).run() if NaiveBayes: BaseEstimator( GaussianNB(), "Naive Bayes" ).run() if DecisionTree: BaseEstimator( DecisionTreeClassifier(class_weight={0: 1, 1: 7}), 
"Decision Tree" ).run() if Adaboost: BaseEstimator( AdaBoostClassifier( base_estimator=DecisionTreeClassifier(class_weight={0: 7, 1: 1}), n_estimators=100 ), "Adaboost" ).run() if NeuralNetwork: data = DataHandler() x_train = data.X_train.as_matrix() y_train = data.Y_train.as_matrix() x_test = data.X_test.as_matrix() y_test = data.Y_test.as_matrix() RecreateModel = False if os.path.exists('data_models/neural_network.pkl') and RecreateModel: nn = joblib.load('data_models/neural_network.pkl') else: nn = Classifier( layers=[ Layer("Sigmoid", units=10), Layer("Softmax")], learning_rate=0.9, n_iter=25) nn.fit(x_train, y_train) joblib.dump(nn, 'data_models/neural_network.pkl') result = nn.predict(x_test) print 'Neural Network Results:' print 'Overall Accuracy: {0:3f}%'.format(nn.score(x_test, y_test) * 100) x_test_bad = data.X_test[data.Y_test == 1] y_test_bad = data.Y_test[x_test_bad.index.values.tolist()] print 'Bad Accuracy: {0:3f}%'.format(nn.score(x_test_bad.as_matrix(), y_test_bad.as_matrix()) * 100) x_test_good = data.X_test[data.Y_test == 0] y_test_good = data.Y_test[x_test_good.index.values.tolist()] print 'Good Accuracy: {0:3f}%'.format(nn.score(x_test_good.as_matrix(), y_test_good.as_matrix()) * 100) print 'Confusion matrix:' print confusion_matrix(result, y_test, labels=[0, 1]) run_time = time.time() - start_time print 'Total time elapsed is {0} milliseconds'.format(run_time)
import argparse
import json
import os
import time

from loop import Logger, StressRay


def run_experiment(experiment_name, workload, system_config, log_directory):
    """Run one (workload, system_config) cell of the test matrix.

    Starts Ray with the given system configuration, iterates the workload
    script until its iteration/time target, then tears Ray down. All events
    are written to a per-cell log file under *log_directory*.
    """
    log_filename = os.path.join(log_directory, "{}_{}_{}_{}.log".format(
        workload["name"],
        system_config["num_nodes"],
        system_config["num_workers"],
        system_config["shm_size"]))
    # BUGFIX: was the Py2-only statement `print log_filename`, inconsistent
    # with the `print(...)` calls used elsewhere in this file.
    print(log_filename)
    with Logger(log_filename) as logger:
        logger.log("experiment", {
            "experiment_name": experiment_name,
            "system_config": system_config,
            "workload": workload
        })
        s = StressRay(logger)
        s.start_ray(shm_size=system_config["shm_size"],
                    mem_size=system_config["mem_size"],
                    num_workers=system_config["num_workers"],
                    num_nodes=system_config["num_nodes"])
        # Give the cluster a moment to come up before driving load.
        time.sleep(2)
        execution_time_limit = workload["execution_time_limit"] if "execution_time_limit" in workload else None
        s.iterate_workload(workload["workload_script"],
                           iteration_target=workload["iteration_target"],
                           time_target=workload["time_target"],
                           execution_time_limit=execution_time_limit)
        s.stop_ray()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog="matrix.py", description="Ray performance and stress testing matrix")
    parser.add_argument("configuration", metavar="config.json", help="json configuration file")
    parser.add_argument("--experiment-name", help="descriptive name for this experiment")
    parser.add_argument("--log-directory", default="logs", help="directory for log files")
    args = parser.parse_args()

    with open(args.configuration) as f:
        config = json.load(f)

    # Default the experiment name to the config file's basename.
    if args.experiment_name:
        experiment_name = args.experiment_name
    else:
        experiment_name = os.path.splitext(os.path.basename(args.configuration))[0]

    # Timestamped directory so repeated runs never collide.
    log_directory = os.path.join(args.log_directory, experiment_name, time.strftime("%Y%m%d_%H%M%S"))
    if not os.path.exists(log_directory):
        os.makedirs(log_directory)
    print("Output to {}".format(log_directory))

    # Full cartesian product: every workload on every system configuration.
    for system_config in config["system_configs"]:
        for workload in config["workloads"]:
            run_experiment(experiment_name, workload, system_config, log_directory)

    print("Completed with output to {}".format(log_directory))
str="hello world" print(str[3:]) print(str[::2])
"""Demo of importing from a local module and an aliased stdlib import."""

# Imports grouped at the top of the file (PEP 8) instead of interleaved
# with the code; stdlib first, then local modules.
from math import exp as exponential

from my_module import compute

print(compute([5, 7, 11]))
print(exponential(2))
import unittest

from katas.kyu_8.get_planet_name_by_id import get_planet_name


class GetPlanetNameTestCase(unittest.TestCase):
    """Tests for the kyu-8 kata ``get_planet_name``: maps a 1-based
    position from the sun to the planet's name."""

    def test_equals(self):
        self.assertEqual(get_planet_name(2), 'Venus')

    def test_equals_2(self):
        self.assertEqual(get_planet_name(5), 'Jupiter')

    def test_equals_3(self):
        self.assertEqual(get_planet_name(3), 'Earth')

    def test_equals_4(self):
        self.assertEqual(get_planet_name(4), 'Mars')

    def test_equals_5(self):
        self.assertEqual(get_planet_name(8), 'Neptune')

    def test_equals_6(self):
        self.assertEqual(get_planet_name(1), 'Mercury')
"""Minimal pymysql demo: create a users table, insert a row, read it back."""

import pymysql

# NOTE(review): credentials are hard-coded for the demo; use config/env
# variables in real code.
conn = pymysql.connect(database="db1", user="susan", password="12345", host="localhost")
cur = conn.cursor()

try:
    # Create table. IF NOT EXISTS makes the script rerunnable (the original
    # plain CREATE TABLE failed on every run after the first).
    cur.execute(
        "CREATE TABLE IF NOT EXISTS db1.users("
        "id int auto_increment primary key, "
        "name text, age int, gender text, address text)"
    )

    # User data to store.
    name = "Uma"
    age = 25
    gender = "F"
    address = "Karnataka"
    data = {'name': name, 'age': age, 'gender': gender, 'address': address}
    print(data)

    # Saving data to DB (parameterized query — no SQL injection risk).
    cur.execute(
        "INSERT INTO users (name,age,gender,address) "
        "VALUES (%(name)s,%(age)s,%(gender)s,%(address)s);",
        data)
    conn.commit()
    print("saved to db")

    # Reading data from DB.
    cur.execute("SELECT * FROM users;")
    # get one row
    data1 = cur.fetchone()
    # get remaining rows (fetchall continues after the fetchone cursor position)
    data2 = cur.fetchall()
    print(data1)
    print(data2)
finally:
    # The original leaked the cursor and connection.
    cur.close()
    conn.close()
# Generated by Django 2.2.1 on 2019-05-15 11:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('portalapp', '0027_loggedissue_handle'), ] operations = [ migrations.AlterField( model_name='loggedissue', name='handle', field=models.CharField(blank=True, max_length=25, null=True), ), ]
import requests

payload = {"alyssa": "hello", "python": "hello python"}

# NOTE(review): `params=` sends the payload in the URL query string even
# though this is a POST; if the intent was a form body, this should be
# `data=payload`. httpbin's /post endpoint echoes both ("args" vs "form"),
# so confirm which was intended before changing it.
r = requests.post("http://httpbin.org/post", params=payload)
print(r.text)
from ._console_gui import ConsoleGui
# Given an integer array `numbers`, take every pair of elements at distinct
# indices, add them, and return all distinct sums sorted ascending.
#
# Constraints: 2 <= len(numbers) <= 100; 0 <= numbers[i] <= 100.
# Examples: [2,1,3,4,1] -> [2,3,4,5,6,7]; [5,0,2,7] -> [2,5,7,9,12]
def solution(numbers):
    """Return the sorted, de-duplicated list of all pairwise sums."""
    pair_sums = {numbers[left] + numbers[right]
                 for left in range(len(numbers))
                 for right in range(left + 1, len(numbers))}
    return sorted(pair_sums)
# -*- coding: utf-8 -*- """ Created on Wed Nov 4 09:28:50 2020 @author: vernika """ '''OpenCV stores RGB images as NumPy arrays in reverse channel order. Instead of storing an image in RGB order, it instead stores the image in BGR order; thus we unpack the tuple in reverse order.''' import cv2 import argparse import numpy as np def show_img(img): cv2.imshow("canvas",img) cv2.waitKey(0) return ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required = True, help = "path to image") args = vars(ap.parse_args()) load_image = cv2.imread(args["image"]) b,g,r = cv2.split(load_image) show_img(b) show_img(g) show_img(r) merged = cv2.merge([b,g,r]) show_img(merged) '''in order to check the original color of the channel, we first split the channels as shown above then we use a zero mask of same dimension to show color. basically If we want to visualize red color, then we will use cv2.merge([0,0,r]) setting blue and green channel as zero''' zeros = np.zeros(load_image.shape[:2], dtype = 'uint8') merged = cv2.merge([zeros,zeros,r]) print("Visualizing red channel") show_img(merged)
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019/7/23 8:31 PM # @Author : ZhangHao # @File : card_qualification_data.py # @Desc : 为棋牌无资质数据生成特征 import base_generator import codecs from utils.logger import Logger log = Logger().get_logger() class CardDataFeatureGenerator(base_generator.BaseFeatureGenerator): def __init__(self, word_seg_func, stopwords_path=None, encoding="utf-8", ngram=3, feature_min_length=2): base_generator.BaseFeatureGenerator.__init__(self, word_seg_func, stopwords_path, encoding, ngram, feature_min_length) self.encoding = encoding def run(self, data_path): """ 遍历文件记录 生成各记录的特征 :param data_path: :param encoding: :return: 列表,列表元素三元组信息:(标签,特征,额外信息(原数据、userid等)) """ label_data_list = list() with codecs.open(data_path, "r", self.encoding) as rf: for index, line in enumerate(rf): parts = line.strip("\n").split('\t') # 棋牌数据有三列 标签、物料、userid label = parts[0] contents = parts[1].split("||") info = "\t".join(parts[1:]) # 获取该记录所有特征 feature_set = set() for content in contents: feature_set |= self.gen_ngram_feature(content) label_data_list.append((label, " ".join(feature_set), info)) if index % 10000 == 0: log.info("process line #%d" % index) log.info("content : %s" % parts[1]) log.info("="*100) log.info("seg result : %s" % " ".join(feature_set)) return label_data_list if __name__ == "__main__": generator = CardDataFeatureGenerator("jieba") data_list = generator.run("../../data/origin_data/test.txt") for data in data_list: print("\t".join(data).encode("utf-8"))
""" ********************************************************************* This file is part of: The Acorn Project https://wwww.twistedfields.com/research ********************************************************************* Copyright (c) 2019-2021 Taylor Alexander, Twisted Fields LLC Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md). Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ********************************************************************* """ import sys sys.path.append('../vehicle') import corner_actuator from corner_actuator import COUNTS_PER_REVOLUTION, OdriveConnection import serial import time import math from odrive.utils import dump_errors from evdev import InputDevice, list_devices, categorize, ecodes, KeyEvent from steering import calculate_steering import zmq import pickle import click import argparse from multiprocessing import Process import os import fibre import motors control = motors.AcornMotorInterface(manual_control=True) control.odrive_connections = [ OdriveConnection(name='front_right', serial="335E31483536", path="/dev/ttySC1"), #OdriveConnection(name='front_left', serial="335B314C3536", path="/dev/ttySC0"), #OdriveConnection(name='rear_right', serial="3352316E3536", path="/dev/ttySC2"), #OdriveConnection(name='rear_left', serial="205F3882304E", path="/dev/ttySC3") ] control.run_debug_control(enable_steering=True, enable_traction=False)
class Call(object):
    """A logged phone-call record: caller identity plus when and why."""

    def __init__(self, id, name, phone, time, reason):
        self.id = id
        self.name = name
        self.phone = phone
        self.time = time
        self.reason = reason

    def displayAll(self):
        """Return a one-line summary containing every field of this call."""
        return (f'ID: {self.id} NAME: {self.name} PHONE: {self.phone} '
                f'TIME: {self.time} REASON: {self.reason}')
class BigSur():
    """Minimal demo class holding a display name."""

    def __init__(self, name):
        self.name = name

    def say_hello(self):
        """Print the stored name to stdout."""
        print(self.name)


# Fix: the original rebound the name `BigSur` to an instance
# (`BigSur = BigSur("macOS")`), shadowing the class and making it
# impossible to create further instances.  Use a distinct variable.
# The redundant `name = "macOS"` re-assignment is also dropped —
# __init__ already sets it.
big_sur = BigSur("macOS")
big_sur.say_hello()
'''
slots

A Python library to perform simple multi-armed bandit analyses.

Scenarios:
    - Run MAB test on simulated data (N bandits), default epsilon-greedy test.
        mab = slots.MAB(probs = [0.1,0.15,0.05])
        mab.run(trials = 10000)
        mab.best  # Bandit with highest probability after T trials

    - Run MAB test on "real" payout data (probabilities unknown).
        mab = slots.MAB(payouts = [0,0,0,1,0,0,0,0,0,....])
        mab.run(trials = 10000) # Max is length of payouts
'''

import numpy as np


class MAB(object):
    '''
    Multi-armed bandit test class.
    '''

    def __init__(self, num_bandits=3, probs=None, payouts=None, live=True,
                 stop_criterion=None):
        '''
        Parameters
        ----------
        num_bandits : int
            default is 3
        probs : np.array of floats
            payout probabilities
        payouts : np.array of floats
            If `live` is True, `payouts` should be None.
        live : bool
            Whether the use is for a live, online trial.
        stop_criterion : dict
            Stopping criterion (str) and threshold value (float).
            Defaults to {'criterion': 'regret', 'value': 0.1}.
        '''
        # Fix: the default used to be a mutable dict literal in the signature,
        # which is shared across all calls; build it in the body instead.
        if stop_criterion is None:
            stop_criterion = {'criterion': 'regret', 'value': 0.1}

        self.choices = []

        if not probs:
            if not payouts:
                if live:
                    self.bandits = Bandits(live=True,
                                           payouts=np.zeros(num_bandits),
                                           probs=None)
                else:
                    # Simulated offline test: random probabilities, unit payouts.
                    self.bandits = Bandits(
                        probs=[np.random.rand() for x in range(num_bandits)],
                        payouts=np.ones(num_bandits),
                        live=False)
            else:
                # Payouts given: one bandit per payout, random probabilities.
                self.bandits = Bandits(
                    probs=[np.random.rand() for x in range(len(payouts))],
                    payouts=payouts,
                    live=False)
                num_bandits = len(payouts)
        else:
            if payouts:
                self.bandits = Bandits(probs=probs, payouts=payouts,
                                       live=False)
                num_bandits = len(payouts)
            else:
                self.bandits = Bandits(probs=probs,
                                       payouts=np.ones(len(probs)),
                                       live=False)
                num_bandits = len(probs)

        self.wins = np.zeros(num_bandits)
        self.pulls = np.zeros(num_bandits)

        # Set the stopping criteria
        self.criteria = {'regret': self.regret_met}
        self.criterion = stop_criterion.get('criterion', 'regret')
        self.stop_value = stop_criterion.get('value', 0.1)

        # Bandit selection strategies
        self.strategies = ['eps_greedy', 'softmax', 'ucb', 'bayesian']

    def run(self, trials=100, strategy=None, parameters=None):
        '''
        Run MAB test with T trials.

        Parameters
        ----------
        trials : int
            Number of trials to run.
        strategy : str
            Name of selected strategy (defaults to 'eps_greedy').
        parameters : dict
            Parameters for selected strategy.

        Available strategies:
            - Epsilon-greedy ("eps_greedy")
            - Softmax ("softmax")
            - Upper confidence bound ("ucb")
            - Bayesian bandit ("bayesian")

        Returns
        -------
        None
        '''
        if trials < 1:
            raise Exception('MAB.run: Number of trials cannot be less than 1!')
        if not strategy:
            strategy = 'eps_greedy'
        else:
            if strategy not in self.strategies:
                # Fix: error message said 'MAB,run' (comma typo).
                raise Exception('MAB.run: Strategy name invalid. Choose from:'
                                ' {}'.format(', '.join(self.strategies)))

        # Run strategy
        for n in range(trials):
            self._run(strategy, parameters)

    def _run(self, strategy, parameters=None):
        '''
        Run a single trial of the MAB strategy.

        Parameters
        ----------
        strategy : str
        parameters : dict

        Returns
        -------
        None
        '''
        choice = self.run_strategy(strategy, parameters)
        self.choices.append(choice)
        payout = self.bandits.pull(choice)
        if payout is None:
            print('Trials exhausted. No more values for bandit', choice)
            return None
        else:
            self.wins[choice] += payout
        self.pulls[choice] += 1

    def run_strategy(self, strategy, parameters):
        '''
        Run the selected strategy and return the bandit choice.

        Parameters
        ----------
        strategy : str
            Name of MAB strategy.
        parameters : dict
            Strategy function parameters

        Returns
        -------
        int
            Bandit arm choice index
        '''
        return self.__getattribute__(strategy)(params=parameters)

    # ###### ----------- MAB strategies ---------------------------------------####

    def max_mean(self):
        """
        Pick the bandit with the current best observed proportion of winning.

        The +0.1 in the denominator avoids division by zero for unpulled arms.

        Returns
        -------
        int
            Index of chosen bandit
        """
        return np.argmax(self.wins / (self.pulls + 0.1))

    def bayesian(self, params=None):
        '''
        Run the Bayesian Bandit algorithm which utilizes a beta distribution
        for exploration and exploitation.

        Parameters
        ----------
        params : None
            For API consistency, this function can take a parameters argument,
            but it is ignored.

        Returns
        -------
        int
            Index of chosen bandit
        '''
        p_success_arms = [
            np.random.beta(self.wins[i] + 1, self.pulls[i] - self.wins[i] + 1)
            for i in range(len(self.wins))
        ]
        return np.array(p_success_arms).argmax()

    def eps_greedy(self, params):
        '''
        Run the epsilon-greedy selection strategy: exploit the best-known arm
        with probability 1-epsilon, otherwise explore a random other arm.

        Parameters
        ----------
        params : dict
            Epsilon under key 'epsilon' (default 0.1).

        Returns
        -------
        int
            Index of chosen bandit
        '''
        if params and type(params) == dict:
            eps = params.get('epsilon')
        else:
            eps = 0.1
        r = np.random.rand()
        if r < eps:
            return np.random.choice(list(set(range(len(self.wins))) -
                                         {self.max_mean()}))
        else:
            return self.max_mean()

    def softmax(self, params):
        '''
        Run the softmax selection strategy.

        Parameters
        ----------
        params : dict
            Temperature under key 'tau' (default 0.1).

        Returns
        -------
        int
            Index of chosen bandit
        '''
        default_tau = 0.1
        if params and type(params) == dict:
            tau = params.get('tau')
            # Fix: float(None) raises TypeError (not ValueError), and the old
            # handler contained a bare string statement instead of a message;
            # any missing/non-numeric tau now falls back to the default.
            try:
                tau = float(tau)
            except (TypeError, ValueError):
                print('slots: softmax: Setting tau to default')
                tau = default_tau
        else:
            tau = default_tau

        # Handle cold start. Not all bandits tested yet.
        if True in (self.pulls < 3):
            return np.random.choice(range(len(self.pulls)))
        else:
            payouts = self.wins / (self.pulls + 0.1)
            norm = sum(np.exp(payouts / tau))
            ps = np.exp(payouts / tau) / norm

            # Randomly choose index based on CMF
            cmf = [sum(ps[:i + 1]) for i in range(len(ps))]

            rand = np.random.rand()
            found = False
            found_i = None
            i = 0
            while not found:
                if rand < cmf[i]:
                    found_i = i
                    found = True
                else:
                    i += 1
            return found_i

    def ucb(self, params=None):
        '''
        Run the upper confidence bound MAB selection strategy.

        This is the UCB1 algorithm described in
        https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf

        Parameters
        ----------
        params : None
            For API consistency, this function can take a parameters argument,
            but it is ignored.

        Returns
        -------
        int
            Index of chosen bandit
        '''
        # UCB = j_max(payout_j + sqrt(2ln(n_tot)/n_j))

        # Handle cold start. Not all bandits tested yet.
        if True in (self.pulls < 3):
            return np.random.choice(range(len(self.pulls)))
        else:
            n_tot = sum(self.pulls)
            payouts = self.wins / (self.pulls + 0.1)
            ubcs = payouts + np.sqrt(2 * np.log(n_tot) / self.pulls)
            return np.argmax(ubcs)

    # ###------------------------------------------------------------------####

    def best(self):
        '''
        Return current 'best' choice of bandit.

        Returns
        -------
        int
            Index of bandit
        '''
        if len(self.choices) < 1:
            print('slots: No trials run so far.')
            return None
        else:
            return np.argmax(self.wins / (self.pulls + 0.1))

    def est_payouts(self):
        '''
        Calculate current estimate of average payout for each bandit.

        Returns
        -------
        array of floats or None
        '''
        if len(self.choices) < 1:
            print('slots: No trials run so far.')
            return None
        else:
            return self.wins / (self.pulls + 0.1)

    def regret(self):
        '''
        Calculate expected regret, where expected regret is
        maximum optimal reward - sum of collected rewards, i.e.

        expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t)

        Returns
        -------
        float
        '''
        # Arms with zero pulls produce 0/0; nan_to_num maps that to 0.
        # errstate suppresses the (expected) divide/invalid warnings.
        with np.errstate(divide='ignore', invalid='ignore'):
            max_rate = np.max(np.nan_to_num(self.wins / self.pulls))
        return (sum(self.pulls) * max_rate - sum(self.wins)) / sum(self.pulls)

    def crit_met(self):
        '''
        Determine if stopping criterion has been met.

        Returns
        -------
        bool
        '''
        if True in (self.pulls < 3):
            return False
        else:
            return self.criteria[self.criterion](self.stop_value)

    def regret_met(self, threshold=None):
        '''
        Determine if regret criterion has been met.

        Parameters
        ----------
        threshold : float

        Returns
        -------
        bool
        '''
        if not threshold:
            return self.regret() <= self.stop_value
        elif self.regret() <= threshold:
            return True
        else:
            return False

    # ## ------------ Online bandit testing ------------------------------ ####

    def online_trial(self, bandit=None, payout=None, strategy='eps_greedy',
                     parameters=None):
        '''
        Update the bandits with the results of the previous live, online
        trial.  Next, run the selection algorithm.  If the stopping criterion
        is met, return the best arm estimate.  Otherwise return the next arm
        to try.

        Parameters
        ----------
        bandit : int
            Bandit index
        payout : float
            Payout value
        strategy : string
            Name of update strategy
        parameters : dict
            Parameters for update strategy function

        Returns
        -------
        dict
            Format: {'new_trial': boolean, 'choice': int, 'best': int}
        '''
        if bandit is not None and payout is not None:
            self.update(bandit=bandit, payout=payout)
        else:
            raise Exception('slots.online_trial: bandit and/or payout value'
                            ' missing.')

        if self.crit_met():
            return {'new_trial': False, 'choice': self.best(),
                    'best': self.best()}
        else:
            return {'new_trial': True,
                    'choice': self.run_strategy(strategy, parameters),
                    'best': self.best()}

    def update(self, bandit, payout):
        '''
        Update bandit trials and payouts for given bandit.

        Parameters
        ----------
        bandit : int
            Bandit index
        payout : float

        Returns
        -------
        None
        '''
        self.choices.append(bandit)
        self.pulls[bandit] += 1
        self.wins[bandit] += payout
        self.bandits.payouts[bandit] += payout


class Bandits():
    '''
    Bandit class.
    '''

    def __init__(self, probs, payouts, live=True):
        '''
        Instantiate Bandit class, determining
            - Probabilities of bandit payouts
            - Bandit payouts

        Parameters
        ----------
        probs : array of floats
            Probabilities of bandit payouts
        payouts : array of floats
            Amount of bandit payouts. If `live` is True, `payouts` should be
            an N length array of zeros.
        live : bool
        '''
        if not live:
            # Only use arrays of equal length
            if len(probs) != len(payouts):
                raise Exception('Bandits.__init__: Probability and payouts '
                                'arrays of different lengths!')
            self.probs = probs
            self.payouts = payouts
            self.live = False
        else:
            self.live = True
            self.probs = None
            self.payouts = payouts

    def pull(self, i):
        '''
        Return the payout from a single pull of the bandit i's arm.

        Parameters
        ----------
        i : int
            Index of bandit.

        Returns
        -------
        float or None
        '''
        if self.live:
            # NOTE(review): live mode pops from payouts[i], so each arm's
            # payouts must be list-like with len()/pop(); the np.zeros array
            # created by MAB(live=True) does not support this — confirm live
            # payouts are replaced with per-arm lists before pulling.
            if len(self.payouts[i]) > 0:
                return self.payouts[i].pop()
            else:
                return None
        else:
            if np.random.rand() < self.probs[i]:
                return self.payouts[i]
            else:
                return 0.0

    def info(self):
        pass
import random
import numpy as np
import game.bitboard as bitop

PLAYER_BLACK = 0
PLAYER_WHITE = 1
OBSTACLE = 2


def empty_array():
    """Return a fresh 3x8x8 board tensor: one 8x8 plane per cell state."""
    return np.zeros((3, 8, 8), dtype=np.uint64)


def set_cell_state(arr, row, col, state):
    """Mark (row, col) as occupied on the plane belonging to `state`."""
    arr[state, row, col] = 1


def clear_cell_state(arr, row, col):
    """Clear (row, col) on all three state planes."""
    for plane in (PLAYER_BLACK, PLAYER_WHITE, OBSTACLE):
        arr[plane, row, col] = 0


def initial_setup(obs_n=5):
    """Build the opening position: four centre discs plus obs_n random obstacles."""
    arr = empty_array()
    centre = {
        (3, 3): PLAYER_BLACK,
        (4, 4): PLAYER_BLACK,
        (3, 4): PLAYER_WHITE,
        (4, 3): PLAYER_WHITE,
    }
    for (row, col), owner in centre.items():
        set_cell_state(arr, row, col, owner)
    # Obstacles may land on any square except the four centre discs.
    pool = [i for i in range(8 * 8) if divmod(i, 8) not in centre]
    for i in random.sample(pool, obs_n):
        row, col = divmod(i, 8)
        set_cell_state(arr, row, col, OBSTACLE)
    return arr
# Paint calculator: collect wall sizes from the user, then report how many
# gallons of paint to buy.

GALLON_COVERAGE_SQM = 350  # one gallon covers 350 square meters


def parse_wall(text):
    """Parse 'width,height' into a [width, height] list of ints.

    Returns None when the text is not two comma-separated integers
    (the original crashed on non-numeric input with ValueError).
    """
    parts = text.split(',')
    if len(parts) < 2:
        return None
    try:
        return [int(parts[0]), int(parts[1])]
    except ValueError:
        return None


def gallons_needed(walls):
    """Return gallons required to cover every [width, height] wall in `walls`."""
    total_area = sum(w * h for w, h in walls)
    return total_area / GALLON_COVERAGE_SQM


def main():
    """Interactive loop: read wall sizes until a blank line, then report."""
    print('Paint Calculator')
    print('Enter the wall size as width and height in meters or press enter to stop')
    print('example:10,30')
    walls = []  # list of [width, height] wall measurements in meters
    while True:
        s = input('Enter wall size:')
        if len(s) == 0:
            break
        item = parse_wall(s)
        if item is None:
            # Fix: the original `break`-ed here, silently aborting input
            # collection on a single typo; re-prompt instead.
            print('Invalid format')
            continue
        walls.append(item)
        print(f'Adding: {item}')
    print(f'You have entered: {walls}')
    total = gallons_needed(walls)
    print(f'You need to buy: {round(total,2)} gallons')


if __name__ == '__main__':
    main()
# Send a single fixed command frame (CMD 0x0150) to a device over the serial
# port given on the command line, and read back the response.
import serial, time, codecs
import camabio
import sys
import cserial

"""
Command Packet
    PREFIX  0xAA55
    CMD     0x0150
    LEN     0
    DATA    Null
    CKS     Check Sum
Response Packet
    PREFIX  0x55AA
    RCM     0x0150
    LEN     4
    RET     ERR_SUCCESS or ERR_FAIL
    DATA    Null
    CKS     Check Sum
"""

# The serial port name is required as the first CLI argument.
if len(sys.argv) <= 1:
    sys.exit(1)

port = sys.argv[1]

# 24-byte command frame: prefix bytes 0x55 0xaa, command 0x50 0x01, then a
# zero-length payload.  The checksum is presumably filled in in place by
# camabio.setChksum — confirm against camabio's implementation.
data = [0x55,0xaa,0x50,0x01,0x00,0x00,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x00]
camabio.setChksum(data)
cserial.writeAndRead(port,data)
# https://documen.tician.de/pycuda/tutorial.html import matplotlib.pyplot as plt plt.ion() import numpy as np import pycuda.driver as cuda import pycuda.autoinit from pycuda.compiler import SourceModule a=np.random.randn(4, 4).astype(np.float32) a_gpu=cuda.mem_alloc(a.nbytes) cuda.memcpy_htod(a_gpu, a) mod=SourceModule(""" __global__ void doublify (float* a){ int idx = ((threadIdx.x)+(((4)*(threadIdx.y)))); a[idx]*=(2); }""") func=mod.get_function("doublify") func(a_gpu, block=(4,4,1,)) a_doubled=np.empty_like(a) cuda.memcpy_dtoh(a_doubled, a_gpu) print(a_doubled) print(a)
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('occ_survey', '0029_auto_20150715_1359'), ] operations = [ migrations.CreateModel( name='ControlSetPoints', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('date_updated', models.DateTimeField(auto_now=True)), ('room', models.CharField(max_length=20)), ('lux_th', models.IntegerField(default=0)), ('upp_th', models.IntegerField(default=0)), ('td', models.IntegerField(default=0)), ('override', models.IntegerField(default=0)), ], options={ 'db_table': 'control_set_points', }, ), ]
#coding:utf-8 import sys import os reload(sys) sys.setdefaultencoding('utf-8') dir1='F:\\Git Repository\\weibo' file_name_list1 = os.listdir(dir1) followers=[] for name in file_name_list1: path=dir1+'\\'+name file = open(path) lines = file.readlines() for index in range(0, len(lines),1): item=lines[index].strip('\n').split(',') if len(item)==2: followers.append(item) # print followers # for follower in followers: # print follower[0],follower[1] # print '\n' dir2='F:\\Git Repository\\communityNumbers' file_name_list2= os.listdir(dir2) community={} for index1 in range(0, len(file_name_list2),1): path = dir2 + '\\' + file_name_list2[index1] file = open(path) lines = file.readlines() number=[] for id in lines: number.append(id.strip('\n')) community[index1]=number # print community groupFollowers={} for index in range(0,len(community),1): group=[] groupFollowers[index]=group for follower in followers: for index2 in range(0,len(community),1): if (follower[0] in community[index2])&(follower[1] in community[index2]): groupFollowers[index2].append(follower) for index in range(0,len(groupFollowers),1): file_name='F:\\Git Repository\\groupFollowersByCommunity\\followersInCommunity%d.csv'%(index) with open(file_name, "w") as f: for follower in groupFollowers[index]: f.write(follower[0]+','+follower[1]+'\n') print 'file%d done'%(index)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Flask blueprint serving an SSR service list, with token auth, YAML config
# loading, and optional async self-registration against a reg_server.
import os
import time
import traceback
from multiprocessing import Process

import requests
import yaml
from flask import Blueprint, json, current_app, request

from .tools import safe_get, safe_value
from .ssr import SSR

path = os.path.dirname(os.path.abspath(__file__))
blueprint = Blueprint(os.path.basename(path), __name__)
config = None  # lazily loaded from config.yaml; see get_config()


@blueprint.route('/')
def index():
    """Return the SSR service list as JSON, guarded by the configured token."""
    try:
        conf = get_config()
        if conf is None:
            return json.dumps({'code': -500, 'msg': 'no config'})
        if request.args.get('token', '') != conf.get('token', ''):
            return json.dumps({'code': -101, 'msg': 'token error'})
        ssr_list = ssr_load()
        if ssr_list is None:
            return json.dumps({'code': 100, 'msg': 'no SSR'})
        return json.dumps({'code': 0, 'data': {'data': ssr_list}, 'msg': 0})
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return json.dumps({'code': -500, 'msg': repr(e)})


@blueprint.route('/config/reload')
def config_reload():
    """Force a re-read of config.yaml."""
    if load_config():
        return json.dumps({'code': 0, 'msg': ''})
    else:
        return json.dumps({'code': -100, 'msg': 'yaml decode error'})


@blueprint.route('/reg')
def reg():
    """Re-run registration against the configured reg_server."""
    init()
    return json.dumps({'code': 0, 'msg': ''})


def load_config():
    """(Re)load config.yaml into the module-level `config`.

    Returns True on success, False when the file is empty or invalid YAML.
    """
    global config
    try:
        with open(path + '/config.yaml', 'r') as f:
            # Fix: yaml.load without an explicit Loader can construct
            # arbitrary Python objects; a config file only needs safe_load.
            config = yaml.safe_load(f)
        if config is None:
            return False
        return True
    except yaml.YAMLError:
        return False


def get_config():
    """Return the cached config dict, loading it on first use (None on failure)."""
    global config
    if config is None:
        if not load_config():
            return None
    return config


def get_host():
    """Return the configured 'host' value, or None when absent."""
    conf = get_config()
    host = safe_get(conf, 'host')
    return host


def ssr_load():
    """Build the flat service list from the 'ssr' section of the config.

    Raises
    ------
    ValueError
        When the config, the 'ssr' section, a service's 'config', or the
        'host' entry is missing.
    """
    conf = get_config()
    # g = get_group(conf['reg_server'] + 'group')
    if conf is None:
        raise ValueError('not find config')
    services = safe_get(conf, 'ssr')
    if services is None:
        raise ValueError('not find \'ssr\' in config')
    sers = []
    for service in services:
        con = safe_get(service, 'config')
        # Fix: the original `not con and con == ''` only rejected '',
        # silently accepting a missing (None) config; reject anything falsy.
        if not con:
            raise ValueError('SSR config not find')
        host = get_host()
        if host is None:
            raise ValueError('\'host\' is config is None')
        remarks = safe_value(safe_get(service, 'remarks'), 'default')
        restart = safe_value(safe_get(service, 'restart'), '')
        ssr = SSR(con, host, '', remarks, restart)
        sers.extend(ssr.get_services())
    return sers


def _reg(url, h, s, t):
    """Child-process body: wait 10s, POST our registration, then exit."""
    global path
    time.sleep(10)
    # NOTE(review): `json=` expects a serializable object; passing
    # json.dumps(...) double-encodes the payload as a JSON string —
    # confirm the reg_server actually expects that framing before changing.
    requests.post(url, json=json.dumps({'token': t,
                                        'url': '%s/%s/' % (s, os.path.basename(path)),
                                        'server': h}))
    exit(0)


def get_group(url):
    """Fetch this node's group name from the reg server, defaulting on any miss."""
    req = requests.get(url)
    if not req:
        return 'default_group'
    j = req.json()
    # Fix: the original used `and`, which raised KeyError when 'code' was
    # absent; fall back whenever the key is missing OR the code is non-zero.
    if 'code' not in j or j['code'] != 0:
        return 'default_group'
    else:
        try:
            return j['data']['group']
        except (KeyError, TypeError):
            # Fix: dict lookups raise KeyError/TypeError, never IndexError.
            return 'default_group'


def init():
    """Spawn the async registration process when a reg_server is configured.

    Raises ValueError when the config or its 'host' entry is missing.
    """
    try:
        conf = get_config()
        if conf is None:
            raise ValueError('not find config')
        host = get_host()
        if host is None:
            raise ValueError('\'host\' is config is None')
        server = safe_value(safe_get(conf, 'server'), 'http://127.0.0.1:80')
        token = safe_value(safe_get(conf, 'token'), '')
        if 'reg_server' in conf and conf['reg_server']:
            p = Process(target=_reg,
                        args=(conf['reg_server'] + 'reg', host, server, token))
            p.start()
    except Exception as e:
        raise e


init()
#!/usr/bin/env python3
import pygame
from .OptimizationScene import OptimizationScene, STATUS
from utils import utils, constants
from utils import neopixelmatrix as graphics
from random import random, shuffle
import mimo
import pygame


class ScanningScene(OptimizationScene):
    """'scanner.opt' optimization minigame.

    A hidden figure is placed on an 8x8 LED radar matrix.  The player swipes
    to sweep a colored scan line across the matrix; only the one correct
    mode/direction combination (set per figure in NextFigure) reveals the
    figure's shadow.  The player then guesses which of the five on-screen
    figures was hidden via the buttons/keys handled in ProcessInputOpt.
    """

    def __init__(self):
        self.minigametitle = 'scanner.opt'
        OptimizationScene.__init__(self)
        self.coldown = 0          # time accumulator between scan-line steps
        self.index = 8            # current row/column of the scan line head
        self.line_color = 0xfff
        self.playing = False      # True while a sweep is animating
        self.direction = 1        # +1 / -1 sweep direction
        self.mode = 1             # 1 = vertical lines (horizontal sweep), 2 = horizontal lines
        self.speed = 0.02         # seconds per scan-line step
        self.level = 0
        self.displayed_figure_index = -1
        self.fails = 0
        self.detected_contact = False
        self.countdown = 45000
        self.current_time = 45000
        self.countdown_shadow = 0  # counts down to the shadow reveal after a lock
        self.guess_mode = 2        # the mode/direction pair that reveals the figure
        self.guess_direction = 1
        self.guess_color = False   # True while sweeping with the correct pair

        self.colors = [
            [0x00, 0x5f, 0xff], #blue
            [0x27, 0xff, 0x93], #green
            [0xf7, 0x5a, 0xff], #pink
            [0x8b, 0x27, 0xff], #purple
            [0xea, 0xe1, 0xf3] #white
        ]
        # Flatten to [index, r, g, b, index, r, g, b, ...] for the LED API.
        led_lights = []
        index = 0
        for color in self.colors:
            led_lights += [index] + color
            index += 1
        mimo.set_optimization_leds_color(led_lights)

        # 8x8 occupancy grid of the currently hidden figure.
        self.radar_matrix = [
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0]
        ]

        # load assets
        self.piece_sprites = []
        self.piece_sprites.append(utils.Sprite(constants.SPRITES_SCANNING + 'piece_blue.png', 0, 0))
        self.piece_sprites.append(utils.Sprite(constants.SPRITES_SCANNING + 'piece_green.png', 0, 0))
        self.piece_sprites.append(utils.Sprite(constants.SPRITES_SCANNING + 'piece_pink.png', 0, 0))
        self.piece_sprites.append(utils.Sprite(constants.SPRITES_SCANNING + 'piece_purple.png', 0, 0))
        self.piece_sprites.append(utils.Sprite(constants.SPRITES_SCANNING + 'piece_white.png', 0, 0))
        index = 0

        self.progress = utils.Text(
            'Nivel 1',
            self.normal_font,
            color = constants.PALETTE_TEXT_CYAN
        )
        self.progress.SetPosition(640, 160)

        # sfx and audio
        audio_path = 'assets/audio/SFX/Scanning/'
        self.MG1_ObjSort = utils.get_sound(audio_path + 'MG1_ObjSort.ogg')
        self.MG1_ObjSort.set_volume(0.6)
        audio_path = 'assets/audio/SFX/Scanning/'
        self.MG1_Sweep = utils.get_sound(audio_path + 'MG1_Sweep.ogg')
        self.MG1_Sweep.set_volume(1.0)
        audio_path = 'assets/audio/SFX/Scanning/'
        self.MG1_Success = utils.get_sound(audio_path + 'MG1_Success.ogg')
        self.MG1_Success.set_volume(1)
        audio_path = 'assets/audio/SFX/Scanning/'
        self.MG1_Failed = utils.get_sound(audio_path + 'MG1_Failed.ogg')
        self.MG1_Failed.set_volume(1)

        self.NextFigure()

    def SetupMimo(self):
        """Configure the mimo hardware (LEDs, tuners, buttons) for this scene."""
        mimo.set_led_brightness(150)
        mimo.set_tunners_enable_status(True)
        mimo.set_independent_lights(False, True)
        mimo.set_buttons_enable_status(False, True)
        mimo.set_optimization_buttons_mode([0,1, 1,1, 2,1, 3,1, 4,1])
        mimo.set_optimization_buttons_active_status([0,0, 1,0, 2,0, 3,0, 4,0, 5,0])

    def ProcessInputOpt(self, events, pressed_keys):
        """Keyboard fallback: d/f/g and c/v map to the five guess buttons."""
        for event in events:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_d:
                self.GuessFigure(0)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_f:
                self.GuessFigure(2)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_g:
                self.GuessFigure(4)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_c:
                self.GuessFigure(1)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_v:
                self.GuessFigure(3)

    def Update(self, dt):
        """Advance the scan line one step every `speed` seconds while playing."""
        OptimizationScene.Update(self, dt)
        if not self.playing:
            return
        self.coldown += dt
        if self.coldown > self.speed:
            self.coldown = 0
            self.UpdateLineMov()

    def UpdateLineMov(self):
        """Move the scan line head; end the sweep once it is fully off-matrix."""
        self.index += self.direction
        if self.index < -12:
            self.playing = False
            graphics.clear()
            self.detected_contact = False
        elif self.index > 19:
            self.playing = False
            graphics.clear()
            self.detected_contact = False
        self.countdown_shadow -= 1

    def RenderBody(self, screen):
        """Draw the candidate figures, the figure shadow (if locked), and the scan line."""
        index = 0
        for figure in FIGURES[self.level]:
            self.DrawFigure(screen, figure, index)
            index += 1
        if self.detected_contact:
            if self.countdown_shadow <= 0:
                self.display_figure_shadow()
        self.progress.RenderWithAlpha(screen)
        if self.playing == False:
            return
        # Scan line with a fading tail behind the head.
        self.draw_color_line(0xfff&self.line_color, self.index)
        self.draw_color_line(0xddd&self.line_color, self.index-self.direction)
        self.draw_color_line(0xbbb&self.line_color, self.index-self.direction*2)
        self.draw_color_line(0x999&self.line_color, self.index-self.direction*3)
        self.draw_color_line(0x777&self.line_color, self.index-self.direction*4)
        self.draw_color_line(0x555&self.line_color, self.index-self.direction*5)
        self.draw_color_line(0x333&self.line_color, self.index-self.direction*6)
        self.draw_color_line(0x111&self.line_color, self.index-self.direction*7)
        self.draw_color_line(0x0&self.line_color, self.index-self.direction*8)
        if self.guess_color and not self.detected_contact:
            for j in range(0, 8):
                y = j
                for i in range(0, 8):
                    x = i
                    if self.mode == 2:
                        y = i
                        x = j
                    if self.direction == -1:
                        y = 7-y
                    # NOTE(review): this elif repeats the condition above, so
                    # the x-mirror branch is unreachable dead code — it looks
                    # like it was meant to depend on self.mode; confirm.
                    elif self.direction == -1:
                        x = 7-x
                    if self.radar_matrix[y][x]==1:
                        self.Lock()
        graphics.render()

    def display_figure_shadow(self):
        """Plot the hidden figure on the LED matrix, fading per countdown_shadow."""
        color = [0x111, 0x222, 0x333, 0x444, 0xaaa, 0xddd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xddd, 0xddd, 0xbbb, 0x999, 0x999, 0x666, 0x666, 0x111, 0x0][-self.countdown_shadow]
        graphics.setColor(color&self.line_color)
        for j in range(0, 8):
            y = j
            for i in range(0, 8):
                x = i
                if self.radar_matrix[y][x] == 1:
                    graphics.plot(x, y)

    def Lock(self):
        """Latch a detection: the current sweep touched the hidden figure."""
        if self.detected_contact:
            return
        self.detected_contact = True
        self.countdown_shadow = 4
        print("detected collision")

    def draw_color_line(self, color, idx):
        """Draw one row/column of the scan line at idx (no-op off-matrix)."""
        if idx < 0 or idx > 7:
            return
        graphics.setColor(color)
        if self.mode == 1:
            graphics.plotLine(idx, 0, idx, 7)
        elif self.mode == 2:
            graphics.plotLine(0, idx, 7, idx)

    def DrawFigure(self, screen, figure, index):
        """Render candidate figure `index` on screen, one sprite per filled cell."""
        for j in range(0, len(figure)):
            for i in range(0, len(figure[0])):
                if figure[j][i] == 1:
                    self.piece_sprites[index].Render(screen, (150+220*(index)+i*40, 350+j*40))

    def NewScan(self, mode, direction):
        """Start a sweep; the line color encodes the mode/direction pair."""
        self.MG1_Sweep.play()
        self.line_color = 0xfff
        if mode == 1:
            self.line_color = 0xf0f if direction == 1 else 0x0f0
        else:
            self.line_color = 0x0ff if direction == 1 else 0x80f
        self.mode = mode
        self.direction = int(direction)
        if self.direction == 1:
            self.index = -1
        else:
            self.index = 8
        self.playing = True
        self.detected_contact = False
        # Only the figure's designated mode/direction pair can reveal it.
        self.guess_color = mode == self.guess_mode and direction == self.guess_direction

    def SwipeHorizontal(self, distance):
        """Tuner input: a short horizontal swipe starts a mode-1 sweep."""
        if self.state == STATUS.FINISHING:
            return
        if self.playing or abs(distance)>10:
            return
        self.speed = 0.02
        self.NewScan(1, distance/abs(distance))

    def SwipeVertical(self, distance):
        """Tuner input: a short vertical swipe starts a mode-2 sweep."""
        if self.state == STATUS.FINISHING:
            return
        if self.playing or abs(distance)>10:
            return
        self.speed = 0.02
        self.NewScan(2, distance/abs(distance))

    def NextFigure(self):
        """Pick and hide the next figure, centred on the radar matrix."""
        self.radar_matrix = [
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0,0]
        ]
        shuffle(FIGURES[self.level])
        self.displayed_figure_index = int(random()*len(FIGURES[self.level]))
        self.figure = FIGURES[self.level][self.displayed_figure_index]
        x = 4 - int(len(self.figure[0])/2)
        y = 4 - int(len(self.figure)/2)
        for j in range(0,len(self.figure)):
            for i in range(0, len(self.figure[0])):
                self.radar_matrix[j+y][i+x] = self.figure[j][i]

        # Which sweep reveals each slot (line color / figure slot / pair):
        # f0f pink #2
        # mode 1, direction 1
        # 0f0 green #1
        # mode 1, direction -1
        # 0ff blue #0
        # mode 2, direction 1
        # 80f purple #3
        # mode 2, direction -1
        if self.displayed_figure_index == 0:
            self.guess_mode = 2
            self.guess_direction = 1
        elif self.displayed_figure_index == 1:
            self.guess_mode = 1
            self.guess_direction = -1
        elif self.displayed_figure_index == 2:
            self.guess_mode = 1
            self.guess_direction = 1
        elif self.displayed_figure_index == 3:
            self.guess_mode = 2
            self.guess_direction = -1
        self.MG1_ObjSort.play()
        # display options in screen

    def GuessFigure(self, index):
        """Handle a guess: advance a level on success, count fails otherwise."""
        if index == self.displayed_figure_index:
            self.level += 1
            self.MG1_Success.play()
            self.progress.SetText('Nivel {}'.format(self.level + 1))
        else:
            self.fails += 1
            self.MG1_Failed.play()
            self.progress.SetText('Nivel {}'.format(self.level + 1))
            if self.fails >= 10:
                self.score = self.level/5
                self.FinishOptimization()
                return
        if self.level >= len(FIGURES):
            self.level -= 1
            self.score = self.level/5
            self.FinishOptimization()
        else:
            self.NextFigure()


# Candidate figure shapes per level (0/1 occupancy grids).
FIGURES = [
    # first level
    [[
        [0,1,0],
        [1,1,1]
    ],[
        [0,1],
        [1,1],
        [0,1]
    ],[
        [1,0],
        [1,1],
        [1,0]
    ],[
        [1,1,1],
        [0,1,0]
    ],[
        [1,0,0],
        [1,1,1]
    ]],
    # second level
    [[
        [0,1,0],
        [1,1,1]
    ],[
        [0,1,1],
        [1,1,0]
    ],[
        [1,1,0],
        [0,1,1]
    ],[
        [0,1,1],
        [0,1,1]
    ],[
        [0,0,1],
        [1,1,1]
    ]],
    # third level
    [[
        [0,1,0],
        [1,1,1],
        [0,1,0]
    ],[
        [1,1,0],
        [0,1,0],
        [0,1,1]
    ],[
        [1,1,1],
        [0,1,0],
        [0,1,0]
    ],[
        [1,1,1],
        [1,0,0],
        [1,0,0]
    ],[
        [1,1,0],
        [1,0,0],
        [1,1,0]
    ]],
    # third level
    [[
        [0,0,0,0],
        [1,0,0,1],
        [1,1,1,1]
    ],[
        [0,1,1,0],
        [0,1,1,1],
        [0,0,1,0]
    ],[
        [0,1,0,0],
        [1,1,1,1],
        [0,0,1,0]
    ],[
        [0,0,0,1],
        [1,1,1,1],
        [0,0,0,1]
    ],[
        [0,0,1,1],
        [0,1,1,1],
        [0,0,0,1]
    ]]
]
#!/usr/bin/env python #coding=utf-8 #__author__ = louis, # __date__ = 2017-08-16 14:49, # __email__ = yidongsky@gmail.com, # __name__ = urls.py from django.conf.urls import url from django.views.decorators.csrf import csrf_exempt from django.contrib.auth.decorators import login_required from .views import ( StatusView, AlertListView, NewAlertView, EditAlertView, DeleteAlertView, HostListView, HostDetailView, RecordDataApiView, SelectDataApiView ) urlpatterns = [ url(r'^alerts/$', login_required(AlertListView.as_view()), name='alerts-list'), url(r'^alerts/new/$', login_required(NewAlertView.as_view()), name='alerts-new'), url(r'^alerts/(?P<pk>\d+)/edit/$', login_required(EditAlertView.as_view()),name='alerts-edit'), url(r'^alerts/(?P<pk>\d+)/delete/$', login_required(DeleteAlertView.as_view()),name='alerts-delete'), url(r'^record/$', csrf_exempt(RecordDataApiView.as_view()),name='record-data'), url(r'^select/$', SelectDataApiView.as_view(),name='Select-data'), url(r'^hosts/$', login_required(HostListView.as_view()), name='hosts-list'), url(r'^(?P<id>\d+)/$', HostDetailView, name='hosts-detail'), url(r'^$', StatusView.as_view(), name="status"), ]
#! /usr/bin/python
# -*- coding: utf-8 -*-

#PyQt
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
#View
from View.widget import Ui_Widget
#Controller
from Controller.StateCell import StateCellFactory
from Controller.ControllerDijkstra import ControllerDijkstra


class ViewCell(QtGui.QTableWidgetItem):
    """One grid cell of the Dijkstra map, rendered from its StateCell."""
    __DEBUG__ = False

    def __init__(self):
        """
        @brief Initialize the ViewCell (default state: ROAD).
        """
        QtGui.QTableWidgetItem.__init__(self)
        self.state = None
        self.state_factory = StateCellFactory()
        self.change_state(StateCellFactory.ROAD)

    def change_state(self, i_State):
        """
        @brief Change the cell's state and redraw it.
        """
        self.state = self.state_factory.create(i_State)
        self.draw()

    def draw(self):
        """
        @brief Render the cell according to its current state.
        """
        theBackColor = self.state.back_color()
        theForeColor = self.state.fore_color()
        theText = self.state.text()
        theImage = self.state.image()
        # Set the foreground color.
        theColor = QtGui.QColor(theForeColor)
        theBrush = QtGui.QBrush()
        theBrush.setColor(theColor)
        self.setForeground(theBrush)
        # Set the background color.
        self.setBackgroundColor(theBackColor)
        # Set the display text.
        self.setText(theText)
        if(self.__DEBUG__): print("ViewCell.draw.BackColor:{0}".format(theBackColor))
        if(self.__DEBUG__): print("ViewCell.draw.ForeColor:{0}".format(theForeColor))
        if(self.__DEBUG__): print("ViewCell.draw.text:{0}".format(theText))
        if(self.__DEBUG__): print("ViewCell.draw.Image:{0}".format(theImage))


class ViewWidget(QtGui.QMainWindow, Ui_Widget):
    """Main window: a SIZE x SIZE grid plus start/stop/open/reset buttons;
    a timer animates the route found by ControllerDijkstra."""
    __DEBUG__ = False
    SIZE = 30      # grid rows and columns
    INTERVAL = 30  # timer tick in milliseconds

    def __init__(self, *args, **kw):
        """
        @brief Initialize the ViewWidget.
        """
        QtGui.QMainWindow.__init__(self, *args, **kw)
        self.setupUi(self)
        # Create the Dijkstra controller.
        self.controller_dijikstra = ControllerDijkstra()
        self.set_Geometry_size(self.SIZE, self.SIZE)
        # Disable editing via double-click.
        self.tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        #self.setFixedSize(370, 400)
        # Configure the animation timer.
        self._interval = self.INTERVAL
        self._timer = QtCore.QTimer(parent=self)
        self._timer.setInterval(self._interval)
        # Connect the timer to its handler.
        self._timer.timeout.connect(self.do_loop)
        # Connect the buttons to their handlers.
        self.startButton.clicked.connect(self.start_action)
        self.stopButton.clicked.connect(self.stop_action)
        self.openButton.clicked.connect(self.open_action)
        self.resetButton.clicked.connect(self.reset_action)
        # Reset the map.
        self.reset_action()
        # Start in the same state as if the stop button had been pressed.
        self.stop_action()

    def set_Geometry_size(self, i_Row, i_Col):
        """
        @brief Configure the TableWidget: i_Row x i_Col grid of ViewCells.
        """
        self.tableWidget.setRowCount(i_Row)
        self.tableWidget.setColumnCount(i_Col)
        Hsize = self.tableWidget.horizontalHeader().defaultSectionSize()
        Vsize = self.tableWidget.verticalHeader().defaultSectionSize()
        self.tableWidget.setGeometry(QtCore.QRect(20, 50, (i_Row+2)*Vsize, (i_Col+2)*Hsize))
        for theRow in range(i_Row):
            for theCol in range(i_Col):
                theItem = ViewCell()
                self.tableWidget.setItem(theRow, theCol, theItem)

    def do_loop(self):
        """
        @brief Timer tick: highlight the next node of the found route;
               stop once the route list is exhausted.
        """
        if(0==len(self.controller_dijikstra.routeList)):
            self.stop_action()
            return
        theNode = self.controller_dijikstra.routeList.pop(0)
        if(None == theNode):
            self.stop_action()
            return
        # Start/Goal markers are skipped — they are already drawn on the map.
        if("Start" == theNode.__str__()):
            return
        if("Goal" == theNode.__str__()):
            return
        theX = theNode.x
        theY = theNode.y
        theItem = self.tableWidget.item(theY, theX)
        theItem.setSelected(True)

    def start_action(self):
        """
        @brief Start button pressed: run the search and animate the route.
        """
        # Disable selection.
        self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        # Set up Dijkstra from the current grid contents.
        self.controller_dijikstra.make_dijkstra(self.SIZE, self.SIZE, self.tableWidget)
        # Search for the route.
        self.controller_dijikstra.search_root()
        # Print the shortest route for debugging.
        if(self.__DEBUG__): self.controller_dijikstra.print_debug_min_route()
        # Start the update timer.
        self._timer.start()

    def stop_action(self):
        """
        @brief Stop button pressed: halt the animation.
        """
        ## Make display cells selectable.
        #self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
        # Disable selection.
        self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        # Stop the update timer.
        self._timer.stop()

    def open_action(self):
        """
        @brief Open button pressed: load a map from a text file.
        """
        self.reset_action()
        # Load the map from a text file.
        self.controller_dijikstra.set_any_map(self.tableWidget)
        self.open_action_implement()

    def open_action_implement(self):
        """Shared tail of open/reset: lock selection and stop the timer."""
        ## Make display cells selectable.
        #self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
        # Disable selection.
        self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        # Stop the update timer.
        self._timer.stop()

    def reset_action(self):
        """
        @brief Reset button pressed: rebuild the grid and reload the initial map.
        """
        # Disable selection.
        self.tableWidget.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        # Stop the update timer.
        self._timer.stop()
        # Clear the Dijkstra state.
        self.controller_dijikstra.clear()
        for theRow in range(self.SIZE):
            for theCol in range(self.SIZE):
                theItem = ViewCell()
                self.tableWidget.setItem(theRow, theCol, theItem)
                theItem.setSelected(False)
        # Load the initial map.
        self.controller_dijikstra.set_initial_map(self.tableWidget)
        self.open_action_implement()
#!/usr/bin/env python3

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

__app__ = "onewire2mqtt Adapter"
__VERSION__ = "0.971"
__DATE__ = "16.02.2020"
__author__ = "Markus Schiesser"
__contact__ = "M.Schiesser@gmail.com"
__copyright__ = "Copyright (C) 2019 Markus Schiesser"
__license__ = 'GPL v3'

import os
import sys
import time
import json
import logging
from configobj import ConfigObj

from library.ds18b20 import ds18b20
from library.devicereader import devicereader
from library.mqttclient import mqttclient
from library.logger import loghandler


class manager(object):
    """Read DS18B20 one-wire temperature sensors and publish them via MQTT.

    Orchestrates config loading, logging setup, kernel-module loading,
    sensor reading and a single MQTT publish per run().
    """

    def __init__(self, configfile='./onewire2mqtt.cfg'):
        # self._log = loghandler('ONEWIRE')
        self._configfile = configfile
        self._logcfg = None        # unused placeholder; config is kept in _cfg_log
        self._mqttbroker = None
        self._onewire = None
        self._rootLoggerName = ''

    def readConfig(self):
        """Parse the config file into the _cfg_* section dicts.

        Exits the process if the file is missing or empty.
        """
        _config = ConfigObj(self._configfile)
        if bool(_config) is False:
            print('ERROR config file not found', self._configfile)
            sys.exit()
        self._cfg_log = _config.get('LOGGING', None)
        self._cfg_mqtt = _config.get('BROKER', None)
        self._cfg_onewire = _config.get('ONEWIRE', None)
        self._cfg_influx = _config.get('INFLUX', None)
        return True

    def startLogger(self):
        """Set up the root logger from the LOGGING config section."""
        self._root_logger = loghandler(self._cfg_log.get('NAME', 'ONEWIRE'))
        self._root_logger.handle(self._cfg_log.get('LOGMODE', 'PRINT'), self._cfg_log)
        self._root_logger.level(self._cfg_log.get('LOGLEVEL', 'DEBUG'))
        self._rootLoggerName = self._cfg_log.get('NAME', 'ONEWIRE')
        # Child logger named after this class, under the root logger.
        self._log = logging.getLogger(self._rootLoggerName + '.' + self.__class__.__name__)
        return True

    def startOneWire(self):
        """Load the one-wire kernel modules (requires root privileges)."""
        self._log.debug('Methode: startOneWire()')
        os.system('modprobe w1-piface')
        os.system('modprobe w1-therm')
        return True

    def readOneWire(self):
        """Read every detected sensor; return {device_id: temperature_celsius}."""
        self._log.debug('Methode: readOneWire()')
        result = {}
        basedir = self._cfg_onewire.get('BASEDIR', '/temp')
        devicefile = self._cfg_onewire.get('DEVICEFILE', 'w1_slave')
#        deviceId = self._cfg_onewire.get('DEVICEID','28')
        ds = ds18b20(self._rootLoggerName)
        dr = devicereader(basedir, devicefile, self._rootLoggerName)
        devices = dr.readDevice()
#        print('devices found:', devices)
        for deviceId, deviceFile in devices.items():
            # NOTE(review): the file is read twice -- once for this debug
            # print and once for `data` below; consider reading once.
            print(dr.readFile(deviceFile))
            data = dr.readFile(deviceFile)
            if data is not None:
                ds.readValue(data)
                result[deviceId] = ds.getCelsius()
#        print(result)
        return result

    def publishData(self, data):
        """Publish `data` as one JSON payload to the configured MQTT topic."""
        self._log.debug('Methode: publishData(%s)', data)
        mqttpush = mqttclient(self._rootLoggerName)
        _topic = self._cfg_mqtt.get('PUBLISH', '/ONEWIRE')
        mqttpush.pushclient(self._cfg_mqtt)
        mqttpush.publish(_topic, json.dumps(data))
        # Give the client a moment to flush before disconnecting.
        time.sleep(1)
        mqttpush.disconnect()
        return True

    def run(self):
        """Full one-shot cycle: config -> logger -> sensors -> MQTT publish."""
        self.readConfig()
        self.startLogger()
        self._log.info('Startup, %s %s %s' % (__app__, __VERSION__, __DATE__))
        self.startOneWire()
        data = self.readOneWire()
        self.publishData(data)
        return True


if __name__ == "__main__":
    # Optional single CLI argument: path to the config file.
    if len(sys.argv) == 2:
        configfile = sys.argv[1]
    else:
        configfile = './onewire2mqtt.cfg'

    mgr_handle = manager(configfile)
    mgr_handle.run()
# Fixed Point Iteration Method # Importing math to use sqrt function import math from sympy import * def func(expr, value, x): return expr.subs(x, value) # Re-writing f(x)=0 to x = g(x) def funcg(expr, value, x): return expr.subs(x, value) # Implementing Fixed Point Iteration Method def fixedPointIteration(expr, gx, x0, e, N, x): print('\n\n*** FIXED POINT ITERATION ***') step = 1 flag = 1 condition = True while condition: x1 = funcg(gx, x0, x) print('Iteration-%d, x1 = %0.6f and f(x1) = %0.6f' % (step, x1, func(expr, x1, x))) x0 = x1 step = step + 1 if step > N: flag = 0 break condition = abs(func(expr, x1, x)) > e if flag == 1: # print('\nRequired root is: %0.8f' % x1) return "%d ): %.6f" % (step, x1) else: print('\nNot Convergent.') # Main code def mainFunc(function, g_x, N, e, x0): x = var('x') # the possible variable names must be known beforehand... expr = sympify(function) g_x = sympify(g_x) return fixedPointIteration(expr, g_x,x0, e, N,x)
# splash:runjs("document.getElementsByClassName('laypage_next').click()") # "getElementByXpath("//html[1]/body[1]/div[1]")" # document.querySelector('.laypage_next').setAttribute('data-page',%d);document.querySelector('.laypage_next').click() luaScript = ''' function main(splash) splash.images_enabled = splash.args.images splash:go(splash.args.url) splash:wait(2) js = string.format(splash.args.js) splash:runjs(js) splash:wait(2) return splash:html() end ''' luaSimple = ''' function main(splash) splash.images_enabled = splash.args.images splash:go(splash.args.url) splash:wait(2) return splash:html() end '''
# 1. Import the required libraries
import sys
import numpy as np
import matplotlib.pyplot as plt
from keras.utils import np_utils
from keras.datasets import mnist


# Numerically stable softmax.
def softmax_stable(Z):
    # Z = Z.reshape(Z.shape[0], -1)
    e_Z = np.exp(Z - np.max(Z, axis = 1, keepdims = True))  # subtract row max to avoid overflow
    A = e_Z / e_Z.sum(axis = 1, keepdims = True)
    return A


# Cross-entropy loss.
def softmax_loss(X, y, W):
    # y is the one-hot ground truth, shape (N, C); X is (N, d); W is (d, C).
    A = softmax_stable(X.dot(W))  # predicted probabilities, shape (N, C)
    id0 = range(X.shape[0])  # row indices 0..N-1
    k = np.argmax(y, axis=1)  # column of the 1 in each one-hot row
    # Mean negative log-likelihood over the N samples (scalar).
    return -np.mean(np.log(A[id0, k]))


# Gradient of the cross-entropy loss w.r.t. W.
def softmax_grad(X, y, W):
    # y is one-hot.
    A = softmax_stable(X.dot(W))  # shape (N, C)
    id0 = range(X.shape[0])
    k = np.argmax(y, axis=1)
    A[id0, k] -= 1  # A - Y, shape (N, C)
    return X.T.dot(A) / X.shape[0]


# Prediction: per row, the class index with the highest score.
def pred(W, X):
    # Returns a vector of length N; X is (N, d), W is (d, C).
    return np.argmax(X.dot(W), axis = 1)


# Mini-batch gradient-descent training loop.
def softmax_fit(X, y, W, lr = 0.01, nepoches = 10, tol = 1e-5, batch_size = 10):
    acc_plot = [0]  # accuracy history (for plotting)
    ep_plot = [0]   # epoch history (for plotting)
    W_old = W.copy()
    ep = 0
    loss_hist = [softmax_loss(X, y, W)]  # store history of loss
    N = X.shape[0]
    nbatches = int(np.ceil(float(N)/batch_size))  # number of mini-batches, rounded up
    while ep < nepoches:
        mix_ids = np.random.permutation(N)  # mix data: random order of the N sample indices
        for i in range(nbatches):
            # get the i-th batch
            batch_ids = mix_ids[batch_size*i:min(batch_size*(i+1), N)]
            X_batch, y_batch = X[batch_ids], y[batch_ids]
            W -= lr*softmax_grad(X_batch, y_batch, W)  # gradient-descent update
        loss_hist.append(softmax_loss(X, y, W))
        ep_plot.append(ep)  # record epoch (for plotting)
        # NOTE(review): reads the module-level globals X_bar_test / Y_test and
        # the global acc() -- couples this function to the script below; confirm.
        acc_plot.append(acc(W, X_bar_test, Y_test))  # record accuracy (for plotting)
        ep += 1
        # Stop early once W barely changes between epochs.
        if np.linalg.norm(W - W_old)/W.size < tol:
            break
        # print('epochs: %d, loss value: %f' % (ep, loss_hist[-1]))
        W_old = W.copy()
    return W, loss_hist, ep_plot, acc_plot


# Accuracy (percent) of predictions against one-hot labels.
def acc(W, X, y):
    y_pred = pred(W, X)  # shape (N,)
    y = np.argmax(y, axis=1)  # one-hot -> class indices, shape (N,)
    acc = 100*np.mean(y_pred == y)
    return acc


# 2. Load the MNIST data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_val, y_val = X_train[50000:60000,:], y_train[50000:60000]  # shape (10000, 28, 28)
X_train, y_train = X_train[:50000,:], y_train[:50000]  # shape (50000, 28, 28)

# Reshape and rescale the data (grayscale values are 0 or 255).
X_train = X_train.reshape(-1, 28*28)/255
X_val = X_val.reshape(-1, 28*28)/255
X_test = X_test.reshape(-1, 28*28)/255

# Build the extended input matrices (prepend a 1 to every sample for the bias).
X_bar_train = np.concatenate((np.ones((X_train.shape[0],1)), X_train), axis=1)  # shape (50000, 785)
X_bar_val = np.concatenate((np.ones((X_val.shape[0],1)), X_val), axis=1)        # shape (10000, 785)
X_bar_test = np.concatenate((np.ones((X_test.shape[0],1)), X_test), axis=1)     # shape (10000, 785)

# 3. One-hot encode the labels (Y)
Y_train = np_utils.to_categorical(y_train, 10)  # shape (50000, 10)
Y_val = np_utils.to_categorical(y_val, 10)      # shape (10000, 10)
Y_test = np_utils.to_categorical(y_test, 10)    # shape (10000, 10)

# 4. Train the model
# Random initial weight matrix of shape (785, 10).
def W(X, y):
    # y is the ground-truth (one-hot) matrix.
    W = np.random.rand(X.shape[1], y.shape[1])
    return W

# Run on the training set.
W_0_train = W(X_bar_train, Y_train)  # initial weight matrix
#W_init=np.zeros((785,10))
W_train, loss_hist, ep_plot, acc_plot = softmax_fit(X_bar_train, Y_train, W_0_train, lr=0.1, nepoches=20, tol=1e-5, batch_size=10)

# Plot accuracy vs. epoch.
plt.xlim(xmax=12)
plt.ylim(ymax=100)
plt.plot(ep_plot, acc_plot)
plt.xlabel('epoch')
plt.ylabel('accuracy(%)')
#plt.legend()
plt.show()

print('Training accracy: %.2f ' % acc(W_train, X_bar_test, Y_test))
sys.exit(0)
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations from dataclasses import dataclass from pants.backend.rust.target_types import RustPackageTarget from pants.core.goals.tailor import PutativeTarget, PutativeTargets, PutativeTargetsRequest from pants.engine.fs import PathGlobs, Paths from pants.engine.rules import Get, collect_rules, rule from pants.engine.unions import UnionRule from pants.util.dirutil import group_by_dir from pants.util.logging import LogLevel @dataclass(frozen=True) class PutativeRustTargetsRequest(PutativeTargetsRequest): pass @rule(level=LogLevel.DEBUG, desc="Determine candidate Rust targets to create") async def find_putative_rust_targets(request: PutativeRustTargetsRequest) -> PutativeTargets: all_cargo_toml_files = await Get(Paths, PathGlobs, request.path_globs("Cargo.toml")) putative_targets = [ PutativeTarget.for_target_type( RustPackageTarget, path=dirname, name=None, triggering_sources=sorted(filenames), ) for dirname, filenames in group_by_dir(all_cargo_toml_files.files).items() ] return PutativeTargets(putative_targets) def rules(): return [*collect_rules(), UnionRule(PutativeTargetsRequest, PutativeRustTargetsRequest)]
import torch
import argparse
import os
import numpy as np
import biovec
import scripts.ffnn.model as ffnn

# CLI: an input file (alternating name/sequence lines) and an output report.
parser = argparse.ArgumentParser(description='Predict ppi bindings')
parser.add_argument('-i', '--input_file', required=True)
parser.add_argument('-o', '--output_file', required=True)
args = parser.parse_args()

# Read files from user
input_file = args.input_file
output_file = args.output_file

# Check that files are not missing
# NOTE(review): this also requires the *output* file to already exist --
# confirm that is intended rather than creating it on demand.
if not os.path.isfile(input_file) or not os.path.isfile(output_file):
    print("missing files")
    exit()

# Prepare word2vec embeddings from user input
modeL = biovec.models.load_protvec("trained_models/trained.model")
modeL.wv.load_word2vec_format(fname="output/trained.vectors")


def compute_vector(word):
    # Sum of the embeddings of every 3-gram covering `word`.
    return sum([modeL.wv.get_vector(x) for x in [word[i:i + 3] for i in range(len(word) - 2)]])


def compose_data(seq):
    # Build one window vector per predictable position: a 7-residue window
    # centered on each residue from index 3 to len(seq)-4 (the first and
    # last 3 residues have no full window).
    vectors = []
    vectors.extend([compute_vector(seq[i - 3:i + 4]) for i in range(3, len(seq) - 3)])
    return np.stack(vectors, axis=0)


# Collect protein names and amino-acids sequences
# (the input alternates: name line, then sequence line).
proteins_names = []
protein_sequences = []
with open(input_file) as file:
    i = 0
    for line in file:
        line = line.strip()
        if i % 2 == 0:
            proteins_names.append(line)
        else:
            protein_sequences.append(line)
        i = i + 1

# load model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load NN and use same parameters as for the trained model
dimensions = compose_data(protein_sequences[0]).shape[1]
model = ffnn.NeuralNet(input_size=dimensions).to(device)

# Load saved model and set to evaluation mode
model.load_state_dict(torch.load("trained_models/ffnn_model.ckpt"))
model.eval()

# Write to output file
with open(output_file, "w") as file:
    for i in range(len(proteins_names)):
        # Write protein name
        file.write(proteins_names[i] + "\n")

        # Note: 3-gram model does not allow us to predict the first 3 residues
        # Mark these residues as n/a
        j = 0
        for j in range(3):
            file.write(protein_sequences[i][j] + "\tn/a\tn/a\n")

        # Create n-grams from sequence for inputs
        X_test = compose_data(protein_sequences[i])

        # Predict on new sequences
        prediction = model(torch.from_numpy(np.float32(X_test)))
        _, predicted = torch.max(prediction, 1)
        predicted_bindings = predicted.numpy()

        # Get prediction probabilities
        sm = torch.nn.Softmax(dim=1)
        probabilities, _ = torch.max(sm(prediction), 1)
        prediction_probabilities = probabilities.detach().numpy()

        # Parse predicted bindings and the probability
        # (class 0 -> '-' non-binding, otherwise '+' binding).
        for a in range(3, len(protein_sequences[i]) - 3):
            if predicted_bindings[a - 3] == 0:
                binding = '-'
            else:
                binding = '+'
            file.write(protein_sequences[i][a] + "\t" + str(binding) + "\t" + str(round(prediction_probabilities[a - 3],3)) + "\n")

        # Note: 3-gram model does not allow us to predict the last 3 residues
        # Mark these residues as n/a
        for j in range(-3, 0):
            file.write(protein_sequences[i][j:] + "\tn/a\tn/a\n" if j == -1 else protein_sequences[i][j:j+1] + "\tn/a\tn/a\n")

print("\nPrediction has finished. Please check output files.")
import pandas
import math
import numpy


class Params:
    """Input data for a superblock facility-location model.

    Loads center and distance data from Excel files and derives the index
    sets (centers, gates, superblocks) the optimization model needs.
    """

    # creates array per center type with specific centers I_c
    def create_specific_centers(self):
        set_I_c = []
        self.dict_centerinstance_centertype = {}
        center_counter = 0
        j = 0  # index of the current center type
        for i in self.ub_per_center:
            # Consecutive instance ids for this center type.  The `j` inside
            # the comprehension is local to it (Python 3); the outer `j`
            # still indexes the center type afterwards.
            next_row = [center_counter + j for j in range(i)]
            for n in next_row:
                self.dict_centerinstance_centertype[n] = self.centernames[j]
            set_I_c.append(next_row)
            # Continue numbering after the last id (unchanged if the row is empty).
            center_counter = next_row[-1] + 1 if next_row else center_counter
            j += 1
        print("Set ic ", set_I_c)
        print(self.dict_centerinstance_centertype)
        return set_I_c

    # creates array with specific gates per superblock
    def create_gsb(self):
        G_sb = []
        count = 0
        for sb in range(self.num_SB_total):
            new_row = []
            for n in range(self.num_G_per_SB):
                new_row.append(count)
                count += 1
            G_sb.append(new_row)
        return G_sb

    def get_ub_for_center(self):
        # Upper bound on the number of placeable instances per center type,
        # derived from demand, capacity, min utilization beta, and the number
        # of superblocks not reserved for giga centers.
        ub_per_center = [math.floor((self.demand_c[i] * (self.num_SB_total - len(self.subset_SB_with_GC)) / (self.capacity_c[i] * self.beta[i]))) for i in range(len(self.capacity_c))]
        return ub_per_center

    # creates dictionary of type [4,1],[2,3]
    # means: on superblock 4, we have centertype 1 (that has to be a gigacenter), on superblock 2, we have c-type 3...
    def create_dict_for_gigacenter(self, index_sb_with_gc):
        self.dict_sb_giga_centertype = {}
        for i in index_sb_with_gc:
            # Map the center-type name (e.g. 'Hospital') back to its index.
            index_gigacenter = (numpy.where(numpy.array(self.centernames) == i[1]))[0][0]
            self.dict_sb_giga_centertype[i[0]] = index_gigacenter

    # given the minimum distance that has to be in between the origin superblock and destination commercial building
    # for a share of the population in each superblock, we generate a 2-dim. array
    # first dimension: index of origin gate
    # second dimension: all other gates in a sufficiently large distance to that gate
    def create_subset_gates_for_commercials(self, zone_dist_begin, zone_dist_end):
        # NOTE(review): iterating the DataFrame yields its column labels; this
        # only works because the matrix is read with header=None, so the labels
        # are the integers 0..n-1 -- confirm the Excel sheet stays square.
        subset_G_in_zone = []
        for i in self.distances:
            matches_current_gate = []
            for j in self.distances:
                if self.distances.iloc[i, j] > zone_dist_begin and self.distances.iloc[i, j] < zone_dist_end:
                    matches_current_gate.append(j)
            subset_G_in_zone.append(matches_current_gate)
        return subset_G_in_zone

    # index_sb_with_gc is an array in which we predefine sb-numbers and the assigned gigacenter type
    # [4,'Hospital'][10, 'University']...
    def __init__(self, index_sb_with_gc):
        # sb total, inhabitants
        self.num_SB_total = 16
        self.set_SB = [i for i in range(self.num_SB_total)]
        self.inhabitants_per_sb = 5000

        # input parameters that we want to obtain from the excel file
        filePath = 'preprocessing/Daten_real.xlsx'  #MFMS_Daten_Dummy.xlsx' #'preprocessing/MFMS_Daten_Dummy.xlsx'
        df = pandas.read_excel(filePath)
        self.area_c = df['area_c'].array
        demand_rate_c = df['demand_rate_c'].array
        # Absolute demand per center type = inhabitants * per-capita rate.
        self.demand_c = [self.inhabitants_per_sb * demand_rate_c[i] for i in range(len(demand_rate_c))]
        self.capacity_c = df['capacity_c'].array
        self.centernames = df['centernames'].array
        self.max_dist_c = df['max_dist_c'].array
        self.beta = df['beta'].array
        self.set_C = [i for i in range(len(self.area_c))]
        self.num_C_total = len(self.set_C)
        self.subset_C_gigac = [i for i in self.set_C if df['gigacenter'].array[i]]  # center types are a giga c.
        self.subset_C_commc = [i for i in self.set_C if df['commercial'].array[i]]  # center types are commercial b.

        # distance matrix
        filePath = 'preprocessing/Distance_Matrix_Layout_1.xlsx'  # 'Distance_Matrix_Layout_1.xlsx'#'preprocessing/Distance_Matrix_Layout_1.xlsx'
        self.distances = pandas.read_excel(filePath, header=None)

        # commercial building + commercial traffic special settings
        # first we set the "zone" distances
        self.com_buildings_zone1_dist = 4000
        self.com_buildings_zone2_dist = 5000
        # also we set the share of people that at least have to travel this distance
        self.prop_demand_zone1 = 0.1
        self.prop_demand_zone2 = 0.1
        # then we create a set that specifies the gates in a particular zone for any given gate
        # these destination gates then have a min. distance to the origin gate
        self.subset_G_zone1 = self.create_subset_gates_for_commercials(self.com_buildings_zone1_dist, self.com_buildings_zone2_dist)
        self.subset_G_zone2 = self.create_subset_gates_for_commercials(self.com_buildings_zone2_dist, 100000)
        print("set 1: ", self.subset_G_zone1)
        print("set 2: ", self.subset_G_zone2)

        # big M
        self.M = 500000

        # gates
        self.num_G_per_SB = 2  # number of gates per superblock
        self.num_G_total = self.num_G_per_SB * self.num_SB_total  # number of total gates of city (general gates)
        self.set_G = [i for i in range(self.num_G_total)]
        self.subset_G_SB = self.create_gsb()  # specific gates per superblock; G_sb as subset of G

        # giga center settings (SB_with_GC is subset with the index of SB's that are reserved for GB);
        # in the dictionary, we can look up the assigned center type for such a reserved SB.
        self.subset_SB_with_GC = [int(i) for i in numpy.array(index_sb_with_gc)[:, 0]]

        # centers
        self.ub_per_center = self.get_ub_for_center()  # calculates the max.# of possible center instances for each type
        self.num_I_total = sum(i for i in self.ub_per_center)  # i in I describes all used centers independent of center type
        self.set_I = [i for i in range(self.num_I_total)]

        # we have to make sure that the max. allowed number of placable centers is not 0! Otherwise infeasible model!
        print("Assertion:")
        print(self.ub_per_center)
        for i in range(self.num_C_total):
            assert self.ub_per_center[i] > 0, \
                "Invalid Input Data for Superblock Model. The given min. utilization isn't possible for center " + str(i)

        self.subset_I_c = self.create_specific_centers()  # assigns value to self.I_c

        # max area settings (usually, lot of space => but in case sb's are reserved for a giga center, max area = 0)
        self.max_area_normal = 18000
        self.max_area_gc = 0
        self.max_area_per_sb = [self.max_area_normal if sb not in self.subset_SB_with_GC else self.max_area_gc for sb in self.set_SB]

        # dictionary
        self.create_dict_for_gigacenter(index_sb_with_gc)


# Module-level smoke run with three reserved giga-center superblocks.
params = Params([[5, 'Hospital'], [6, 'University'], [9, 'Industry']])
print(params.distances)
print(params.subset_C_gigac)
print(params.subset_C_commc)
# -*-coding:cp949-*-
##################################################
# program: Organizer.py
# author: Pingvino Kay
# version: 0.8
# date: 13/08/29
##################################################
import sys
import os
import datetime
# Importing tkinter.ttk also binds the parent `tkinter` package,
# which the rest of the code uses directly.
import tkinter.ttk


def organize(x=0, y=False):
    """Sort the current directory's entries into subfolders.

    x selects the mode: 0 = by extension, 1 = by creation date,
    2 = by modification date, 9 = undo (move everything back out).
    y: when True, additionally move everything into one 'Organized' folder.
    The running script itself is always skipped.
    """
    for i in os.listdir():
        if i == os.path.basename(sys.argv[0]):
            pass
        elif x == 0:
            if os.path.isfile(i) == True:
                # Files are sorted into folders named after their extension.
                if os.path.splitext(i)[1] != '':
                    # Files without an extension get their own folder
                    # ('확장자 없음' = "no extension").
                    os.renames(i, os.path.abspath(os.path.splitext(i)[1] + '\\' + i))
                else:
                    os.renames(i, os.path.abspath('확장자 없음\\' + i))
            else:
                # Directories go into the '폴더' ("folders") folder.
                os.renames(i, os.path.abspath("폴더\\" + i))
        elif x == 1:
            # Sort by creation date (os.stat index 7 = st_ctime).
            os.renames(i, os.path.abspath(datetime.datetime.fromtimestamp(float(os.stat(i)[7])).strftime('%Y-%m-%d') + '\\' + i))
        elif x == 2:
            # Sort by modification date (os.stat index 8 = st_mtime).
            os.renames(i, os.path.abspath(datetime.datetime.fromtimestamp(float(os.stat(i)[8])).strftime('%Y-%m-%d') + '\\' + i))
        elif x == 9:
            y = False
            # NOTE(review): this undo branch re-runs a full directory sweep
            # inside the outer loop (once per outer entry) and shadows the
            # outer loop variable `i`; it works because the moves are
            # idempotent, but the structure is fragile -- confirm.
            for i in os.listdir():
                # Restore: move every file out of its subfolder, then
                # remove the now-empty subfolder.
                if i == os.path.basename(sys.argv[0]):
                    pass
                elif os.path.isdir(i) == True:
                    if os.listdir(i) == []:
                        pass
                    else:
                        for j in os.listdir(i):
                            os.rename(os.path.abspath(i + '\\' + j), os.path.abspath(j))
                    os.rmdir(i)
                else:
                    pass
        else:
            pass
    if y == True:
        # Finally, gather everything into a single 'Organized' folder.
        for i in os.listdir():
            if i == os.path.basename(sys.argv[0]):
                pass
            else:
                os.renames(i, os.path.abspath('Organized\\' + i))
    else:
        pass


class MyApp:
    """Tk GUI: a notebook of radio-button pages driving organize()."""

    def __init__(self, parent):
        self.myParent = parent
        self.v0 = tkinter.IntVar()      # selected organize mode (0/1/2/9)
        self.v1 = tkinter.BooleanVar()  # "gather into one folder" checkbox
        self.n = tkinter.ttk.Notebook(parent)
        self.n.pack()
        self.myC1 = tkinter.Frame(self.n)  # frame for the by-extension radio button
        self.myC1.pack()
        self.myC2 = tkinter.Frame(self.n)  # frame for the by-date radio buttons
        self.myC2.pack()
        self.myC3 = tkinter.Frame(self.n)  # frame for the undo radio button
        self.myC3.pack()
        self.myCy = tkinter.Frame(parent)
        self.myCy.pack()
        self.myCx = tkinter.Frame(parent)  # frame for the apply/quit buttons
        self.myCx.pack()
        self.R1 = tkinter.Radiobutton(self.myC1, text='확장자별로 정리합니다.', variable=self.v0, value=0)
        self.R1.pack()
        self.R2 = tkinter.Radiobutton(self.myC2, text='만든 날짜별로 정리합니다.', variable=self.v0, value=1)
        self.R2.pack()
        self.R3 = tkinter.Radiobutton(self.myC2, text='수정한 날짜별로 정리합니다.', variable=self.v0, value=2)
        self.R3.pack()
        self.R4 = tkinter.Radiobutton(self.myC3, text='원래대로 되돌립니다.', variable=self.v0, value=9)
        self.R4.pack()
        self.C1 = tkinter.Checkbutton(self.myCy, text="정리한 후, 폴더 하나에 넣습니다.", variable=self.v1)
        self.C1.pack()
        self.B1 = tkinter.Button(self.myCx, text='적용')
        self.B1.pack(side=tkinter.LEFT)
        self.B1.bind("<Button-1>", self.B1Click)
        self.B2 = tkinter.Button(self.myCx, text='종료')
        self.B2.pack(side=tkinter.LEFT)
        self.B2.bind("<Button-1>", self.B2Click)
        self.n.add(self.myC1, text='확장자')
        self.n.add(self.myC2, text='시간')
        self.n.add(self.myC3, text='되돌리기')

    def B1Click(self, event):
        # Apply: organize according to the selected mode and checkbox.
        organize(self.v0.get(), self.v1.get())

    def B2Click(self, event):
        # Quit the application.
        self.myParent.destroy()


root = tkinter.Tk()
root.title("정리기")
root.minsize(200, 80)  # minimum window size
myapp = MyApp(root)
root.mainloop()
def length(stripe, speed): kilometres = 0 ice_in_row = 0 onmountain = False for terrain in stripe: kilometres += 1 if terrain != 'I': ice_in_row = 0 if terrain == 'G': speed -= 27 elif terrain == 'I': ice_in_row += 1 speed += (12 * ice_in_row) elif terrain == 'A': speed -= 59 elif terrain == 'S': speed -= 212 elif terrain == 'F': if onmountain: speed += 35 onmountain = False else: speed -= 70 onmountain = True if speed <= 0: return kilometres print(length(open('terreng.txt').read(), 10703437))
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import sys
import os.path
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
import vtk
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor


class VTKFrame(QtGui.QFrame):
    """Qt frame showing a grid of VTK geometric-object sources, one
    renderer (with a caption) per object."""

    def __init__(self, parent = None):
        super(VTKFrame, self).__init__(parent)

        self.vtkWidget = QVTKRenderWindowInteractor(self)
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
        vl = QtGui.QVBoxLayout(self)
        vl.addWidget(self.vtkWidget)
        vl.setContentsMargins(0, 0, 0, 0)

        # Create source
        geometricObjects = list()
        geometricObjects.append(vtk.vtkArrowSource())
        geometricObjects.append(vtk.vtkConeSource())
        geometricObjects.append(vtk.vtkCubeSource())
        geometricObjects.append(vtk.vtkCylinderSource())
        geometricObjects.append(vtk.vtkDiskSource())
        geometricObjects.append(vtk.vtkLineSource())
        geometricObjects.append(vtk.vtkRegularPolygonSource())
        geometricObjects.append(vtk.vtkSphereSource())

        renderers = list()
        mappers = list()
        actors = list()
        textmappers = list()
        textactors = list()

        # Create a common text property.
        textProperty = vtk.vtkTextProperty()
        textProperty.SetFontSize(10)
        textProperty.SetJustificationToCentered()

        # Create a parametric function source, renderer, mapper
        # and actor for each object.
        for idx, item in enumerate(geometricObjects):
            geometricObjects[idx].Update()

            mappers.append(vtk.vtkPolyDataMapper())
            mappers[idx].SetInputConnection(geometricObjects[idx].GetOutputPort())

            actors.append(vtk.vtkActor())
            actors[idx].SetMapper(mappers[idx])

            # Caption label: the VTK class name of the source.
            textmappers.append(vtk.vtkTextMapper())
            textmappers[idx].SetInput(item.GetClassName())
            textmappers[idx].SetTextProperty(textProperty)

            textactors.append(vtk.vtkActor2D())
            textactors[idx].SetMapper(textmappers[idx])
            textactors[idx].SetPosition(150, 16)

            renderers.append(vtk.vtkRenderer())

        gridDimensions = 3

        # NOTE(review): this second loop appends additional (unused) renderers
        # on top of the ones created above; only renderers[0..7] are ever
        # placed in the window below -- confirm the extra appends are intended.
        for idx in range(len(geometricObjects)):
            if idx < gridDimensions * gridDimensions:
                renderers.append(vtk.vtkRenderer())

        rendererSize = 300

        # Setup the RenderWindow
        self.vtkWidget.GetRenderWindow().SetSize(rendererSize * gridDimensions, rendererSize * gridDimensions)

        # Add and position the renders to the render window.
        viewport = list()
        for row in range(gridDimensions):
            for col in range(gridDimensions):
                idx = row * gridDimensions + col
                # Normalized [xmin, ymin, xmax, ymax] viewport for this grid cell
                # (rows counted from the top of the window).
                viewport[:] = []
                viewport.append(float(col) * rendererSize / (gridDimensions * rendererSize))
                viewport.append(float(gridDimensions - (row+1)) * rendererSize / (gridDimensions * rendererSize))
                viewport.append(float(col+1)*rendererSize / (gridDimensions * rendererSize))
                viewport.append(float(gridDimensions - row) * rendererSize / (gridDimensions * rendererSize))

                if idx > (len(geometricObjects) - 1):
                    continue

                renderers[idx].SetViewport(viewport)
                self.vtkWidget.GetRenderWindow().AddRenderer(renderers[idx])

                renderers[idx].AddActor(actors[idx])
                renderers[idx].AddActor(textactors[idx])
                renderers[idx].SetBackground(0.4,0.3,0.2)

        self._initialized = False

    def showEvent(self, evt):
        # Initialize the interactor lazily, on first show.
        if not self._initialized:
            self.iren.Initialize()
            self._initialized = True


class MainPage(QtGui.QMainWindow):
    """Main window hosting the VTKFrame demo."""

    def __init__(self, parent = None):
        super(MainPage, self).__init__(parent)
        self.setCentralWidget(VTKFrame())

        self.setWindowTitle("Geometric objects example")

    def categories(self):
        return ['Geometric Objects']

    def mainClasses(self):
        return ['vtkArrowSource', 'vtkConeSource', 'vtkCubeSource', 'vtkCylinderSource',
                'vtkDiskSource', 'vtkLineSource', 'vtkRegularPolygonSource', 'vtkSphereSource',
                'vtkTextProperty']


if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)

    w = MainPage()
    w.show()

    sys.exit(app.exec_())
import os
import json
import logging
from copy import deepcopy
from subprocess import call, check_output, DEVNULL
from retrying import retry

FILE = 'test/parlament_processed_sessions.json'
BASE = '../parlament-scrape/audio'


def main():
    """Rewrite session metadata so each audio URL points to a local file,
    downloading any file not yet present, and dump the flattened result."""
    basepath = BASE
    sessions = json.load(open(FILE))
    interventions = {}
    for ple_code, session in sessions.items():
        for yaml, value in session.items():
            if len(value['urls']) > 1:
                print("%s multiple audio file, skipping"%value['urls'])
            else:
                uri = get_uri(value['urls'][0][1], basepath)
                if not uri:
                    print('%s still not found'%value['urls'][0][1])
                value['urls'][0][1] = uri
                value['source'] = yaml
                new_value = deepcopy(value)
                new_key = get_new_key(ple_code, yaml)
                if interventions.get(new_key):
                    raise KeyError('%s already exists'%new_key)
                new_value['ple_code'] = ple_code
                interventions[new_key] = new_value
    with open(FILE.replace('.json', '_local02.json'), 'w') as out:
        json.dump(interventions, out, indent=4)


def get_uri(url, basepath):
    """Return the local absolute path for `url`, downloading it if absent.

    Files are sharded under basepath/<first char>/<second char>/<basename>.
    Returns None when the download fails.
    """
    basename = os.path.basename(url)
    uri = os.path.abspath(os.path.join(basepath, basename[0], basename[1], basename))
    if not os.path.isfile(uri):
        logging.info('attempting to download %s'%url)
        curl_download(url, uri)
    if os.path.isfile(uri):
        return uri
    else:
        return None


@retry(stop_max_attempt_number=3, wait_fixed=1000)
def curl_download(uri, filepath):
    """Download `uri` to `filepath` with curl, following one redirect.

    Returns None on HTTP 401; raises ConnectionError on any other
    non-200 status (which triggers a retry via the decorator).
    """
    msg = 'checking %s'%uri
    logging.info(msg)
    # check the http headers
    status, uri = get_status_code(uri)
    if status == 302:
        # redirect uri should have been extracted to the uri variable
        status, uri = get_status_code(uri)
    if status != 200:
        error = 'the resource in the url %s cannot be reached'\
                ' with status %i.'%(uri,status)
        logging.error(error)
        if status == 401:
            return None
        else:
            raise ConnectionError(error)
    # create file
    with open(filepath,'w') as fout:
        cmd = ['curl','-g',uri]
        logging.info("downloading %s"%uri)
        call(cmd, stdout=fout, stderr=DEVNULL)
        #seems dangerous but 404s are
        #caught by the get_status_code


def get_status_code(url):
    """HEAD-check `url` with curl; return (status_code, effective_url).

    On a 302 the Location header is extracted; a relative Location (no
    'http') is reported as a 401 so the caller gives up on the resource.
    """
    cmd = ['curl','-I',url]
    header = check_output(cmd, stderr=DEVNULL)
    header_list = header.split(b'\n')
    code = int(header_list[0].split()[1])
    uri = url
    if code == 302:
        for h in header_list:
            if h.startswith(b'Location: '):
                uri = h.strip().decode('ascii')[10:]
                if 'http' not in uri:
                    code = 401
    return code, uri


def get_new_key(ple_code, uri):
    """Build the flattened key '<ple_code>_<file number>' from a yaml uri."""
    no = os.path.basename(uri).split('.')[0]
    if not no:
        msg = 'smt wrong with uri %s'%uri
        raise ValueError(msg)
    return '_'.join([ple_code, no])


if __name__ == "__main__":
    main()
""" USAGE: pyOTDR SOR_file [format] : format: JSON (default) or XML For using pyOTDR in a module, see the code in pyOTDR/main.py. The two main functions are: * sorparse(): to parse the SOR file * tofile(): to dump the results into a JSON file """ from .read import sorparse, sorstream from .dump import tofile from .main import main from .main import ConvertSORtoTPL __version__ = "1.0.0.c2"
from __future__ import print_function, absolute_import import time import torch from torch.autograd import Variable from .evaluation_metrics import accuracy from .loss import OIMLoss, TripletLoss from .utils.meters import AverageMeter class BaseTrainer(object): def __init__(self, model, criterions, print_freq=1): super(BaseTrainer, self).__init__() self.model = model self.criterions = criterions self.print_freq = print_freq def train(self, epoch, data_loader, optimizer): self.model.train() # for name, param in self.model.named_parameters(): # if 'classifier' in name: # param.requires_grad = False batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() precisions = AverageMeter() end = time.time() for i, inputs in enumerate(data_loader): data_time.update(time.time() - end) inputs, targets = self._parse_data(inputs) loss, prec1 = self._forward(inputs, targets, epoch) losses.update(loss.data[0], targets.size(0)) precisions.update(prec1, targets.size(0)) optimizer.zero_grad() loss.backward() #add gradient clip for lstm for param in self.model.parameters(): try: param.grad.data.clamp(-1., 1.) 
except: continue optimizer.step() batch_time.update(time.time() - end) end = time.time() if (i + 1) % self.print_freq == 0: print('Epoch: [{}][{}/{}]\t' 'Time {:.3f} ({:.3f})\t' 'Data {:.3f} ({:.3f})\t' 'Loss {:.3f} ({:.3f})\t' 'Prec {:.2%} ({:.2%})\t' .format(epoch, i + 1, len(data_loader), batch_time.val, batch_time.avg, data_time.val, data_time.avg, losses.val, losses.avg, precisions.val, precisions.avg)) def _parse_data(self, inputs): raise NotImplementedError def _forward(self, inputs, targets): raise NotImplementedError class Trainer(BaseTrainer): def _parse_data(self, inputs): imgs, _, pids, _ = inputs inputs = [Variable(imgs)] targets = Variable(pids.cuda()) return inputs, targets def _forward(self, inputs, targets, epoch): outputs = self.model(*inputs) #outputs=[x1,x2,x3] #new added by wc # x1 triplet loss loss_tri, prec_tri = self.criterions[0](outputs[0], targets, epoch) # x2 triplet loss loss_global, prec_global = self.criterions[1](outputs[1], targets, epoch) return loss_tri+loss_global, prec_global
class ListQ(): def __init__(self): self.lista = [] def __str__(self): """Returnerar köns element som en sträng. Tips: Titta på kursens FAQ (under Hjälp)""" return "ListQ Class" def put(self,x): """Stoppar in x sist i kön """ self.lista.append(x) def get(self): """Plockar ut och returnerar det som står först i kön """ firstValue = self.lista.pop(0) return firstValue def isEmpty(self): """Returnerar True om kön är tom, False annars """ return (self.lista == [])
import requests import os # characters_img = [ # 'villager' # ] # characters = [ # 'mario' # ] # characters_ic = [ # 'gaogaen', 'packun_flower' # ] characters = [ 'mario', 'donkey_kong', 'link', 'samus', 'dark_samus', 'yoshi', 'kirby', 'fox', 'pikachu', 'luigi', 'ness', 'captain_falcon', 'jigglypuff', 'peach', 'daisy', 'bowser', 'ice_climbers', 'sheik', 'zelda', 'dr_mario', 'pichu', 'falco', 'marth', 'lucina', 'young_link', 'ganondorf', 'mewtwo', 'roy', 'chrom', 'mr_game_and_watch', 'meta_knight', 'pit', 'dark_pit', 'zero_suit_samus', 'wario', 'snake', 'ike', 'pokemon_trainer', 'diddy_kong', 'lucas', 'sonic', 'king_dedede', 'olimar', 'lucario', 'rob', 'toon_link', 'wolf', 'villager', 'mega_man', 'wii_fit_trainer', 'rosalina_and_luma', 'little_mac', 'greninja', 'mii_fighter', 'palutena', 'pac_man', 'robin', 'shulk', 'bowser_jr', 'duck_hunt', 'ryu', 'ken', 'cloud', 'corrin', 'bayonetta', 'inkling', 'ridley', 'simon', 'richter', 'king_k_rool', 'shizue', 'incineroar', 'piranha_plant', 'joker', 'dq_hero', 'banjo_and_kazooie', 'terry' ] def fetch_character_img(): for char in characters: os.mkdir("chars/"+char) for i in range(10): stringy = 'https://www.smashbros.com/assets_v2/img/fighter/{0}/main{1}.png'.format(char,i) img_data = requests.get(stringy).content with open('./chars/'+char+'/img_'+char+'_skin_'+str(i)+'.png', 'wb') as handler: handler.write(img_data) # def fetch_character_icon(): # for i in range(len(characters_ic)): # image_url = 'https://www.smashbros.com/assets_v2/img/fighter/pict/{0}.png'.format(characters_ic[i]) # img_data = requests.get(image_url).content # with open('ic_{0}.png'.format(characters_ic[i]), 'wb') as handler: # handler.write(img_data) # print('Icon Downloaded... >>> ' + characters_ic[i]) fetch_character_img() # fetch_character_icon()
#! usr/bin/env python # -*-coding:utf-8-*- # author:yanwenming # date:2020-06-30 import unittest from page.init import * import requests import time as t import os import sys curPath = os.path.abspath ( os.path.dirname ( __file__ ) ) rootPath = os.path.split ( curPath )[0] sys.path.append ( rootPath ) print ( sys.path ) class YiChangBaobei(Init): ''' 用于工程师在APP上进行请假操作,将名下所有的日期依次进行请假操作 ''' def test01_Login ( self ) : '''工程师正常登录''' print('执行case1') url = self.default_url + 'v1/app/login' param = {"mobile": self.mobile, "code" : self.code} r = requests.post( url, param, verify = False) t.sleep( 2 ) response1 = r.json() self.token = response1['data']['access_token'] self.assertEqual(response1['status'] , 200) with open('token.txt', 'w') as f: f.write(response1['data']['access_token']) t.sleep(1) def getToken(self) : '''读取token文件内容''' with open('token.txt', 'r') as f: return f.read() def test02_GetWeek ( self ) : '''获取异常报备类型数据''' print( '\n 执行case2' ) url = self.default_url + 'v4/worker-plan/get-week' param = {"access_token": self.getToken(), "version_code": self.version_code, "master_no": self.master_no} r = requests.get(url, param , verify = False) t.sleep( 2 ) response2 = r.json() print(response2) d = response2['data'] # res = [item[key] for item in d for key in item] # print(res) global b #存取前端展示的日期 b = [] for value in d.values(): b.append(value.split('(')[0]) print('b is :%s' % b) self.assertEqual ( response2['message'] , '获取成功' ) def test03_LeaveApplication ( self ) : '''获取异常报备类型数据''' print('\n 执行case3') for riqing in b: url = self.default_url + 'v4/worker-plan/leave-application?access_token=' + self.getToken() + '&version_code=' + self.version_code + '&master_no=' + self.master_no param = {"master_no": self.master_no, "date_time" : riqing, "content": '我要请假'} r = requests.post( url , param , verify = False ) t.sleep ( 2 ) response3 = r.json () print ('riqing %s %s' %(riqing,response3)) if __name__ == '__main__': unittest.main(verbosity = 2)
#!/usr/bin/env python import numpy as np import math import matplotlib.pyplot as plt from pylab import * import h5py from matplotlib.colors import LogNorm ## Set the Zero zero = 1.0e-20 ## Set the maximum size of xenon maxSize = 1000001 ## Create plots fig = plt.figure() title = 'Xenon Distribution' fig.suptitle(title,fontsize=22) xePlot = plt.subplot(111) ## Create lists of file names to read from, time step number in the file, associated line colors, labels name = ['/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', '/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', '/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5', '/home/sophie/Workspace/xolotl-plsm-build/script/xolotlStop.h5'] timestep = [25,35,45,55] col = ['black', 'blue', 'magenta', 'green'] lab = ['TS 25', 'TS 35', 'TS 45', 'TS 55'] for i in range(len(name)): ## Open the file f = h5py.File(name[i], 'r') ## Open the concentration group groupName ='concentrationsGroup/concentration_0_' + str(timestep[i]) concGroup = f[groupName] ## Read the concentration and index datasets concDset = concGroup['concs'] indexDset = concGroup['concs_startingIndices'] ## Read the time at the chosen time step time = concGroup.attrs['absoluteTime'] ## Read how many normal and super clusters there are networkGroup = f['networkGroup'] totalSize = networkGroup.attrs['totalSize'] ## Create the mesh and data array x = np.empty([maxSize]) xeArray = np.empty([maxSize]) for j in range(maxSize): x[j] = j xeArray[j] = zero pos = 0 ## if 0D for j in range(indexDset[pos], indexDset[pos+1]): ## Skip the moments for now if (int(concDset[j][0]) > totalSize - 1): continue ## Get the cluster bounds groupName = str(concDset[j][0]) clusterGroup = networkGroup[groupName] bounds = clusterGroup.attrs['bounds'] ## Loop on Xe size for l in range(bounds[0], bounds[1]+1): ## Fill the array xeArray[l] = xeArray[l] + concDset[j][1] ## Plot the data x = np.delete(x,(0), axis=0) xeArray = np.delete(xeArray,(0), axis=0) 
xePlot.plot(x, xeArray, lw=4, color=col[i], label=lab[i], alpha=0.75) ## Some formatting xePlot.set_xlabel("Cluster Size",fontsize=22) xePlot.set_ylabel("Concentration (# / nm3)",fontsize=22) xePlot.set_xlim([1, 1000000]) xePlot.set_ylim([1.0e-16, 1.0e-1]) xePlot.set_xscale('log') xePlot.set_yscale('log') xePlot.tick_params(axis='both', which='major', labelsize=20) ## Plot the legends l2 = xePlot.legend(loc='best') setp(l2.get_texts(), fontsize=25) ## Show the plots plt.show()
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2018/6/10 下午2:21 # @Author : Lucas Ma # @File : a1 class Animal(object): def run(self): print('Animal is running...') class Dog(Animal): print('Dog is barking') def run(self): print('Dog is running...') class Cat(Animal): def run(self): print('Cat is running...') class Tortoise(Animal): def run(self): print('Tortoise is running slowly...')
import torch
import torch.nn as nn
import torch.nn.functional as F


class UNet(nn.Module):
    """U-Net for single-channel 512x512 inputs.

    Five encoder levels (two 3x3 convs each, 2x2 max-pool between levels)
    mirrored by a transposed-conv decoder with skip concatenations, ending
    in a sigmoid single-channel output.  Layer attribute names and
    hyper-parameters are unchanged, so state dicts remain compatible.
    """

    def __init__(self):
        super().__init__()
        # --- Encoder (spatial size after each level in comments) ---
        self.convE1 = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=1, padding=1)        # 512x512
        self.convE2 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=1, padding=1)       # 512x512
        self.convE3 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=1, padding=1)      # 256x256
        self.convE4 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=1, padding=1)     # 256x256
        self.convE5 = nn.Conv2d(128, 256, kernel_size=(3, 3), stride=1, padding=1)     # 128x128
        self.convE6 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=1, padding=1)     # 128x128
        self.convE7 = nn.Conv2d(256, 512, kernel_size=(3, 3), stride=1, padding=1)     # 64x64
        self.convE8 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=1, padding=1)     # 64x64
        self.convE9 = nn.Conv2d(512, 1024, kernel_size=(3, 3), stride=1, padding=1)    # 32x32
        self.convE10 = nn.Conv2d(1024, 1024, kernel_size=(3, 3), stride=1, padding=1)  # 32x32

        # --- Decoder: upsample, concatenate the matching skip, two convs ---
        self.convT1 = nn.ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=2, padding=1)  # 64x64
        self.convD1 = nn.Conv2d(1024, 512, kernel_size=(3, 3), stride=1, padding=1)           # 64x64 (after concat)
        self.convD2 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=1, padding=1)            # 64x64
        self.convT2 = nn.ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=2, padding=1)   # 128x128
        self.convD3 = nn.Conv2d(512, 256, kernel_size=(3, 3), stride=1, padding=1)            # 128x128 (after concat)
        self.convD4 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=1, padding=1)            # 128x128
        self.convT3 = nn.ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=2, padding=1)   # 256x256
        self.convD5 = nn.Conv2d(256, 128, kernel_size=(3, 3), stride=1, padding=1)            # 256x256 (after concat)
        self.convD6 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=1, padding=1)            # 256x256
        self.convT4 = nn.ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=2, padding=1)    # 512x512
        self.convD7 = nn.Conv2d(128, 64, kernel_size=(3, 3), stride=1, padding=1)             # 512x512 (after concat)
        self.convD8 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=1, padding=1)              # 512x512
        self.convD9 = nn.Conv2d(64, 1, kernel_size=(3, 3), stride=1, padding=1)               # 512x512

    def forward(self, x):
        # Encoder: keep each level's pre-pool activation for the skips.
        skip1 = F.leaky_relu(self.convE2(F.leaky_relu(self.convE1(x))))
        down = F.max_pool2d(skip1, kernel_size=(2, 2), stride=2, padding=0)    # 256x256
        skip2 = F.leaky_relu(self.convE4(F.leaky_relu(self.convE3(down))))
        down = F.max_pool2d(skip2, kernel_size=(2, 2), stride=2, padding=0)    # 128x128
        skip3 = F.leaky_relu(self.convE6(F.leaky_relu(self.convE5(down))))
        down = F.max_pool2d(skip3, kernel_size=(2, 2), stride=2, padding=0)    # 64x64
        skip4 = F.leaky_relu(self.convE8(F.leaky_relu(self.convE7(down))))
        bottom = F.max_pool2d(skip4, kernel_size=(2, 2), stride=2, padding=0)  # 32x32
        bottom = F.leaky_relu(self.convE10(F.leaky_relu(self.convE9(bottom))))

        # Decoder: upsample, concatenate the matching skip on channels, conv.
        up = torch.cat((self.convT1(bottom), skip4), dim=1)
        up = F.leaky_relu(self.convD2(F.leaky_relu(self.convD1(up))))
        up = torch.cat((self.convT2(up), skip3), dim=1)
        up = F.leaky_relu(self.convD4(F.leaky_relu(self.convD3(up))))
        up = torch.cat((self.convT3(up), skip2), dim=1)
        up = F.leaky_relu(self.convD6(F.leaky_relu(self.convD5(up))))
        up = torch.cat((self.convT4(up), skip1), dim=1)
        up = F.leaky_relu(self.convD8(F.leaky_relu(self.convD7(up))))
        # Sigmoid keeps the single-channel output in [0, 1].
        return torch.sigmoid(self.convD9(up))


unet = UNet()
unet.cuda()
# Python 2 one-off import script for an Odoo/OpenERP instance: reads product
# categories, subcategories and products from CSV exports and creates them
# through the XML-RPC "object" endpoint on localhost:8069.
import csv
import xmlrpclib

db = "eym"
uid = 1
password = "admin"

sock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/object')

# Source CSV exports from the legacy system.
tipos = open('tipos-articulos.csv', 'r')
sub_tipos = open('sub-tipos-articulos.csv', 'r')
product = open('articulostodos2.csv', 'r')

dtipos = csv.reader(tipos, delimiter=',')
dsub_tipos = csv.reader(sub_tipos, delimiter=',')
dproduct = csv.reader(product, delimiter=',')

# Materialize all rows up front.
dtipos_list = [row for row in dtipos]
dsub_tipos_list = [row for row in dsub_tipos]
dproduct_list = [row for row in dproduct]

subtipo_list = []

# One-off category/subcategory creation, already executed against this
# database -- kept commented out for reference.
# for tipo in dtipos_list:
#     vals = {'name': tipo[1],
#             'parent_id': 2,
#             'property_account_expense_categ': 179,
#             'property_account_income_categ': 167,
#             'property_stock_account_input_categ': False,
#             'property_stock_account_output_categ': False,
#             'property_stock_journal': 18,
#             'property_stock_valuation_account_id': False,
#             'removal_strategy_id': False,
#             'route_ids': [[6, False, []]],
#             'type': 'view',
#             "oldref": tipo[2]}
#     cat_id = sock.execute(db, uid, password, "product.category", "create", vals)
#     count = 0
#     for subtipo in dsub_tipos_list:
#         if tipo[2] == subtipo[2]:
#             subtipo_dict = {'name': subtipo[3],
#                             'parent_id': cat_id,
#                             'property_account_expense_categ': 179,
#                             'property_account_income_categ': 167,
#                             'property_stock_account_input_categ': False,
#                             'property_stock_account_output_categ': False,
#                             'property_stock_journal': 18,
#                             'property_stock_valuation_account_id': False,
#                             'removal_strategy_id': False,
#                             'route_ids': [[6, False, []]],
#                             'type': 'normal',
#                             "oldref": subtipo[0]}
#             subtipo_list.append(subtipo_dict)
#             count += 1
#             print count
#
# count = 0
# for sub_tipos in subtipo_list:
#     cat_id = sock.execute(db, uid, password, "product.category", "create", sub_tipos)
#     count += 1
#     print count

product_list = []
count = 0
for product in dproduct_list:
    # Resolve the category previously created with this legacy reference
    # (column 6 of the product CSV).
    category_id = sock.execute(db, uid, password, "product.category", "search", [("oldref", "=", product[6])])
    if category_id:
        category_id = int(category_id[0])
    else:
        # Fall back to category id 2 when the legacy reference is unknown.
        category_id = 2
    # Prefer the code in column 8; fall back to column 4 when it is empty.
    if product[8] == '':
        default_code = product[4]
    else:
        default_code = product[8]
    prod_dic = {
        'active': True,
        'attribute_line_ids': [],
        'categ_id': category_id,
        'company_id': 1,
        'cost_method': 'standard',
        'default_code': default_code,
        'description': False,
        'description_purchase': False,
        'description_sale': False,
        'ean13': False,
        'image_medium': False,
        'list_price': product[24],
        'loc_case': False,
        'loc_rack': False,
        'loc_row': False,
        'mes_type': 'fixed',
        'message_follower_ids': False,
        'message_ids': False,
        'name': product[2],
        'packaging_ids': [],
        'product_manager': False,
        'property_account_expense': False,
        'property_account_income': False,
        'property_stock_account_input': False,
        'property_stock_account_output': False,
        'property_stock_inventory': 5,
        'property_stock_procurement': 6,
        'property_stock_production': 7,
        'route_ids': [[6, False, []]],
        'sale_delay': 0,
        'sale_ok': True,
        'seller_ids': [],
        'standard_price': product[38],
        'state': False,
        'supplier_taxes_id': [[6, False, [8]]],
        'taxes_id': [[6, False, [19]]],
        'track_all': False,
        'track_incoming': False,
        'track_outgoing': False,
        'type': 'product',
        'uom_id': 1,
        'uom_po_id': 1,
        'uos_coeff': 1,
        'uos_id': False,
        'valuation': 'manual_periodic',
        'volume': 0,
        'warranty': 0,
        'weight': 0,
        'weight_net': 0}
    # NOTE(review): the bare try/except silently skips any product that
    # fails to create (duplicates, bad rows, RPC faults) -- consider
    # logging the failing row instead of discarding the error.
    try:
        sock.execute(db, uid, password, "product.template", "create", prod_dic)
    except:
        pass
    count += 1
    print count
import pandas as pd
import numpy as np
import datetime


def week_index(date):
    """Return the number of whole weeks between the week containing
    2016-01-04 (week 0; weeks start on Monday) and the week containing
    ``date``.

    ``date`` is a string like ``'05JAN2016'``: 2-digit day, month
    abbreviation, 4-digit year.
    """
    month_dictionary = {"JAN": 1, "FEB": 2, "MAR": 3, "APR": 4,
                        "MAY": 5, "JUN": 6, "JUL": 7, "AUG": 8,
                        "SEP": 9, "SEPT": 9, "OCT": 10, "NOV": 11,
                        "DEC": 12}
    day = int(date[:2])
    # BUGFIX: the month abbreviation is everything between the 2-digit day
    # and the 4-digit year.  The original fixed slice date[2:5] raised
    # KeyError on September dates ("SEPT" is four letters, and plain "SEP"
    # was missing from the table), and date[5:] then yielded "T2016".
    month = month_dictionary[date[2:-4].upper()]
    year = int(date[-4:])
    transaction_date = datetime.date(year=year, month=month, day=day)
    anchor = datetime.date(year=2016, month=1, day=4)
    # Monday of each date's week; the difference is an exact multiple of 7.
    m1 = anchor - datetime.timedelta(days=anchor.weekday())
    m2 = transaction_date - datetime.timedelta(days=transaction_date.weekday())
    # // keeps the index an int (the original true division produced floats
    # under Python 3).
    return (m2 - m1).days // 7


def main():
    """Aggregate transaction amounts per channel into cumulative weekly
    totals and write them to week_index.xlsx (channels down, weeks across)."""
    writer = pd.ExcelWriter("week_index.xlsx", engine='xlsxwriter')
    workbook = writer.book
    worksheet = workbook.add_worksheet(name='test')

    df0 = pd.read_csv(filepath_or_buffer="OBS_TXNS.csv", nrows=10000)
    my_df = pd.DataFrame(data={"tran_date_new": df0["tran_date_new"],
                               "dr_cr": df0["dr_cr"],
                               "txn_amt": df0["txn_amt"],
                               "Channel": df0["Channel"]})
    my_df.dropna(axis=0, inplace=True)
    # Debits count negative, credits positive.
    my_df["amount"] = np.where(my_df["dr_cr"] == 'D',
                               -my_df["txn_amt"], my_df["txn_amt"])
    my_df["week_index"] = my_df["tran_date_new"].apply(func=lambda x: week_index(x))
    max_weeks = int(my_df["week_index"].max())
    print(my_df["week_index"])

    channels = list(my_df["Channel"].unique())
    channels = [x for x in channels if str(x) != 'nan']

    # Header row/column.
    worksheet.write(0, 0, "Channels")
    worksheet.write(0, 1, "Week Index")
    for i in range(len(channels)):
        worksheet.write(i + 2, 0, channels[i])
    for i in range(max_weeks):
        worksheet.write(1, i + 1, i)

    # One row per channel; each cell holds the running (cumulative) total of
    # that channel's signed amounts through week j.
    for i in range(len(channels)):
        running_total = 0  # renamed: the original shadowed built-in sum()
        for j in range(max_weeks):
            transactions = my_df[(my_df.week_index == j) & (my_df.Channel == channels[i])]
            running_total += transactions["amount"].sum()
            worksheet.write(i + 2, j + 1, running_total)

    workbook.close()


if __name__ == '__main__':
    main()
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from django.utils import timezone
from django.views.generic import DetailView, View

from qa.mixins import AuthorRequiredMixin, LoginRequired
from qa.models import Question


class SomeView(LoginRequired, View):
    """Minimal view guarded by LoginRequired, used only by the mixin tests."""

    def get(self, request):
        return HttpResponse('something')


class SomeAuthorRequiredView(AuthorRequiredMixin, DetailView):
    """Minimal detail view guarded by AuthorRequiredMixin."""

    model = Question

    def get(self, request, pk):
        return HttpResponse('something')


class TestMixins(TestCase):
    """
    Tests functionalities for the mixins. Views that implement them should
    only test that they are subclasses of the given mixin.
    """

    def setUp(self):
        self.factory = RequestFactory()

    def _new_user(self):
        # Shared fixture: a plain authenticated account.
        return get_user_model().objects.create_user(
            username='test_user', password='top_secret')

    def _new_question(self, owner):
        # A question owned by ``owner``; the field values are arbitrary.
        return Question.objects.create(
            title="Another Question",
            description="A not so long random text to fill this field",
            pub_date=timezone.datetime(2016, 1, 6, 0, 0, 0),
            reward=0,
            user=owner,
            closed=False,
        )

    def test_login_required_redirects(self):
        """An anonymous visitor must be redirected to LOGIN_URL."""
        request = self.factory.get('some-random-place')
        request.user = AnonymousUser()
        response = SomeView.as_view()(request)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(settings.LOGIN_URL in response.url)

    def test_login_request_access(self):
        """A logged-in user gets a normal 200 response."""
        request = self.factory.get('some-random-place')
        request.user = self._new_user()
        response = SomeView.as_view()(request)
        self.assertEqual(response.status_code, 200)

    def test_author_required_allow_object_user(self):
        """The object's owner can access the author-guarded view."""
        request = self.factory.get('some-random-place')
        request.user = self._new_user()
        question = self._new_question(request.user)
        response = SomeAuthorRequiredView.as_view()(request, pk=question.pk)
        self.assertEqual(response.status_code, 200)

    def test_author_required_not_allow_not_object_user(self):
        """Anyone other than the owner must trigger PermissionDenied."""
        request = self.factory.get('some-random-place')
        request.user = AnonymousUser()
        owner = self._new_user()
        question = self._new_question(owner)
        with self.assertRaises(PermissionDenied):
            SomeAuthorRequiredView.as_view()(request, pk=question.pk)
from selenium import webdriver import time import sys driver = webdriver.Chrome() driver.get("https://www.surveycake.com/s/2xOyK") time.sleep(3) #現金 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div[1]/div/div[1]/div[1]/div[2]/div[2]/div[2]/div/div[1]/div/span/span[2]") button.click() #實領 6,000元或以上 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div[1]/div/div[1]/div[2]/div[2]/div[2]/div[2]/div/div[7]/div/span/span[2]") button.click() #新北市 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div[1]/div/div[1]/div[3]/div[2]/div[2]/div[2]/div/div[1]/div/span/span[2]") button.click() #40-49歲 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div[1]/div/div[1]/div[4]/div[2]/div[2]/div[2]/div/div[4]/div/span/span[2]") button.click() #男性 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div[1]/div/div[1]/div[5]/div[2]/div[2]/div[2]/div/div[1]/div/span/span[2]") button.click() #送出 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div/div/div/div[2]/div/div/div[1]/div/div[2]/button/span") button.click() #確定送出 js="var q=document.documentElement.scrollTop=100000" driver.execute_script(js) button = driver.find_element_by_xpath("/html/body/div[2]/div/footer/button[2]/span") button.click() time.sleep(2) # driver.close() driver.quit()