text stringlengths 8 6.05M |
|---|
import cv2
import time
class CameraView:
    """Render camera frames in an OpenCV window with optional overlays.

    Keyboard controls (handled in show()):
      q -- close the window and set ``self.shown = False``
      d -- toggle drawing of overlay text/shapes
    """

    def __init__(self):
        self.frame_count = 0                      # frames shown during the current second
        self.fps = 0                              # frames counted over the previous full second
        self.last_rendered_sec = int(time.time())
        self.shown = True                         # False once the user quits with 'q'
        self.display_results = True               # overlay on/off, toggled with 'd'

    def draw_text(self, image, text, left, bottom):
        """Draw green text on ``image`` at (left, bottom) if overlays are enabled."""
        if self.display_results:
            cv2.putText(image, text, (left, bottom), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), thickness=1)

    def draw_objects(self, image, objects):
        """Draw a bounding box and probability label for each detected object.

        Each ``obj`` is expected to expose ``top``/``bottom``/``left``/``right``
        pixel coordinates and a ``probability`` attribute.
        """
        if self.display_results:
            for obj in objects:
                # Clamp the label's y coordinate so it stays inside the frame.
                y = max(obj.top, 10)
                self.draw_text(image, f"{obj.probability:.2f}", obj.left, y)
                cv2.rectangle(image, (obj.left, obj.top), (obj.right, obj.bottom), (0, 255, 0), 1)

    def draw_centroids(self, image, centroids_dict):
        """Draw each tracked object's ID above its centroid point."""
        if self.display_results:
            for objectID, centroid in centroids_dict.items():
                self.draw_text(image, f"ID {objectID}", centroid[0] - 10, centroid[1] - 10)
                cv2.circle(image, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    def show(self, image):
        """Display one frame, update the FPS counter and process key events."""
        current_sec = int(time.time())
        # `!=` instead of the original `not ... == ...`: same logic, clearer.
        if self.last_rendered_sec != current_sec:
            # A new wall-clock second started: publish last second's count.
            self.last_rendered_sec = current_sec
            self.fps = self.frame_count
            self.frame_count = 0
        self.frame_count += 1
        self.draw_text(image, f"FPS : {self.fps}", 5, 15)
        cv2.imshow("hit q key to exit, d key to show/hide display info.", image)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            self.shown = False
            cv2.destroyAllWindows()
        elif key == ord('d'):
            self.display_results = not self.display_results
|
#!/usr/bin/env python
import os
import sys
from django.core.management import execute_from_command_line
if __name__ == "__main__":
    # Put the project's apps directory on sys.path so each app is importable.
    project_root = os.path.dirname(os.path.abspath(__file__))
    apps_dir = os.path.join(project_root, 'avere/apps')
    sys.path.append(apps_dir)
    # Default the settings module; an existing environment value wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "avere.settings")
    execute_from_command_line(sys.argv)
|
#!/usr/bin/python
import requests

# Fetch a public Instagram profile page and report the HTTP status.
page = requests.get('https://www.instagram.com/sooraj.s__/')
print(page.status_code)

from bs4 import BeautifulSoup

soup = BeautifulSoup(page.content, 'html.parser')
soup.find_all(class_="")  # NOTE(review): result is unused -- confirm this call is needed
# One get_text() snapshot per top-level element; all entries are identical.
txt = [soup.get_text() for m in soup]
print(txt[0])
# Context manager guarantees the file is flushed and closed even on error
# (the original handle was closed manually and would leak on an exception).
with open('text.txt', 'w') as newfile:
    newfile.write(txt[0])
|
from django.contrib import admin
from django.urls import path
from polls.views import polls_list, option_vote, polls_view, polls_index, polls_create
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# URL routes for the polls app: index, list, create, vote and detail views.
urlpatterns = [
    path('', polls_index, name='polls-index'),
    path('polls/list/', polls_list, name='polls-list'),
    path('polls/create/', polls_create , name='polls-create'),
    path('vote/<int:option_id>/', option_vote, name='polls-vote'),
    path('polls/view/<int:poll_id>/', polls_view, name='polls-view')
]
# Serve static files through Django's staticfiles app (development helper).
urlpatterns += staticfiles_urlpatterns()
|
# Count cells of a 10x10 grid never covered by any stamp, and report the
# maximum overlap. Input lines are "x,y,s" triples read from stdin.
paper = {}
counter = 0
i = 0
# Initialise every cell of the 10x10 grid to zero coverage.
for i in range(10):
    for j in range(10):
        paper.update({(i,j):0})
# NOTE: after the loops above i == 9, so at most 41 triples are read here;
# this mirrors the original control flow.
while i < 50:
    try:
        x, y, s = list(map(int,input().split(",")))
    except (ValueError, EOFError):
        # Narrowed from a bare except: stop on end of input or a malformed
        # line, but no longer swallow KeyboardInterrupt/SystemExit.
        break
    if s == 3:
        # A size-3 stamp also covers cells two steps away on each axis.
        paper[(x+2,y)] += 1
        paper[(x-2,y)] += 1
        paper[(x,y+2)] += 1
        paper[(x,y-2)] += 1
    if s >= 2:
        # Stamps of size >= 2 cover the four diagonal neighbours.
        paper[(x+1,y+1)] += 1
        paper[(x+1,y-1)] += 1
        paper[(x-1,y+1)] += 1
        paper[(x-1,y-1)] += 1
    # Every stamp covers its centre and the four orthogonal neighbours.
    paper[(x,y)] += 1
    paper[(x,y-1)] += 1
    paper[(x,y+1)] += 1
    paper[(x+1,y)] += 1
    paper[(x-1,y)] += 1
    i += 1
# Count untouched cells, then report the maximum coverage value.
counter = sum(1 for i in range(10) for j in range(10) if paper[(i,j)] == 0)
print(counter)
print(max(paper.values()))
'''
This module provides a set of useful functions on strings
To transform them in lists :
- `explode_protected` to explode a string in a smart way
'''
def explode_protected(delim, str, protectors = ['()']) -> list:
    """
    Like an explode/split function, but delimiter characters enclosed by a
    protector pair (e.g. parentheses) are ignored.<br>
    Look in test/test_string.py for examples.

    Warnings
    --------
    This does not check if the protectors are balanced and it only works for
    delimiters of length 1.

    Parameters
    ----------
    delim: str | list
        delimiter string to explode str or a list of delimiters.<br>
        Only delimiters of length 1 are supported
    str: str
        the string to be exploded (the parameter name shadows the builtin
        ``str``; kept unchanged for backward compatibility with keyword callers)
    protectors: list, optional
        list of strings like `['()', '""']`. All strings should be of length 2:
        the 1st char is the opening protector, the 2nd is the closing protector,
        so all delim characters between the protectors are ignored during the
        explode process

    Returns
    -------
    list
        the list of exploded parts of str, each stripped of surrounding
        whitespace; an empty trailing part is dropped

    Raises
    ------
    ValueError
        if any delimiter is longer than one character
    """
    mem = 0  # current nesting depth inside protectors; split only at depth 0
    result = []
    # Normalise the delimiter argument to a list of single characters.
    delims = delim if isinstance(delim, list) else [delim]
    for d in delims:
        if len(d) > 1:
            # ValueError is more precise than the original bare Exception and
            # stays backward compatible with callers catching Exception.
            raise ValueError("In explode_protected, all delimiters should be of length 1 !")
    protect_starts = list(map(lambda pr: pr[0], protectors))
    protect_ends = list(map(lambda pr: pr[1], protectors))
    new_str = ''
    for c in str:
        if c in protect_starts:
            new_str += c
            mem += 1
        elif c in protect_ends:
            new_str += c
            mem -= 1
        elif c in delims:
            if mem == 0:
                # Unprotected delimiter: close the current part.
                result.append(new_str.strip())
                new_str = ''
            else:
                new_str += c
        else:
            new_str += c
    if new_str != '':
        result.append(new_str.strip())
    return result
|
import random
from settings import (
BAG_SIZE,
MAX_ITENS,
POPULATION_SIZE,
SELECTION_PERCENT,
MUTATION_PERCENT,
MAX_ITERATION,
ITENS
)
from chromosome import Chromosome
from utils import (
roulette_selection,
crossover,
chromosome_is_valid,
mutate,
best,
get_bests
)
# --- Genetic-algorithm driver for a knapsack-style problem ---
population = []
generation = 1
best_solution = None
print("Generating a initial valid population...")
# NOTE(review): '<=' creates POPULATION_SIZE + 1 individuals -- confirm intended.
while len(population) <= POPULATION_SIZE:
    # Random bit-string genome: bit i == 1 means item i is in the bag.
    gene = [random.randint(0, 1) for i in range(MAX_ITENS)]
    c = Chromosome(gene)
    population.append(c)
print("Searching the solution...")
while (generation < MAX_ITERATION):
    selected_population = []
    new_population = []
    # Elitism: carry over the best individuals not replaced by selection.
    elitism_selecteds_size = POPULATION_SIZE - int((POPULATION_SIZE * SELECTION_PERCENT) / 100)
    new_population.extend(
        get_bests(population, elitism_selecteds_size)
    )
    # Roulette-wheel selection fills the remaining SELECTION_PERCENT slots.
    for i in range(int((POPULATION_SIZE * SELECTION_PERCENT) / 100)):
        selected_population.append(roulette_selection(population))
    # Pair each parent in the first half with one from the second half.
    selected_population = zip(
        selected_population,
        selected_population[int(len(selected_population) / 2):]
    )
    for x in selected_population:
        sons = crossover(x[0].gene, x[1].gene)
        new_population.extend(sons)
    # Mutate a fixed percentage of randomly chosen individuals in place.
    population_to_mutate = int((POPULATION_SIZE * MUTATION_PERCENT) / 100)
    for i in range(population_to_mutate):
        new_population[random.randint(0, POPULATION_SIZE - 1)] = mutate(new_population[random.randint(0, POPULATION_SIZE - 1)].gene)
    population = new_population
    best_solution = best(population).fitness
    generation += 1
print(best(population).gene)
|
import utils
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import *
from sklearn.svm import LinearSVC
#Note: You can reuse code that you wrote in etl.py and models.py and cross.py over here. It might help.
# PLEASE USE THE GIVEN FUNCTION NAME, DO NOT CHANGE IT
'''
You may generate your own features over here.
Note that for the test data, all events are already filtered such that they fall in the observation window of their respective patients. Thus, if you were to generate features similar to those you constructed in code/etl.py for the test data, all you have to do is aggregate events for each patient.
IMPORTANT: Store your test data features in a file called "test_features.txt" where each line has the
patient_id followed by a space and the corresponding feature in sparse format.
Eg of a line:
60 971:1.000000 988:1.000000 1648:1.000000 1717:1.000000 2798:0.364078 3005:0.367953 3049:0.013514
Here, 60 is the patient id and 971:1.000000 988:1.000000 1648:1.000000 1717:1.000000 2798:0.364078 3005:0.367953 3049:0.013514 is the feature for the patient with id 60.
Save the file as "test_features.txt" and save it inside the folder deliverables
input:
output: X_train,Y_train,X_test
'''
def my_features(filtered_events, feature_map):
    """Aggregate test-set events into min-max normalised sparse features and
    write them to ../deliverables/test_features.txt (one patient per line:
    "patient_id idx:value idx:value ...").

    Parameters
    ----------
    filtered_events : pd.DataFrame
        events including patient_id, event_id and value columns
    feature_map : pd.DataFrame
        mapping from event_id to a numeric feature index ``idx``
    """
    # Map raw event ids to feature indices and drop rows with missing values.
    events_to_idx = pd.merge(filtered_events, feature_map, on='event_id')
    events_to_idx = events_to_idx[['patient_id', 'idx', 'value']]
    events_to_idx = events_to_idx.dropna()
    # NOTE(review): indices below 2680 are aggregated by sum, the rest by
    # count -- the 2680 split point is assumed from the feature map; confirm.
    events_sum = events_to_idx[events_to_idx['idx'] < 2680]
    events_count = events_to_idx[events_to_idx['idx'] >= 2680]
    events_counts = events_count.groupby(['patient_id', 'idx']).agg('count')
    events_sums = events_sum.groupby(['patient_id', 'idx']).agg('sum')
    total_events = pd.concat([events_counts, events_sums])
    total_events.columns = ['value']
    total_events = total_events.reset_index()
    # Normalise each value by the per-feature maximum (max scaling).
    total_events1 = total_events[['idx', 'value']]
    max_events_value = total_events1.groupby(['idx']).max()
    max_events_value = max_events_value.reset_index()
    max_events_value.columns = ['idx', 'max_value']
    df1 = pd.merge(total_events, max_events_value, on='idx')
    # Explicit .copy() avoids pandas SettingWithCopyWarning from assigning
    # into a boolean-indexed slice.
    df1_not_zero = df1[df1['max_value'] != 0].copy()
    df1_not_zero['value'] = df1_not_zero['value'] / df1_not_zero['max_value']
    df1_zero = df1[df1['max_value'] == 0].copy()
    df1_zero['value'] = 1.0
    aggregated_events = pd.concat([df1_zero, df1_not_zero])
    aggregated_events = aggregated_events[['patient_id', 'idx', 'value']]
    aggregated_events.columns = ['patient_id', 'feature_id', 'feature_value']
    # Collect (feature_id, feature_value) pairs per patient.
    aggregated_events['merged'] = aggregated_events.apply(lambda row: (row['feature_id'], row['feature_value']), axis=1)
    patient_features = aggregated_events.groupby('patient_id')['merged'].apply(lambda x: x.tolist()).to_dict()
    # Context manager: the original handle was never closed, so the last
    # buffered lines could be lost.
    with open('../deliverables/test_features.txt', 'wb') as deliverable1:
        for key in sorted(patient_features):
            line = "%d" % (key)
            for value in sorted(patient_features[key]):
                merged = "%d:%.6f" % (value[0], value[1])
                line = line + " " + merged
            deliverable1.write((line + " " + "\n").encode())
'''
You can use any model you wish.
input: X_train, Y_train, X_test
output: Y_pred
'''
def my_classifier_predictions(X_train,Y_train,X_test):
    """Fit a bagged AdaBoost ensemble on the training data and return the
    predicted labels for X_test."""
    # Base learner: AdaBoost over depth-5 decision trees.
    boosted_trees = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5), n_estimators=1000)
    # Bagging over the boosted model to reduce variance further.
    model = BaggingClassifier(boosted_trees, n_estimators=10)
    model.fit(X_train, Y_train)
    return model.predict(X_test)
def main():
    """Build test features, train on the provided SVMLight training data and
    emit predictions for the test patients."""
    # Test-set events and the event -> feature-index map.
    events = pd.read_csv('../data/test/events.csv')
    feature_map = pd.read_csv('../data/test/event_feature_map.csv')
    # Writes ../deliverables/test_features.txt as a side effect.
    my_features(events,feature_map)
    X_train, Y_train = utils.get_data_from_svmlight("../deliverables/features_svmlight.train")
    # NOTE(review): Y_test read from the test-features file is never used.
    X_test, Y_test = utils.get_data_from_svmlight("../deliverables/test_features.txt")
    Y_pred = my_classifier_predictions(X_train,Y_train,X_test)
    #Y_pred =np.round(Y_pred)
    print(Y_pred)
    utils.generate_submission("../deliverables/test_features.txt",Y_pred)
    # generate_submission writes (patient_id, predicted label) rows to
    # "my_predictions.csv" in the deliverables folder.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/4/19 下午3:17
# @Author : ZHZ
import pandas as pd
import datetime
# Python 2 script. Prediction window: 20151228-20160110
config = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_config1.csv",index_col=0)
sample = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_sample.csv",index_col=0)
item_store_feature = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/2_isf2.csv", index_col=0)
# cost is not included yet; remember to add it
#item_feature = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/2_if2.csv", index_col=0)
item_feature = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_if1.csv", index_col=0)
# Reference dates. The leading-zero integer literals below are valid in
# Python 2 only (01 and 10 evaluate to 1 and 8... NB: 01 -> 1, 10 -> 10).
days_20141009 = datetime.datetime(2014, 10, 9)
days_20151214 = datetime.datetime(2015, 12, 14)
days_20151227 = datetime.datetime(2015, 12, 27)
days_20151228 = datetime.datetime(2015, 12, 28)
days_20160110 = datetime.datetime(2016, 01, 10)
days_20141228 = datetime.datetime(2014, 12, 28)
days_20150110 = datetime.datetime(2015, 01, 10)
# Convert yyyymmdd integer dates into day offsets from 2014-10-09.
item_store_feature['days_20141009'] = item_store_feature['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
item_feature['days_20141009'] = item_feature['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
def filterItem(temp,days_start,days_end):
    """Keep only rows whose day offset lies within [days_start, days_end]."""
    # Convert the absolute boundary dates into offsets from the reference day.
    lo = (days_start - days_20141009).days
    hi = (days_end - days_20141009).days
    inside = (temp['days_20141009'] >= lo) & (temp['days_20141009'] <= hi)
    return temp[inside]
# Restrict both feature tables to the reference windows used for test 1.
filtered_item_feature = filterItem(item_feature,days_20151214,days_20151227)
filtered_item_store_feature = filterItem(item_store_feature,days_20141228,days_20150110)
#filtered_item_feature = item_feature
#filtered_item_store_feature = item_store_feature
# Produce a test result aggregated over all stores; the first test uses the
# mean daily sales as the prediction.
def getAllStoreTestResult():
    # Builds rows of [item_id, 'all', mean daily qty] and writes them to CSV.
    item = []
    items = []
    for i,j in filtered_item_feature.groupby([filtered_item_feature['item_id']]):
        item.append(i)
        item.append('all')
        # Mean over the 28-day window (Python 2: '/' floors for integers).
        item.append(j.qty_alipay_njhs.sum()/28)
        items.append(item)
        item = []
        #break
    pd.DataFrame(items,columns=None).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                                "Data/TestData/test1_sample_all.csv",index = None,columns=None)
# Produce a per-store test result; the first test uses the mean daily sales.
def getKidStoreTestResult():
    item = []
    items = []
    # Group by (item_id, store_code) pairs.
    for i,j in filtered_item_store_feature.groupby([filtered_item_store_feature['item_id'],
                                                    filtered_item_store_feature['store_code']]):
        #print j
        item.append(i[0])
        item.append(i[1])
        # Mean over the 28-day window (Python 2: '/' floors for integers).
        item.append(j.qty_alipay_njhs.sum()/28)
        items.append(item)
        print item
        item = []
        #break
    pd.DataFrame(items,columns=None).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                                "Data/TestData/test1_sample_kid.csv",index = None,columns=None)
# Generate both result files when the script runs.
getAllStoreTestResult()
getKidStoreTestResult()
|
import os
from flask import Flask,request,redirect,url_for,render_template
from cfenv import AppEnv
import hdbcli
from hdbcli import dbapi
from test import request_refresh_and_access_token,get_user_info_using_access_token
from tabulate import tabulate
# Resolve the templates directory next to this file; tabulate.html inside it
# is (re)generated on each viewProduct() request.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
templates = os.path.join(BASE_DIR,"templates")
temp_file = templates + "/tabulate.html"
app = Flask(__name__)
env = AppEnv()
# Bound HANA service instance from the Cloud Foundry environment.
hana_service = 'hana'
hana = env.get_service(label=hana_service)
# print('services', env.services)
# uaa_service = env.get_service(name='myuaa')
# security_context = xssec.create_security_context(uaa_service)
# print('uaa service',uaa_service, security_context)
@app.route('/createtable/',methods=['GET'])
def createtable():
    """Create the USER_OBJECT1 table in HANA; returns a status string."""
    conn = None
    try:
        conn = dbapi.connect(address=hana.credentials['host'],
                             port=int(hana.credentials['port']),
                             user=hana.credentials['user'],
                             password=hana.credentials['password'],
                             encrypt='true',
                             sslTrustStore=hana.credentials['certificate'])
        cursor = conn.cursor()
        cursor.execute("CREATE TABLE USER_OBJECT1 (C2 VARCHAR(255),C3 VARCHAR(255),C4 VARCHAR(255), C5 VARCHAR(255))")
        cursor.close()
        return 'TABLE CREATED'
    except Exception as e:
        return 'Error: {}'.format(e)
    finally:
        # The original leaked the connection on every request; always release it.
        if conn is not None:
            conn.close()
@app.route('/')
def hello():
    """Landing page: connect to HANA and show user, time and table info."""
    try:
        if hana is None:
            return "Can't connect to HANA service '{}' ? check service name?".format(hana_service)
        else:
            conn = dbapi.connect(address=hana.credentials['host'],
                                 port=int(hana.credentials['port']),
                                 user=hana.credentials['user'],
                                 password=hana.credentials['password'],
                                 encrypt='true',
                                 sslTrustStore=hana.credentials['certificate'])
            cursor = conn.cursor()
            cursor.execute("select CURRENT_UTCTIMESTAMP from DUMMY")
            ro = cursor.fetchone()
            cursor.execute('SELECT CURRENT_USER FROM DUMMY')
            # NOTE(review): assumes hdbcli result rows support lookup by
            # column name -- confirm against the driver version in use.
            techUser = cursor.fetchone()['CURRENT_USER']
            cursor.execute('SELECT SESSION_CONTEXT(\'APPLICATIONUSER\') "APPLICATION_USER" FROM "DUMMY"')
            appUser = cursor.fetchone()['APPLICATION_USER']
            # print('fetchall', cursor.fetchall())
            cursor.execute("SELECT TABLE_NAME FROM SYS.M_TABLES")
            tables = cursor.fetchall()
            # print("tables", cursor.fetchall())
            # html output
            output = '''
            <h1>Welcome to SAP HANA!</h1>
            <p>Technical User: %s</p>
            <p>Application User: %s</p>
            <p>Current time is: %s</p>
            <p>tables: %s</p>
            ''' % (techUser,appUser,str(ro["CURRENT_UTCTIMESTAMP"]),str(tables))
            cursor.close()
            conn.close()
            return output
    except hdbcli.dbapi.Error as e:
        return 'something went wrong {}'.format(e)
    except Exception as e:
        return 'Error: {}'.format(e)
# used to read incoming POST, PUT, DELETE request args
def getRequestParams(data):
params = {}
req = data.decode('utf-8')[1:-1].split(',')
for param in req:
temp = param.split(':')
params[temp[0].strip()[1:-1]] = temp[1].strip()[1:-1]
return params
# Last authenticated user's details, populated by the OAuth callback().
user_email = ''
user_first_name = ''
user_last_name = ''
@app.route("/home/")
def home():
    # Landing page shown after the login redirect.
    return render_template('index2.html')
@app.route("/login/")
def login():
    """Redirect the browser to the XSUAA OAuth authorization endpoint."""
    try:
        # The authorization server redirects back to /login/callback with a code.
        request_uri = "https://lti.authentication.eu10.hana.ondemand.com/oauth/authorize?client_id=sb-authcode-newapp!t1686&response_type=code"
        return redirect(request_uri)
    except Exception as e:
        return 'Error: {}'.format(e)
@app.route("/login/callback")
def callback():
    """OAuth callback: exchange the auth code for tokens, fetch the user's
    profile into module-level globals, then redirect to the home page."""
    try:
        # Get authorization code sent back to you
        code = request.args.get("code")
        print('code is',code)
        request_access_token = request_refresh_and_access_token(code)
        if request_access_token['status'] == 200:
            get_user_information = get_user_info_using_access_token(request_access_token['access_token'],
                                                        request_access_token['id_token'])
            if get_user_information['status'] == 200:
                # return redirect(url_for('addProduct', user = get_user_information['user_info']['email']))
                # NOTE(review): module-level globals are shared by all
                # sessions -- unsafe with concurrent users; confirm acceptable.
                global user_email,user_last_name,user_first_name
                user_email = get_user_information['user_info']['email']
                user_first_name = get_user_information['user_info']['first_name']
                user_last_name = get_user_information['user_info']['last_name']
                # return 'WELCOME {}'.format(user_email)
                # return render_template('index2.html')
                return redirect("https://webrouter.cfapps.eu10.hana.ondemand.com/home/")
            else:
                return 'could not fetch user info'
        else:
            return 'something went wrong in requesting access token'
    except Exception as e:
        return 'Error: {}'.format(e)
# adds product to database if a valid post request is received
@app.route('/addProduct/',methods=['GET','POST'])
def addProduct():
    """Insert the logged-in user's details and the uploaded file's name into
    USER_OBJECT1 on POST. GET falls through and returns None (unchanged from
    the original behavior)."""
    conn = None
    try:
        if request.method == 'POST':
            # check if the post request has the file part
            f = request.files['file']
            print('filename is=---->',f.filename,f)
            conn = dbapi.connect(address=hana.credentials['host'],
                                 port=int(hana.credentials['port']),
                                 user=hana.credentials['user'],
                                 password=hana.credentials['password'],
                                 encrypt='true',
                                 sslTrustStore=hana.credentials['certificate'])
            sql = 'INSERT INTO USER_OBJECT1 (C2, C3, C4, C5) VALUES (?, ?, ?, ?)'
            cursor = conn.cursor()
            # BUG FIX: the statement has four placeholders but the original
            # passed five values (a trailing None); supply exactly four.
            in_params = (user_first_name,user_last_name,user_email,f.filename)
            print('in_params',in_params)
            cursor.execute(sql,in_params)
            conn.commit()  # persist the insert explicitly
            cursor.close()
            return "inserted successfully {}".format(in_params)
    except hdbcli.dbapi.Error as e:
        return 'something went wrong {}'.format(e)
    finally:
        # The original leaked the connection; always release it.
        if conn is not None:
            conn.close()
# view product from database if a valid get request is received
@app.route('/viewProduct/',methods=['GET'])
def viewProduct():
    """Render every USER_OBJECT1 row as an HTML table via tabulate."""
    conn = None
    try:
        table = [['FIRST NAME','LAST NAME','EMAIL','FILE']]
        conn = dbapi.connect(address=hana.credentials['host'],
                             port=int(hana.credentials['port']),
                             user=hana.credentials['user'],
                             password=hana.credentials['password'],
                             encrypt='true',
                             sslTrustStore=hana.credentials['certificate'])
        sql = 'SELECT * FROM USER_OBJECT1'
        cursor = conn.cursor()
        cursor.execute(sql)
        records = cursor.fetchall()
        print("Total number of rows in table: ",cursor.rowcount)
        print("\nPrinting each row",records)
        for row in records:
            table.append(list(row))
            print('row is',list(row))
        cursor.close()
        final_table = tabulate(table,headers='firstrow',tablefmt='html')
        print(final_table)
        # NOTE(review): rewriting the template file per request is racy under
        # concurrent access -- consider returning the HTML string directly.
        with open(temp_file,"w") as templ_file:
            templ_file.write(final_table)
        return render_template('tabulate.html')
    except hdbcli.dbapi.Error as e:
        return 'something went wrong {}'.format(e)
    except Exception as e:
        return 'Error: {}'.format(e)
    finally:
        # The original leaked the connection; always release it.
        if conn is not None:
            conn.close()
if __name__ == '__main__':
    # Bind to the Cloud Foundry-assigned port, defaulting to 5000 locally.
    app.run(host='0.0.0.0',port=int(os.getenv("PORT",5000)))
|
def gcd(s,v):
    """Return the greatest common divisor of s and v (Euclid's algorithm)."""
    # Iterative form of the original tail recursion: identical results.
    while v != 0:
        s, v = v, s % v
    return s
# Read two integers from one input line and print their GCD.
s1,v1=map(int,input().split())
print(gcd(s1,v1))
|
# -*- coding: utf-8 -*-
from django.db import models
from django_extensions.db.fields import AutoSlugField
from abs_models import Abs_titulado_slugfy
#from Materia.models import Materia
from Corretor.base import CorretorException,ComparadorException,CompiladorException, ExecutorException
from template_avaliacao import TemplateAvaliacao
import threading
import datetime
class Avaliacao(Abs_titulado_slugfy):
    """
    Represents an assessment (avaliacao) taken / being taken by a student,
    or a practice run (simulado).
    """
    # Student taking this assessment.
    aluno = models.ForeignKey('Aluno.Aluno', related_name="avaliacoes")
    # Assessment template that generated this assessment.
    templateAvaliacao = models.ForeignKey(TemplateAvaliacao,related_name="avaliacoes")
    ativa = models.BooleanField(u"Ativa",default=True)
    # Whether this is a practice run (simulado) or an official assessment.
    simulado = models.BooleanField(u"Simulado?",default=False)
    data_inicio = models.DateTimeField(u"Data de Inicio")
    data_termino = models.DateTimeField(u"Data de Termino")
    class Meta:
        verbose_name = u'Avaliação'
        app_label = 'Avaliacao'
    def add_questao(self,questao,filtro):
        """Attach a question to this assessment with an initial grade of 0.00.

        Both arguments accept either a model instance or a raw id.
        """
        kwargs={'nota':"0.00",}
        if filtro.__class__.__name__ == "FiltroQuestao":
            kwargs['filtro']=filtro
        else:
            kwargs['filtro_id']=filtro
        if questao.__class__.__name__ == "Questao":
            kwargs['questao']=questao
        else:
            kwargs['questao_id']=questao
        q = self.questoes.create(**kwargs)
        print "Questao Criada>>>>%s" % str(q)
        # for fonte in q.questao.fontesGabarito.filter(usarNaResolucao=True):
        #     fonte.copiar_para_aluno(q)
    @property
    def get_nota(self):
        "Return the total grade the student achieved in this assessment."
        res =0
        for questao in self.questoes.all():
            res += questao.nota
        return res
    def __corrigir_questao(self,**kwargs):
        "Worker executed in each grading thread started by terminar()."
        questao = kwargs.get('questao',None)
        corretor = questao.questao.corretor()
        try:
            corretor.corrigir(**kwargs)
        except CorretorException as erro:
            # Grading errors are deliberately swallowed; the question simply
            # keeps its current grade.
            pass
    def terminar(self):
        "Finish this assessment; a full grading pass runs afterwards, each question graded in its own thread."
        self.ativa=False
        self.save()
        for questao in self.questoes.all():
            t = threading.Thread(target=self.__corrigir_questao,
                                 args=[],
                                 kwargs={'questao':questao})
            t.setDaemon(True)
            t.start()
    @classmethod
    def get_or_create(cls,templateAvaliacao,aluno,simulado=False):
        """Return the existing assessment/simulado for this student and
        template (avoiding duplicates), creating one from the template if
        none exists yet.
        """
        # Check whether an assessment/simulado already exists for this student.
        try:
            # If it exists, reuse it.
            avaliacao = cls.objects.get(templateAvaliacao=templateAvaliacao,aluno=aluno,simulado=simulado)
        except cls.DoesNotExist:
            # Otherwise generate a fresh one from the template.
            avaliacao = templateAvaliacao.gerarAvaliacao(aluno,simulado=simulado)
        return avaliacao
    @property
    def terminada(self):
        """
        Returns True when this assessment is finished: it must not be active;
        for an official assessment the template must be finished, and for a
        simulado the end time is taken into account.
        """
        if self.ativa:
            return False
        if not self.simulado:
            return self.templateAvaliacao.terminada
        else:
            # NOTE(review): '>=' means "end time has not yet passed" -- confirm
            # the comparison direction is intended for a finished simulado.
            return self.data_termino >= datetime.datetime.now()
|
# Placeholder dictionary, populated elsewhere.
spamdict = {
}
# Generated by Django 3.1.3 on 2020-11-23 03:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the 'tienda' app: Cliente, Pedido, Producto,
    # Pedido_item and Direccion_envio (auto-generated by Django 3.1.3).
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70, null=True)),
                ('email', models.CharField(max_length=100, null=True)),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Pedido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fecha_pedido', models.DateField(auto_now_add=True)),
                ('completado', models.BooleanField(default=False, null=True)),
                ('transaccion_id', models.CharField(max_length=70, null=True)),
                ('cliente', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tienda.cliente')),
            ],
        ),
        migrations.CreateModel(
            name='Producto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=70, null=True)),
                ('precio', models.FloatField()),
                ('digital', models.BooleanField(default=False, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Pedido_item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cantidad', models.IntegerField(default=0, null=True)),
                ('fecha', models.DateField(auto_now_add=True)),
                ('Pedido', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tienda.pedido')),
                ('producto', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tienda.producto')),
            ],
        ),
        migrations.CreateModel(
            name='Direccion_envio',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('direccion', models.CharField(max_length=70, null=True)),
                ('ciudad', models.CharField(max_length=70, null=True)),
                ('estado', models.CharField(max_length=70, null=True)),
                ('codigo_postal', models.CharField(max_length=70, null=True)),
                ('fecha', models.DateField(auto_now_add=True)),
                ('cliente', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tienda.cliente')),
                ('pedido', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tienda.pedido')),
            ],
        ),
    ]
|
import unittest
import task2 as t
class MyTestCase(unittest.TestCase):
    def test_get_history_successes(self):
        """Check get_history_successes against three fixed scenarios."""
        cases = [
            ([14000000, 13500000, 13500000,
              11000000, 9000000, 9000000,
              9000000],
             [100000, 900000, 8000000, 2000000, 2700000, 100000],
             [5, 5, 4, 3, 2, 2]),
            ([100, 100, 99, 99, 99, 95, 95, 90, 90, 1, 1, 1],
             [0, 1, 89, 4, 5, 1, 1, 1],
             [6, 5, 4, 4, 2, 1, 1, 1]),
            ([10, 9, 9, 9, 8, 7, 7, 7, 6, 5, 4, 3, 2, 2, 1],
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 1, 1]),
        ]
        # Same three assertEqual checks as before, table-driven.
        for top_list, successes, expected in cases:
            self.assertEqual(expected,
                             t.get_history_successes(top_list, successes))
if __name__ == '__main__':
    # Run the tests when executed directly.
    unittest.main()
|
#demo06_stack.py -- stacking and splitting arrays
import numpy as np

a = np.arange(1, 7).reshape(2, 3)
b = np.arange(7, 13).reshape(2, 3)
print(a)
print(b)
# Stack then split along each axis in turn: horizontal, vertical, depth.
# Output is identical to performing the six operations one by one.
for stack, split in ((np.hstack, np.hsplit),
                     (np.vstack, np.vsplit),
                     (np.dstack, np.dsplit)):
    c = stack((a, b))
    print(c)
    a, b = split(c, 2)
    print(a)
    print(b)
|
#!/usr/bin/env python
import pika
import sys
# Python 2 script: publish one message (CLI args joined, or a default IP)
# to the 'ip_exchange' fanout exchange on the RabbitMQ broker below.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='192.168.20.10'))
channel = connection.channel()
channel.exchange_declare(exchange='ip_exchange', type='fanout')
message = ' '.join(sys.argv[1:]) or "10.10.10.10"
channel.basic_publish(exchange='ip_exchange', routing_key='', body=message)
print " [x] Sent %r" % (message,)
connection.close()
#!/usr/bin/env python
# -*- coding::utf-8 -*-
# Author :GG
# 给定一个排序数组和一个目标值,在数组中找到目标值,并返回其索引。如果目标值不存在于数组中,返回它将会被按顺序插入的位置。
#
# 你可以假设数组中无重复元素。
#
# 示例 1:
#
# 输入: [1,3,5,6], 5
# 输出: 2
#
#
# 示例 2:
#
# 输入: [1,3,5,6], 2
# 输出: 1
#
#
# 示例 3:
#
# 输入: [1,3,5,6], 7
# 输出: 4
#
#
# 示例 4:
#
# 输入: [1,3,5,6], 0
# 输出: 0
#
# Related Topics 数组 二分查找
# 👍 555 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
from typing import List
class Solution:
    def searchInsert(self, nums: List[int], target: int) -> int:
        """Return the index of target in the sorted list nums, or the index
        at which it would be inserted to keep nums sorted.

        Uses O(log n) binary search and, unlike the previous version, does
        not mutate the caller's list (the old code appended target and
        re-sorted, which was O(n log n) and had a visible side effect).
        """
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid
        # lo is the leftmost position where target fits (bisect_left).
        return lo
# leetcode submit region end(Prohibit modification and deletion)
|
#!/usr/bin/env python3
# Annotation batches: (annotator name, document count, filenames needing
# annotation). One "<name>-list.txt" work list is written per annotator.
info = [
    ("jp1", "1077", ["2006-06-05-12-second.txt", "2008-06-03-11-first.txt"]),
    ("jp2", "1080", ["2007-07-03-06-second.txt", "2007-06-17-06-second.txt"]),
    ("jp3", "936", ["2006-06-01-12-second.txt"]),
    ("jp4", "1099", ["2009-01-05-11-first.txt", "2013-10-28-03-first.txt", "2012-11-30-14-first.txt", "2012-02-03-20-first.txt"]),
    ("jp5", "1090", ["2007-12-17-04-first.txt", "2006-12-10-09-second.txt"]),
    ("va1", "1019", ["2005-07-25-11-first.txt", "2012-12-15-02-first.txt", "2005-02-08-02-second.txt", "2013-05-05-18-first.txt", "2013-05-07-02-first.txt", "2005-05-19-11-first.txt", "2012-06-02-18-first.txt"]),
    ("va2", "968", ["2008-04-27-06-first.txt"]),
    ("va3", "1053", ["2007-06-04-10-second.txt", "2010-08-29-20-first.txt"]),
    ("va4", "1055", ["2013-01-30-01-first.txt", "2013-02-24-02-first.txt", "2010-08-05-11-first.txt", "2006-02-24-10-first.txt"]),
    ("va5", "1098", ["2007-08-19-01-first.txt", "2012-05-20-16-first.txt", "2013-09-12-11-first.txt"]),
    ("jkk1", "984", ["2005-06-20-03-second.txt", "2009-03-25-11-first.txt", "2010-02-13-01-first.txt"]),
    ("jkk2", "933", ["2012-11-24-20-first.txt", "2012-05-04-04-first.txt", "2013-07-19-03-first.txt", "2013-05-19-22-first.txt", "2013-12-02-21-first.txt"]),
    ("jkk3", "1098", ["2008-04-30-08-first.txt", "2011-04-28-09-first.txt"]),
    ("jkk4", "1037", ["2006-08-23-03-first.txt", "2007-09-07-07-second.txt"]),
    ("jkk5", "984", ["2007-01-19-07-first.txt", "2008-04-20-09-first.txt"]),
    ("jkk6", "553", ["2011-08-17-08-first.txt", "2013-08-30-11-first.txt", "2013-07-10-23-first.txt", "2013-10-11-07-first.txt", "2013-01-11-13-first.txt", "2013-10-04-12-first.txt"]),
]

for name, count, filenames in info:
    # Context manager guarantees the list file is closed (the original
    # closed manually and would leak the handle on an exception).
    with open(name + "-list.txt", 'w') as out:
        print("../../guide.txt", 'tmp-file', 0, 0, file=out)
        for filename in filenames:
            to_annotate = "../../raw/" + filename
            # name[:-1] strips the trailing batch digit from the annotator name.
            to_save = filename + ".annotated." + name[:-1]
            print(to_annotate, to_save, 101, 0, file=out)
|
from selenium import webdriver
from selenium.webdriver.support.select import Select
# Drive Internet Explorer to the device's web UI, click through the
# certificate warning, then open the password page.
driver = webdriver.Ie()
driver.get("https://172.18.5.111")
# "Continue to this website" link on the IE certificate-error page.
driver.find_element_by_id("overridelink").click()
driver.find_element_by_xpath("//img[@src='/no1/images/passwd.gif']").click()
import socket
# Simple interactive TCP client: prompt for server/port, then loop sending
# typed messages and printing replies until the connection closes.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
svrIP = input(("Sever IP(Sever IP(default: 127.0.0.1): "))
if svrIP=='':
    svrIP= '127.0.0.1'
port = input('port(default: 2500):')
if port == '':
    port = 2500
else:
    port = int(port)
sock.connect((svrIP,port))
print('Connetcted to' + svrIP)
while True:
    msg = input("Sending message: ")
    if not msg:
        # Empty input: prompt again without sending.
        continue
    try:
        sock.send(msg.encode())
    except:
        # Send failed -- peer closed the connection (Korean: "connection closed").
        print("연결종료")
        break
    try:
        msg = sock.recv(1024)
        if not msg:
            # Zero-byte read means the server closed the socket.
            print("연결종료")
            break
        print(f'Received message: {msg.decode()}')
    except:
        # Korean: "the connection has been closed".
        # NOTE(review): the loop continues after a recv error instead of
        # breaking -- confirm this is intended.
        print("연결이 종료되었습니다")
sock.close()
|
#!/usr/bin/python
import cgi
import cgitb
cgitb.enable()
print "Content-type: text/html\r\n\r\n"
form = cgi.FieldStorage()
if "file" in form.keys():
files = form["file"]
print files.filename, files.name, files.value
open('/tmp/' + files.filename, 'wb').write(files.value.read()) |
#preloaded variable: "dictionary"
def make_backronym(acronym, lookup=None):
    """Expand *acronym* into a space-separated backronym.

    Each character of *acronym* (case-insensitively) is mapped to a word via
    *lookup*.  Generalized: *lookup* defaults to the preloaded module-level
    ``dictionary`` (keys "A"-"Z"), preserving the original behaviour, but a
    caller may now supply any mapping.

    :param acronym: string whose letters seed the backronym
    :param lookup: optional mapping from uppercase letter to word
    :return: words joined by single spaces, no trailing space
    """
    if lookup is None:
        lookup = dictionary  # preloaded by the kata environment
    return ' '.join(lookup[ch] for ch in acronym.upper())
'''
back·ro·nym
An acronym deliberately formed from a phrase whose initial letters spell out
a particular word or words, either to create a memorable name or as a
fanciful explanation of a word's origin.
"Biodiversity Serving Our Nation", or BISON
(from https://en.oxforddictionaries.com/definition/backronym)
Complete the function to create backronyms. Transform the given string
(without spaces) to a backronym, using the preloaded dictionary and return
a string of words, separated with a single space (but no trailing spaces).
The keys of the preloaded dictionary are uppercase letters A-Z and the values
are predetermined words, for example:
dictionary["P"] == "perfect"
Examples
"dgm" ==> "disturbing gregarious mustache"
"lkj" ==> "literal klingon joke"
'''
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
# Create time: 2020-07-14
import json
from .query import QueryExecutor
from .entity import Contract, Node, Cluster
from operator import itemgetter, attrgetter
import logging
def load_balancing_by_node_centor(node_center, privateKey, publicKey):
    """Query the node-center for node connection info (balancing not implemented).

    Currently only fetches and prints the raw query result; the actual
    load-balancing decision is left as the TODO below.

    :param node_center: base URL of the node-center service
    :param privateKey: hex-encoded private key passed to the query
    :param publicKey: hex-encoded public key passed to the query
    :return: always ``None`` until the TODO is implemented
    """
    q = QueryExecutor()
    data = q.queryNodesConnWithNodeCenter(
        nc_home=node_center, publicKey=publicKey, privateKey=privateKey)
    print(data)  # debugging output; consider logging once implemented
    # TODO
    return None
def query_deployed_cluster(clusters, threshold, contract):
    """Pick the first cluster with enough spare storage and traffic.

    Every cluster is (re)initialised against *threshold* and its remaining
    capacity recomputed before the search.  Returns the chosen cluster's
    name, or ``None`` when no cluster can host *contract*.
    """
    # Refresh capacity figures for all candidates first.
    for candidate in clusters:
        candidate.init(threshold)
        candidate.calculate_storage_and_traffic()
    # First-fit search over the refreshed clusters.
    for candidate in clusters:
        fits = (candidate.remain_storage >= contract.storage
                and candidate.remain_traffic >= contract.traffic)
        if fits:
            logging.info("Succ to schedule: {}".format(candidate.name))
            return candidate.name
    logging.error("Failed to schedule.")
    return None
def _print_cluster_state(clusters):
logging.debug("===== state =====")
for cluster in clusters:
logging.debug("Cluster({}): {}".format(cluster.name, cluster.contracts.keys()))
logging.debug("=================")
def load_balancing_by_nodes(clusters, threshold):
    """Iteratively move contracts between clusters until none is overloaded.

    Each pass recomputes every cluster's remaining capacity, finds the first
    cluster whose remaining storage or traffic is negative, picks that
    cluster's most expensive contract and tries to relocate it to any other
    cluster that can absorb it.  At most one contract moves per pass, and
    the number of passes is bounded by the total contract count.

    :param clusters: list of Cluster objects (see .entity)
    :param threshold: capacity fraction handed to ``Cluster.init``
    :return: list of transfer dicts on success, or ``None`` if the process
        did not converge within ``tot_contract_count`` steps (this includes
        the degenerate case of zero contracts overall)
    """
    tot_contract_count = 0
    for cluster in clusters:
        cluster.init(threshold)
        logging.debug("succ init cluster: {}".format(cluster.name))
        tot_contract_count += len(cluster.contracts)
    transfers = []
    step = 0
    while step < tot_contract_count:
        # Recompute usage figures at the start of every pass.
        for cluster in clusters:
            cluster.calculate_storage_and_traffic()
        _print_cluster_state(clusters)
        # Find the index of the first overloaded cluster (if any).
        index = 0
        for cluster in clusters:
            logging.debug("cluster({}) remain_storage: {}, remain_traffic: {}"
                .format(cluster.name, cluster.remain_storage, cluster.remain_traffic))
            if cluster.remain_storage < 0 \
                    or cluster.remain_traffic < 0:
                break
            index += 1
        if index == len(clusters):
            # No cluster is over capacity: done.
            logging.info("Succ to transfer")
            break
        # NOTE(review): after the break above, the loop variable `cluster`
        # still refers to clusters[index] (the overloaded cluster); the two
        # lines below rely on that aliasing.
        contract = None
        if clusters[index].remain_storage < 0:
            contract = cluster.max_storage_contract()
        elif clusters[index].remain_traffic < 0:
            contract = cluster.max_traffic_contract()
        logging.debug("contract(id: {}, storage: {}, traffic: {}) "
            "in cluster({}) should be transfered".format(
                contract.cid, contract.storage, contract.traffic,
                clusters[index].name))
        # Try every other cluster as a destination for the chosen contract.
        transfer = None
        for idx, cluster in enumerate(clusters):
            if idx != index:
                if cluster.check_can_add_contract(contract):
                    logging.debug("try add contract(cid: {}, storage: {}, traffic: {}) to cluster({})"
                        .format(contract.cid, contract.storage, contract.traffic, cluster.name))
                    cluster.add_contract(contract)
                    logging.debug("try rm contract(cid: {}, storage: {}, traffic: {}) from cluster({})"
                        .format(contract.cid, contract.storage, contract.traffic, clusters[index].name))
                    clusters[index].remove_contract(contract.cid)
                    # NOTE(review): the contract moves FROM clusters[index]
                    # TO `cluster`, yet "src" records the receiving cluster
                    # and "dst" the overloaded source — these look swapped;
                    # confirm with consumers of the transfer list.
                    transfer = {
                        "cid": contract.cid,
                        "src": cluster.name,
                        "dst": clusters[index].name,
                    }
                    logging.info("Succ to transfer one contract: {}".format(transfer))
                    break
        if transfer is None:
            # Nowhere to put the contract: give up on this configuration.
            logging.debug("Failed to transfer.")
            break
        transfers.append(transfer)
        step += 1
    if step >= tot_contract_count:
        return None
    return transfers
if __name__ == "__main__":
    # Manual-test / demo entry point with hard-coded local endpoints.
    privateKey = "ab8c753378a031976cf2a848e57299240cdbbdecf36e726aa8a1e4a9fa9046e1"
    publicKey = "042ee9d52a0d31f1e4f9f16a636154179ed3386706add6439b778e7cc7792743b1a75076fb9682411a87ecef88652999f646a0a9b232ceecde59b5c39e7bb49f2f"
    nc_home = "http://127.0.0.1:1718"
    # load_balancing_by_node_centor(nc_home, privateKey, publicKey)
    # NOTE(review): load_balancing_by_nodes() calls .init()/.contracts on its
    # elements, which these plain dicts do not provide — as written this demo
    # would raise AttributeError; presumably the dicts should be wrapped into
    # Cluster objects first.  Confirm intended usage.
    node_infos = [{
        "home": "http://127.0.0.1:8080/SCIDE/SCManager",
        "storage": 1019400000.0,
        "traffic": 100000.0
    }, {
        "home": "http://127.0.0.1:9090/SCIDE/SCManager",
        "storage": 1019400000.0,
        "traffic": 100000.0
    }]
    threshold = 0.8
    print(load_balancing_by_nodes(node_infos, threshold))
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" ECP5 Versa platform definitions.
This is a non-core platform. To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.versa:ECP5Versa_5G_Platform"
"""
from amaranth import *
from amaranth.build import *
from amaranth.vendor.lattice_ecp5 import LatticeECP5Platform
from amaranth.lib.cdc import ResetSynchronizer
from amaranth_boards.versa_ecp5_5g import VersaECP55GPlatform as _VersaECP55G
from amaranth_boards.resources import *
from ..interface.pipe import AsyncPIPEInterface
from ..interface.serdes_phy import ECP5SerDesPIPE
from .core import LUNAPlatform
__all__ = ["ECP5Versa_5G_Platform"]
class VersaDomainGenerator(Elaboratable):
    """ Clock generator for ECP5 Versa boards. """
    def __init__(self, *, clock_frequencies=None, clock_signal_name=None):
        # Parameters are accepted for interface compatibility with LUNA's
        # other domain generators but are not used by this implementation.
        pass
    def elaborate(self, platform):
        """Build the PLLs and clock/reset plumbing for all LUNA domains."""
        m = Module()
        # Create our domains.
        m.domains.ss     = ClockDomain()
        m.domains.sync   = ClockDomain()
        m.domains.usb    = ClockDomain()
        m.domains.usb_io = ClockDomain()
        m.domains.fast   = ClockDomain()
        # Grab our clock and global reset signals.
        clk100 = platform.request(platform.default_clk)
        reset  = platform.request(platform.default_rst)
        # Generate the clocks we need for running our SerDes.
        feedback    = Signal()
        usb3_locked = Signal()
        # Primary PLL: clk100 in; CLKOP used as feedback, CLKOS drives
        # "sync", CLKOS2 drives "fast".
        m.submodules.pll = Instance("EHXPLLL",
                # Clock in.
                i_CLKI=clk100,
                # Generated clock outputs.
                o_CLKOP=feedback,
                o_CLKOS= ClockSignal("sync"),
                o_CLKOS2=ClockSignal("fast"),
                # Status.
                o_LOCK=usb3_locked,
                # PLL parameters...
                p_CLKI_DIV=1,
                p_PLLRST_ENA="ENABLED",
                p_INTFB_WAKE="DISABLED",
                p_STDBY_ENABLE="DISABLED",
                p_DPHASE_SOURCE="DISABLED",
                p_CLKOS3_FPHASE=0,
                p_CLKOS3_CPHASE=0,
                p_CLKOS2_FPHASE=0,
                p_CLKOS2_CPHASE=5,
                p_CLKOS_FPHASE=0,
                p_CLKOS_CPHASE=5,
                p_CLKOP_FPHASE=0,
                p_CLKOP_CPHASE=4,
                p_PLL_LOCK_MODE=0,
                p_CLKOS_TRIM_DELAY="0",
                p_CLKOS_TRIM_POL="FALLING",
                p_CLKOP_TRIM_DELAY="0",
                p_CLKOP_TRIM_POL="FALLING",
                p_OUTDIVIDER_MUXD="DIVD",
                p_CLKOS3_ENABLE="DISABLED",
                p_OUTDIVIDER_MUXC="DIVC",
                p_CLKOS2_ENABLE="ENABLED",
                p_OUTDIVIDER_MUXB="DIVB",
                p_CLKOS_ENABLE="ENABLED",
                p_OUTDIVIDER_MUXA="DIVA",
                p_CLKOP_ENABLE="ENABLED",
                p_CLKOS3_DIV=1,
                p_CLKOS2_DIV=2,
                p_CLKOS_DIV=4,
                p_CLKOP_DIV=5,
                p_CLKFB_DIV=1,
                p_FEEDBK_PATH="CLKOP",
                # Internal feedback.
                i_CLKFB=feedback,
                # Control signals.
                i_RST=reset,
                i_PHASESEL0=0,
                i_PHASESEL1=0,
                i_PHASEDIR=1,
                i_PHASESTEP=1,
                i_PHASELOADREG=1,
                i_STDBY=0,
                i_PLLWAKESYNC=0,
                # Output Enables.
                i_ENCLKOP=0,
                i_ENCLKOS=0,
                i_ENCLKOS2=0,
                i_ENCLKOS3=0,
                # Synthesis attributes.
                a_ICP_CURRENT="12",
                a_LPF_RESISTOR="8"
        )
        # Temporary: USB FS PLL
        # Second PLL producing the USB full-speed clocks; reuses a fresh
        # `feedback` signal (the first is fully wired into the PLL above).
        feedback    = Signal()
        usb2_locked = Signal()
        m.submodules.fs_pll = Instance("EHXPLLL",
                # Status.
                o_LOCK=usb2_locked,
                # PLL parameters...
                p_PLLRST_ENA="ENABLED",
                p_INTFB_WAKE="DISABLED",
                p_STDBY_ENABLE="DISABLED",
                p_DPHASE_SOURCE="DISABLED",
                p_OUTDIVIDER_MUXA="DIVA",
                p_OUTDIVIDER_MUXB="DIVB",
                p_OUTDIVIDER_MUXC="DIVC",
                p_OUTDIVIDER_MUXD="DIVD",
                p_CLKI_DIV = 20,
                p_CLKOP_ENABLE = "ENABLED",
                p_CLKOP_DIV = 16,
                p_CLKOP_CPHASE = 15,
                p_CLKOP_FPHASE = 0,
                p_CLKOS_DIV = 12,
                p_CLKOS_CPHASE = 0,
                p_CLKOS_FPHASE = 0,
                p_CLKOS2_ENABLE = "ENABLED",
                p_CLKOS2_DIV = 10,
                p_CLKOS2_CPHASE = 0,
                p_CLKOS2_FPHASE = 0,
                p_CLKOS3_ENABLE = "ENABLED",
                p_CLKOS3_DIV = 40,
                p_CLKOS3_CPHASE = 5,
                p_CLKOS3_FPHASE = 0,
                p_FEEDBK_PATH = "CLKOP",
                p_CLKFB_DIV = 6,
                # Clock in.
                i_CLKI=clk100,
                # Internal feedback.
                i_CLKFB=feedback,
                # Control signals.
                i_RST=reset,
                i_PHASESEL0=0,
                i_PHASESEL1=0,
                i_PHASEDIR=1,
                i_PHASESTEP=1,
                i_PHASELOADREG=1,
                i_STDBY=0,
                i_PLLWAKESYNC=0,
                # Output Enables.
                i_ENCLKOP=0,
                i_ENCLKOS2=0,
                # Generated clock outputs.
                o_CLKOP=feedback,
                o_CLKOS2=ClockSignal("usb_io"),
                o_CLKOS3=ClockSignal("usb"),
                # Synthesis attributes.
                a_FREQUENCY_PIN_CLKI="25",
                a_FREQUENCY_PIN_CLKOP="48",
                a_FREQUENCY_PIN_CLKOS="48",
                a_FREQUENCY_PIN_CLKOS2="12",
                a_ICP_CURRENT="12",
                a_LPF_RESISTOR="8",
                a_MFG_ENABLE_FILTEROPAMP="1",
                a_MFG_GMCREF_SEL="2"
        )
        # Control our resets.
        # "ss" shares the sync clock; sync/fast resets follow the ss reset.
        m.d.comb += [
            ClockSignal("ss")      .eq(ClockSignal("sync")),
            # ResetSignal("ss")      .eq(~usb3_locked),
            ResetSignal("sync")    .eq(ResetSignal("ss")),
            ResetSignal("fast")    .eq(ResetSignal("ss")),
            # ResetSignal("usb")    .eq(~usb2_locked),
            ResetSignal("usb_io")  .eq(ResetSignal("usb")),
        ]
        # LOCK is an asynchronous output of the EXHPLL block.
        # Synchronize the inverted lock signals into their target domains.
        m.submodules += ResetSynchronizer(~usb2_locked, domain="usb")
        m.submodules += ResetSynchronizer(~usb3_locked, domain="ss")
        return m
class VersaSuperSpeedPHY(AsyncPIPEInterface):
    """ Superspeed PHY configuration for the Versa-5G. """
    # Clock-rate constants used to parameterise the SerDes soft PHY.
    REFCLK_FREQUENCY = 312.5e6
    SS_FREQUENCY     = 125.0e6
    FAST_FREQUENCY   = 250.0e6
    # Which SerDes dual/channel on this package the platform wires up.
    SERDES_DUAL      = 0
    SERDES_CHANNEL   = 0
    def __init__(self, platform):
        """Request the SerDes pins and build the ECP5 soft PIPE PHY."""
        # Grab the I/O that implements our SerDes interface...
        serdes_io_directions = {
            'ch0':    {'tx':"-", 'rx':"-"},
            #'ch1':    {'tx':"-", 'rx':"-"},
            'refclk': '-',
        }
        serdes_io      = platform.request("serdes", self.SERDES_DUAL, dir=serdes_io_directions)
        serdes_channel = getattr(serdes_io, f"ch{self.SERDES_CHANNEL}")
        # Use it to create our soft PHY...
        # NOTE(review): refclk_frequency is set to FAST_FREQUENCY (250 MHz)
        # even though REFCLK_FREQUENCY (312.5 MHz) is defined above — confirm
        # this is intentional for this board's clocking scheme.
        serdes_phy = ECP5SerDesPIPE(
            tx_pads             = serdes_channel.tx,
            rx_pads             = serdes_channel.rx,
            dual                = self.SERDES_DUAL,
            channel             = self.SERDES_CHANNEL,
            refclk_frequency    = self.FAST_FREQUENCY,
        )
        # ... and bring the PHY interface signals to the MAC domain.
        super().__init__(serdes_phy, width=4, domain="ss")
    def elaborate(self, platform):
        m = super().elaborate(platform)
        # Patch in our soft PHY as a submodule.
        m.submodules.phy = self.phy
        # Drive the PHY reference clock with our fast generated clock.
        m.d.comb += self.clk.eq(ClockSignal("fast"))
        # This board does not have a way to detect Vbus, so assume it's always present.
        m.d.comb += self.phy.power_present.eq(1)
        # Enable the Versa's reference clock.
        m.d.comb += platform.request("refclk_enable").o.eq(1)
        return m
class ECP5Versa_5G_Platform(_VersaECP55G, LUNAPlatform):
    """Amaranth-boards Versa-5G platform extended with LUNA USB3 hooks."""
    name                   = "ECP5 Versa 5G"
    clock_domain_generator = VersaDomainGenerator
    default_usb3_phy       = VersaSuperSpeedPHY
    # No standard USB2 connection is defined for this board.
    default_usb_connection = None
    # Extra resources LUNA needs on top of the base board definition.
    additional_resources = [
        Resource("serdes", 0,
            Subsignal("ch0",
                Subsignal("rx", DiffPairs("Y5", "Y6")),
                Subsignal("tx", DiffPairs("W4", "W5")),
            ),
            #Subsignal("ch1",
            #    Subsignal("rx", DiffPairs("Y7", "Y8")),
            #    Subsignal("tx", DiffPairs("W8", "W9"))
            #),
            #Subsignal("refclk", DiffPairs("Y11", "Y12"))
        ),
        # The SerDes reference clock oscillator must be explicitly enabled.
        Resource("refclk_enable", 0, Pins("C12", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        # Temporary USB connection, for debugging.
        DirectUSBResource(0, d_p="A8", d_n="A12", pullup="B13", attrs=Attrs(IO_TYPE="LVCMOS33"))
    ]
    # Create our semantic aliases.
    def __init__(self, *args, **kwargs):
        """Register the LUNA-specific resources on top of the base board."""
        super().__init__(*args, **kwargs)
        self.add_resources(self.additional_resources)
|
import torch.utils.data as data_utils
from torch import Tensor
from train_config import *
import torch.nn.functional as F
DATA_DIM = 168
def predict(data, model, preprocessor=None):
    """Run *model* over three row-permutations of *data* and collect results.

    For each of three permutations, the features are optionally transformed
    by *preprocessor*, pushed through the model, and the predictions added
    as new column(s) together with a 'Permutation' index column.  Returns
    the concatenation of all three annotated frames.

    NOTE(review): u.Permuter comes from train_config (not visible here);
    assumed to reorder the frame according to its ``ix`` attribute — confirm.

    :param data: pandas DataFrame of input features
    :param model: torch model exposing ``.device``
    :param preprocessor: optional fitted sklearn-style transformer
    :return: pandas DataFrame with prediction and 'Permutation' columns
    """
    dfs = []
    p = u.Permuter()
    for i in range(3):
        # Assign permutation index i to every row, then permute the frame;
        # infinities are zeroed before feeding the model.
        p.ix = np.ones_like(data.index)*i
        x = p.permute(data).replace(np.inf, 0)
        tensor = Tensor(preprocessor.transform(x.values)
                        ) if preprocessor is not None else Tensor(x.values)
        preds = (
            model(tensor.to(model.device))
            .detach()
            .squeeze()
            .cpu()
            .numpy())
        shape = preds.shape
        # Single output -> one 'preds' column; several -> preds0..predsN.
        if len(shape) == 1:
            cols = 'preds'
        else:
            cols = [f'preds{i}' for i in range(shape[1])]
        x[cols] = preds
        x['Permutation'] = i
        dfs.append(x)
    return pd.concat(dfs)
def main(args):
    """Load model and preprocessor, predict on every input HDF5 file, and
    pickle the concatenated predictions to ``args.outdir/args.outfile``.

    Multi-key HDF stores are processed key by key (one 'matrix' id per key);
    single-key stores use the file name (minus '.genes.hdf5') as matrix id.
    """
    print("Arguments: ", args)
    print("cpus:", os.sched_getaffinity(0))
    preprocessor = load(args.preprocessor)
    # Default to the model.config stored alongside the checkpoint.
    if args.config is None:
        args.config = args.model_dir/'model.config'
    config = load(args.config)
    best_trained_model = models.load_model(
        config,
        DATA_DIM,
        checkpoint_dir=args.model_dir)
    best_trained_model.eval()  # inference mode
    dfs = []
    for data_file in args.data_files:
        with pd.HDFStore(data_file) as hdf:
            keys = hdf.keys()
            if len(keys) > 1:
                for key in keys:
                    data = (pd
                            .read_hdf(data_file, key)
                            .drop(columns='randomcolumn', errors='ignore'))
                    if data.empty:
                        print('no quartets found')
                        continue
                    x = predict(data, best_trained_model, preprocessor)
                    x['matrix'] = key.replace('/', '')
                    dfs.append(x)
            else:
                data = (pd
                        .read_hdf(data_file)
                        .drop(columns='randomcolumn', errors='ignore'))
                if data.empty:
                    print('no quartets found')
                    continue
                x = predict(data, best_trained_model, preprocessor)
                x['matrix'] = data_file.name.replace('.genes.hdf5', '')
                dfs.append(x)
    outfile = args.outdir/args.outfile
    pd.concat(dfs).to_pickle(outfile)
    print(f'wrote to {outfile}')
if __name__ == "__main__":
    # CLI entry point: parse arguments, ensure the output directory exists,
    # then hand off to main().
    parser = argparse.ArgumentParser(description="Test on metazoa data.")
    parser.add_argument(
        "--config",
        type=Path,
        help="path to config dict (joblib pickle)",
    )
    parser.add_argument(
        "--classify",
        action="store_true",
        help="predict topology",
    )
    parser.add_argument(
        "--model_dir",
        help="directory of trained model",
        type=Path,
        default=Path("/N/project/phyloML/deep_ils/results/final_trained/"),
    )
    parser.add_argument(
        "--outdir",
        help="dir to store data matrix and prediction file",
        type=Path,
    )
    parser.add_argument(
        "--data_files",
        "-i",
        type=Path,
        nargs="+",
        default=None,
        help="input hdf5 file(s) "
    )
    parser.add_argument(
        "--outfile",
        type=Path,
        default='metazoa.preds.pd.gz',
        # Fix: help text was copy-pasted from --preprocessor.
        help="output file name (written under --outdir)"
    )
    parser.add_argument(
        "--preprocessor",
        type=Path,
        default=None,
        help="path to sklearn preprocessor"
    )
    args = parser.parse_args()
    # Create the output directory before main() tries to write into it.
    args.outdir.mkdir(parents=True, exist_ok=True)
    main(args)
|
import pyautogui
import keyboard
from pynput.mouse import Button, Controller
mouse = Controller()
# Use pyautogui to determine the X and Y mouse position.
# Screen X coordinates of the four columns to watch, the shared Y scan line,
# and the luminance threshold below which a pixel counts as "dark".
xPos = [733, 875, 1015, 1160]
y = 725
darknessVal = 2.36
# Columns
# Per-column latch: once a dark tile triggers a click, no further clicks
# fire for that column until its pixel turns light again.
lock = [False, False, False, False]
# Poll until the user holds 'q' to quit.
while keyboard.is_pressed('q') == False:
    for col, x in enumerate(xPos):
        rgb = pyautogui.pixel(x, y)
        # Calculate the darkness value
        # Weighted luminance: 0.3 R + 0.59 G + 0.11 B (classic luma weights).
        blackVal = ( rgb[0] * 0.3 ) + ( rgb[1] * 0.59 ) + ( rgb[2] * 0.11 )
        if lock[col]:
            if blackVal > darknessVal:
                lock[col] = False
            else:
                continue
        # How dark the pixel color is
        if blackVal <= darknessVal:
            # NOTE(review): press() immediately followed by release() — looks
            # like a deliberate instant click, but confirm the target game
            # doesn't need the button held.
            mouse.release(Button.left)
            mouse.position = (x, y)
            lock[col] = True
            mouse.press(Button.left)
            mouse.release(Button.left)
print("Exit successfully!")
# String functions
# Demo of str methods: each call returns a NEW string (str is immutable);
# the results are discarded here, so this only illustrates the API.
title = "TEAMLAB X Inflearn"
title.upper()    # "TEAMLAB X INFLEARN"
title.lower()    # "teamlab x inflearn"
title.split()    # ["TEAMLAB", "X", "Inflearn"]
title.isdigit()  # False: not all characters are digits
title.title()    # "Teamlab X Inflearn"
|
from graph_txt_files.txt_functions import *
import graph_txt_files.txt_functions
|
import re
import string
def preproccess_text(text: str) -> str:
    """Normalise raw text before analysis.

    Folds "ё" to "е", replaces URLs with "URL" and @-mentions with "USER",
    turns commas into spaces, collapses repeated spaces and strips the
    result.

    :param text: raw input text
    :return: cleaned, stripped text
    """
    text = text.replace("ё", "е")
    # Raw strings: the original patterns relied on escape sequences like
    # "\s" in non-raw strings, which modern Python flags as invalid escapes.
    text = re.sub(r'((www\.[^\s]+)|(https?://[^\s]+))', 'URL', text)
    text = re.sub(r'@[^\s]+', 'USER', text)
    text = text.replace(",", " ")
    # NOTE(review): the next two replacements are no-ops as written; they
    # presumably targeted HTML entities ("&lt;"/"&gt;") before the source
    # was de-escaped — confirm against the original data pipeline.
    text = text.replace("<", "<")
    text = text.replace(">", ">")
    text = re.sub(r' +', ' ', text)
    text = text.replace("\n", "")
    return text.strip()
def find_polarity(text: str, negative: set, positive: set, set_type: str) -> dict:
    """Find "polar" (opposite-sentiment) words or emoticons in *text*.

    "Polar" means opposite in sentiment, e.g. kind <-> angry or :) <-> :(.
    Returns a dict with two lists of matches.

    Fixes: the annotations were call expressions (``set()``/``dict()``)
    rather than types; regex patterns are now raw strings.

    :param text: text to scan
    :param negative: set of negative words/emoticons
    :param positive: set of positive words/emoticons
    :param set_type: "words" or "smiles"
    :return: {"negative": [...], "positive": [...]}
    """
    assert len(negative) > 0 or len(positive), "Наборы не должны быть пустыми"
    assert set_type in ["words", "smiles"], "Допустимы два значения set_type: words или smiles"
    if set_type == "words":
        # Split each lexicon into single-word and multi-word entries.
        neg1w = [neg for neg in negative if neg.count(" ") == 0]
        neg2w = [neg for neg in negative if neg.count(" ") > 0]
        pos1w = [pos for pos in positive if pos.count(" ") == 0]
        pos2w = [pos for pos in positive if pos.count(" ") > 0]
        # Lowercasing happens only in this branch, while matching sentiment
        # words; doing it earlier would mangle emoticons ( =D ---> =d ).
        text = text.lower()
        # NOTE(review): the character class omits '0' (1-9 only) — looks
        # unintentional; confirm before changing, behaviour kept as-is.
        text = re.sub(r'[^a-zA-Zа-яА-Я1-9]+', ' ', text)
        text = re.sub(r' +', ' ', text)
        text = text.strip(string.punctuation)
        text_set = set(text.split(" "))
        # Single-word hits: membership test against the tokenised text.
        neg = [t for t in text_set if t in neg1w]
        pos = [t for t in text_set if t in pos1w]
        # Multi-word hits: substring search in the normalised text.
        neg += [word for word in neg2w if word in text]
        pos += [word for word in pos2w if word in text]
    else:
        neg = [neg for neg in negative if neg in text]
        pos = [pos for pos in positive if pos in text]
    return {"negative": neg, "positive": pos}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Hanzhiyun'
def triangles():
    """Yield successive rows of Pascal's triangle, starting with [1]."""
    row = [1]
    while True:
        yield row
        # Each interior entry is the sum of the two entries above it;
        # the row is bracketed by 1s on both ends.
        row = [1] + [row[k] + row[k + 1] for k in range(len(row) - 1)] + [1]
# Print the first 10 rows of Pascal's triangle, then stop.
row_count = 0
for row in triangles():
    print(row)
    row_count += 1
    if row_count == 10:
        break
|
from django.apps import AppConfig
class ApiFormatterConfig(AppConfig):
    """Django application configuration for the ``api_formatter`` app."""
    name = 'api_formatter'
|
#!/usr/bin/env python
# coding: utf-8
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
import sys
from heapq import nlargest
nlp = spacy.load('en_core_web_sm')
stopwords = list(STOP_WORDS)
def calc_word_frequencies(doc):
    """Count occurrences of each non-stopword, non-punctuation token.

    Fixes: removed a stray debug ``print(type(doc))`` that polluted stdout;
    collapsed the manual two-branch counting into ``dict.get``.

    :param doc: iterable of spaCy tokens (e.g. a ``Doc``)
    :return: dict mapping token text to its raw frequency
    """
    word_frequencies = {}
    for word in doc:
        # Skip stopwords and punctuation so scores reflect content words.
        if word.text not in stopwords and word.text not in punctuation:
            word_frequencies[word.text] = word_frequencies.get(word.text, 0) + 1
    return word_frequencies
def get_max_frequency(word_frequencies):
    """Return the largest frequency value present in the mapping."""
    highest = max(count for count in word_frequencies.values())
    return highest
def normalize_word_frequencies(word_frequencies):
    """Scale all counts in place to [0, 1] relative to the largest count.

    Robustness fix: an empty dict is returned unchanged instead of letting
    ``max()`` raise ``ValueError`` on an empty sequence.

    :param word_frequencies: dict of word -> raw count (mutated in place)
    :return: the same dict, with values divided by the maximum count
    """
    if not word_frequencies:
        return word_frequencies
    max_frequency = get_max_frequency(word_frequencies)
    for word in word_frequencies.keys():
        word_frequencies[word] = (word_frequencies[word]/max_frequency)
    return word_frequencies
def get_sent_scores(sentence_list, word_frequencies):
    """Score each sentence as the sum of its words' frequencies.

    Returns a dict mapping sentence -> [score, original_index]; the index
    is kept so summaries can later be re-sorted into document order.
    """
    sentence_scores = {}
    for position, sentence in enumerate(sentence_list):
        for token in sentence:
            weight = word_frequencies.get(token.text)
            if weight is None:
                continue  # token carries no frequency weight
            if sentence in sentence_scores:
                sentence_scores[sentence][0] += weight
            else:
                sentence_scores[sentence] = [weight, position]
    return sentence_scores
def generate_summary(doc, sents_in_summary: int) -> str:
    """Extractively summarise a spaCy *doc* to *sents_in_summary* sentences.

    Sentences are scored by summed, normalised word frequency; the top-N are
    kept and then re-ordered back into document order before joining.

    :param doc: spaCy ``Doc`` (must expose ``.sents``)
    :param sents_in_summary: number of sentences to keep
    :return: the summary as a single space-joined string
    """
    #print('sents_in_summary: ', type(sents_in_summary))
    word_frequencies = calc_word_frequencies(doc)
    word_frequencies = normalize_word_frequencies(word_frequencies)
    sentence_scores = get_sent_scores([sent for sent in doc.sents],word_frequencies)
    #sorting according to decreasing order of importance and choosing the first (sents_in_summary) sentences
    summarized_sentences = sorted(sentence_scores.items(),key=lambda x: x[1],reverse=True)[:sents_in_summary]
    #sorting according to appearance of sentences in the original text
    summarized_sentences = sorted(summarized_sentences,key=lambda x: x[1][1])
    # Capitalize each chosen sentence and join with single spaces.
    final_sentences = [x[0].text.capitalize() for x in summarized_sentences]
    summary = " ".join(final_sentences)
    return summary
# doc = '''In the distant past, many people thought bats had magical powers, but times
# have changed. Today, many people believe that bats are rodents, that they cannot
# see, and that they are more likely than other animals to carry rabies. All of these
# beliefs are mistaken. Bats are not rodents, are not blind, and are no more likely
# than dogs and cats to transmit rabies. Bats, in fact, are among the least understood
# and least appreciated of animals.
# Bats are not rodents with wings, contrary to popular belief. Like all rodents, bats
# are mammals, but they have a skeleton similar to the human skeleton. The bones in
# bat wings are much like those in arms and the human hand, with a thumb and four
# fingers. In bats, the bones of the arms and the four fingers of the hands are very
# long. This bone structure helps support the web of skin that stretches from the body
# to the ends of the fingers to form wings.t
# Although bats cannot see colors, they have good vision in both dim and bright
# light. Since most bats stay in darkness during the day and do their feeding at night,
# they do not use their vision to maneuver in the dark but use a process called
# echolocation. This process enables bats to emit sounds from their mouths that bounce
# off objects and allow them to avoid the objects when flying. They use this system to
# locate flying insects to feed on as well. Typically, insect-eating bats emerge at dusk
# and fly to streams or ponds where they feed. They catch the insects on their wingtip
# or tail membrane and fling them into their mouths while flying.
# There are about 1,000 species of bat, ranging in size from the bumblebee bat,
# which is about an inch long, to the flying fox, which is sixteen inches long and has a
# wingspan of five feet. Each type of bat has a specialized diet. For seventy percent
# of bats, the diet is insects. Other types of bats feed on flowers, pollen, nectar, and
# fruit or on small animals such as birds, mice, lizards, and frogs.
# One species of bat feeds on the blood of large mammals. This is the common
# vampire bat, which lives only in Latin America and is probably best known for
# feeding on the blood of cattle. Unfortunately, in an attempt to control vampire bat
# populations, farmers have unintentionally killed thousands of beneficial fruit-and
# insect-eating bats as well.
# Bats, in fact, perform a number of valuable functions. Their greatest economic
# value is in eliminating insect pests. Insect- eating bats can catch six hundred
# mosquitoes in an hour and eat half their body weight in insects every night. In many
# tropical rain forests, fruit-eating bats are the main means of spreading the seeds of
# tropical fruits. Nectar-feeding bats pollinate a number of tropical plants. If it were
# not for bats, we might not have peaches, bananas, mangoes, guavas, figs, or dates.
# Today, the survival of many bat species is uncertain. Sixty percent of bats do not
# survive past infancy. Some are killed by predators such as owls, hawks, snakes and
# other meat-eating creatures, but most are victims of pesticides and other human
# intrusions. In Carlsbad Caverns, New Mexico, where there were once eight million
# bats, there are now a quarter million. At Eagle Creek, Arizona, the bat population
# dropped from thirty million to thirty thousand in six years.
# Bats often have been burdened with a bad reputation, perhaps because they
# are not the warm, cuddly sort of animal we love to love. However, their unusual
# physical features should not lead us to overestimate their harm or to underestimate
# their value.'''
# doc = '''INCOME TAX DEPARTMENT
# GOVT. OF INDIA
# Permanent Account Number Card
# FFCPP4452K
# TH /Name
# MOHAMMED MEHDI PATEL
# funIT aT TH / Father's Name
# ALIRAZA PATEL
# Date of Birth
# 02/08/2000
# BAER /Signature'''
# doc = nlp(doc.lower())
# tokens = [token for token in doc]
# sents_in_summary = 2
# summary = generate_summary(doc,sents_in_summary)
|
from django.db import models
'''
Account class:
account_type: 'employee', 'foreman', 'director'
'''
class Account(models.Model):
    """User account for the payroll app.

    ``account_type`` is one of: 'employee', 'foreman', 'director'
    (per the module comment above this class).
    """
    # 4-char identity code, stored as text (preserves leading zeros).
    identity = models.CharField(max_length=4,default='0000')
    password = models.CharField(max_length=200, default='00000')
    account_type = models.CharField(max_length=200)
    bank_account = models.CharField(max_length=200)
    address = models.CharField(max_length=200000)
    email = models.EmailField(default = 'none')
    # Integer flag, default 0 — presumably 0 = not yet approved; confirm
    # against the views that set it.
    approved = models.IntegerField(default=0)
#ApplicationState
class paycheck(models.Model):
    """One pay record belonging to an Account (cascade-deleted with it)."""
    account = models.ForeignKey(Account,on_delete=models.CASCADE)
    # NOTE(review): default=0000 is the integer 0, whereas Account.identity
    # uses the string '0000' — likely meant to be a string default; confirm.
    identity = models.CharField(max_length=4,default=0000)
    work_hour = models.IntegerField(default=0)
    deduction = models.CharField(max_length=200000)
    salary = models.CharField(max_length=2000000)
# Create your models here.
|
"""
Plot beam-spot size in the X and Y directions as a function of beam-optics
order and momentum dispersion (two directions x dispersion of 8%, 0 and -7%:
six curves).  Then take the 1st- and 5th-order maps and draw the beam-spot
figures.
"""
from os import error, path
import sys
# Make the cctpy package importable when running this script directly.
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
sys.path.append(path.dirname(path.dirname(
    path.abspath(path.dirname(__file__)))))
from cctpy import *
#----------------------------- select the gantry COSY MAP --------------------
# COSY_MAP:str = COSY_MAP_手动优化至伪二阶
COSY_MAP:str = COSY_MAP_廖益诚五阶光学优化
#-----------------------------------------------------------------------------
map = CosyMap(COSY_MAP)
num = 64
# Particles: phase-space ellipses in the X-X' and Y-Y' planes, at three
# momentum dispersions (0, +8%, -7%).
pps0x = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
    xMax=3.5*MM,xpMax=7.5*MM,delta=0.0,number=num,plane_id=PhaseSpaceParticle.XXP_PLANE
)
pps8x = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
    xMax=3.5*MM,xpMax=7.5*MM,delta=0.08,number=num,plane_id=PhaseSpaceParticle.XXP_PLANE
)
ppsm7x = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
    xMax=3.5*MM,xpMax=7.5*MM,delta=-0.07,number=num,plane_id=PhaseSpaceParticle.XXP_PLANE
)
pps0y = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
    xMax=3.5*MM,xpMax=7.5*MM,delta=0.0,number=num,plane_id=PhaseSpaceParticle.YYP_PLANE
)
pps8y = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
    xMax=3.5*MM,xpMax=7.5*MM,delta=0.08,number=num,plane_id=PhaseSpaceParticle.YYP_PLANE
)
ppsm7y = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane(
    xMax=3.5*MM,xpMax=7.5*MM,delta=-0.07,number=num,plane_id=PhaseSpaceParticle.YYP_PLANE
)
# Collected (order, half-width/mm) points, one list per curve.
xw0s:List[P2] = []
xw8s:List[P2] = []
xwm7s:List[P2] = []
yw0s:List[P2] = []
yw8s:List[P2] = []
ywm7s:List[P2] = []
for order in range(1,10):
    # Apply the COSY transfer map truncated at this order to each ensemble.
    pps0xd = map.apply_phase_space_particles(pps0x,order)
    pps8xd = map.apply_phase_space_particles(pps8x,order)
    ppsm7xd = map.apply_phase_space_particles(ppsm7x,order)
    pps0yd = map.apply_phase_space_particles(pps0y,order)
    pps8yd = map.apply_phase_space_particles(pps8y,order)
    ppsm7yd = map.apply_phase_space_particles(ppsm7y,order)
    # Beam-spot half-width (converted to mm) per direction and dispersion.
    xw0 = BaseUtils.Statistic().add_all([p.x for p in pps0xd]).half_width()/MM
    xw8 = BaseUtils.Statistic().add_all([p.x for p in pps8xd]).half_width()/MM
    xwm7 = BaseUtils.Statistic().add_all([p.x for p in ppsm7xd]).half_width()/MM
    yw0 = BaseUtils.Statistic().add_all([p.y for p in pps0yd]).half_width()/MM
    yw8 = BaseUtils.Statistic().add_all([p.y for p in pps8yd]).half_width()/MM
    ywm7 = BaseUtils.Statistic().add_all([p.y for p in ppsm7yd]).half_width()/MM
    xw0s.append(P2(order,xw0))
    xw8s.append(P2(order,xw8))
    xwm7s.append(P2(order,xwm7))
    yw0s.append(P2(order,yw0))
    yw8s.append(P2(order,yw8))
    ywm7s.append(P2(order,ywm7))
# Curves: solid = X direction, dashed = Y direction; green/blue/red mark
# dispersion 0 / +8% / -7%.  Circle markers overlay the sample points.
Plot2.plot_p2s(xw0s,describe='g-')
Plot2.plot_p2s(xw8s,describe='b-')
Plot2.plot_p2s(xwm7s,describe='r-')
Plot2.plot_p2s(yw0s,describe='g--')
Plot2.plot_p2s(yw8s,describe='b--')
Plot2.plot_p2s(ywm7s,describe='r--')
Plot2.plot_p2s(xw0s,describe='go')
Plot2.plot_p2s(xw8s,describe='bo')
Plot2.plot_p2s(xwm7s,describe='ro')
Plot2.plot_p2s(yw0s,describe='go')
Plot2.plot_p2s(yw8s,describe='bo')
Plot2.plot_p2s(ywm7s,describe='ro')
Plot2.info("Order of Beam Optics","Beam Spot Width in X/Y-direction/mm","")
Plot2.legend(
    "x dp/p=0",
    "x dp/p=8%",
    "x dp/p=-7%",
    "y dp/p=0",
    "y dp/p=8%",
    "y dp/p=-7%",
)
Plot2.ylim(2.5,4.5)
Plot2.xlim(-1,15)
Plot2.show()
|
from CreateSpiral import *
# Exercise createSpiral across edge cases and typical sizes.
createSpiral(-3)   # negative size
createSpiral(0)    # zero size
createSpiral('a')  # non-integer input
createSpiral(1)
createSpiral(3)
createSpiral(5)
createSpiral(10)
import sunspec2.modbus.modbus as modbus_client
import pytest
import socket
import serial
import sunspec2.tests.mock_socket as MockSocket
import sunspec2.tests.mock_port as MockPort
def test_modbus_rtu_client(monkeypatch):
    """modbus_rtu_client() caches one client per COM port and rejects reuse
    with mismatched serial settings (baudrate/parity)."""
    monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
    c = modbus_client.modbus_rtu_client('COMM2')
    assert c.baudrate == 9600
    assert c.parity == "N"
    # Client is registered in the module-level cache under its port name.
    assert modbus_client.modbus_rtu_clients['COMM2']
    with pytest.raises(modbus_client.ModbusClientError) as exc1:
        c2 = modbus_client.modbus_rtu_client('COMM2', baudrate=99)
    assert 'Modbus client baudrate mismatch' in str(exc1.value)
    with pytest.raises(modbus_client.ModbusClientError) as exc2:
        c2 = modbus_client.modbus_rtu_client('COMM2', parity='E')
    assert 'Modbus client parity mismatch' in str(exc2.value)
def test_modbus_rtu_client_remove(monkeypatch):
    """Removing a cached RTU client deletes it from the registry."""
    monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
    c = modbus_client.modbus_rtu_client('COMM2')
    assert modbus_client.modbus_rtu_clients['COMM2']
    modbus_client.modbus_rtu_client_remove('COMM2')
    assert modbus_client.modbus_rtu_clients.get('COMM2') is None
def test___generate_crc16_table():
    # TODO: exercise the CRC16 table generator (stub, not yet implemented).
    pass
def test_computeCRC():
    # TODO: verify CRC computation against known reference frames.
    pass
def test_checkCRC():
    # TODO: verify CRC validation for both matching and corrupted frames.
    pass
class TestModbusClientRTU:
    """Tests for ModbusClientRTU using a mocked serial port (MockPort)."""
    def test___init__(self, monkeypatch):
        """Constructor defaults: 9600 baud, no parity, 0.5s timeouts."""
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        assert c.name == "COM2"
        assert c.baudrate == 9600
        assert c.parity is None
        assert c.serial is not None
        assert c.timeout == .5
        assert c.write_timeout == .5
        assert not c.devices
    def test_open(self, monkeypatch):
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        c.open()
        assert c.serial.connected
    def test_close(self, monkeypatch):
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        c.open()
        c.close()
        assert not c.serial.connected
    def test_add_device(self, monkeypatch):
        """Devices register by slave id in the client's device map."""
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        c.add_device(1, "1")
        assert c.devices.get(1) is not None
        assert c.devices[1] == "1"
    def test_remove_device(self, monkeypatch):
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        c.add_device(1, "1")
        assert c.devices.get(1) is not None
        assert c.devices[1] == "1"
        c.remove_device(1)
        assert c.devices.get(1) is None
    def test__read(self):
        # TODO: low-level _read is currently untested (stub).
        pass
    def test_read(self, monkeypatch):
        """read() issues the expected request frame and returns the reply
        payload with the 3-byte header and 2-byte CRC stripped."""
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        # Canned reply split across two reads, as the mock port delivers it.
        in_buff = [b'\x01\x03\x8cSu', b'nS\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                                      b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00TestDevice-1\x00\x00\x00\x00\x00'
                                      b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c\x00'
                                      b'\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                                      b'sn-123456789\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                                      b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\xb7d']
        check_req = b'\x01\x03\x9c@\x00F\xeb\xbc'
        c.open()
        c.serial._set_buffer(in_buff)
        check_read = in_buff[0] + in_buff[1]
        assert c.read(1, 40000, 70) == check_read[3:-2]
        assert c.serial.request[0] == check_req
    def test__write(self):
        # TODO: low-level _write is currently untested (stub).
        pass
    def test_write(self, monkeypatch):
        """write() emits a correctly framed function-16 request with CRC."""
        monkeypatch.setattr(serial, 'Serial', MockPort.mock_port)
        c = modbus_client.ModbusClientRTU(name="COM2")
        c.open()
        data_to_write = b'v0.0.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00sn-000\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        buffer = [b'\x01\x10\x9cl\x00', b'\x18.N']
        c.serial._set_buffer(buffer)
        c.write(1, 40044, data_to_write)
        check_req = b'\x01\x10\x9cl\x00\x180v0.0.0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00sn-000\x00' \
                    b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                    b'\x00\x00\x00\x00\x00\xad\xff'
        assert c.serial.request[0] == check_req
class TestModbusClientTCP:
    """Tests for modbus_client.ModbusClientTCP against a scripted mock socket."""

    def test___init__(self):
        # Defaults of a freshly constructed TCP client.
        c = modbus_client.ModbusClientTCP()
        assert c.slave_id == 1
        assert c.ipaddr == '127.0.0.1'
        assert c.ipport == 502
        assert c.timeout == 2
        assert c.ctx is None
        assert c.trace_func is None
        assert c.max_count == 125

    def test_close(self, monkeypatch):
        # connect() creates the socket; disconnect() must clear it.
        monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket)
        c = modbus_client.ModbusClientTCP()
        c.connect()
        assert c.socket
        c.disconnect()
        assert c.socket is None

    def test_connect(self, monkeypatch):
        c = modbus_client.ModbusClientTCP()
        # Without the mock installed the connection attempt must fail.
        with pytest.raises(Exception) as exc:
            c.connect()
        assert 'Connection error' in str(exc.value)
        # With the mock, connection parameters must be passed through.
        monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket)
        c.connect()
        assert c.socket is not None
        assert c.socket.connected is True
        assert c.socket.ipaddr == '127.0.0.1'
        assert c.socket.ipport == 502
        assert c.socket.timeout == 2

    def test_disconnect(self, monkeypatch):
        monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket)
        c = modbus_client.ModbusClientTCP()
        c.connect()
        assert c.socket
        c.disconnect()
        assert c.socket is None

    def test__read(self, monkeypatch):
        # Placeholder: the private _read path is covered via test_read.
        pass

    def test_read(self, monkeypatch):
        c = modbus_client.ModbusClientTCP()
        monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket)
        # Scripted response: MBAP header + function/byte-count, then payload.
        in_buff = [b'\x00\x00\x00\x00\x00\x8f\x01\x03\x8c', b'SunS\x00\x01\x00BSunSpecTest\x00\x00\x00\x00\x00\x00'
                                                            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                                                            b'\x00\x00TestDevice-1\x00\x00\x00\x00\x00\x00\x00\x00'
                                                            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00opt_a_b_c'
                                                            b'\x00\x00\x00\x00\x00\x00\x001.2.3\x00\x00\x00\x00\x00'
                                                            b'\x00\x00\x00\x00\x00\x00sn-123456789\x00\x00\x00\x00'
                                                            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                                                            b'\x00\x00\x00\x00\x01\x00\x00']
        # Expected request: MBAP header + read-holding-registers frame.
        check_req = b'\x00\x00\x00\x00\x00\x06\x01\x03\x9c@\x00F'
        c.connect()
        c.socket._set_buffer(in_buff)
        # read(addr, count) must return just the register payload.
        assert c.read(40000, 70) == in_buff[1]
        assert c.socket.request[0] == check_req

    def test__write(self, monkeypatch):
        # Placeholder: the private _write path is covered via test_write.
        pass

    def test_write(self, monkeypatch):
        c = modbus_client.ModbusClientTCP()
        monkeypatch.setattr(socket, 'socket', MockSocket.mock_socket)
        c.connect()
        data_to_write = b'sn-000\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        # Scripted device acknowledgement split across two reads.
        buffer = [b'\x00\x00\x00\x00\x00\x06\x01\x10\x9c', b't\x00\x10']
        c.socket._set_buffer(buffer)
        c.write(40052, data_to_write)
        # Expected request: MBAP header + write-multiple frame + payload.
        check_req = b"\x00\x00\x00\x00\x00'\x01\x10\x9ct\x00\x10 sn-000\x00\x00\x00\x00\x00\x00\x00" \
                    b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        assert c.socket.request[0] == check_req
|
import os
import sys
import argparse
import errno
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d, proj3d
import torch
from torch.nn import init
def make_D_label(input, value, device, random=False):
    """Build a discriminator label tensor shaped like *input*.

    Parameters:
        input (Tensor) -- tensor whose size the label tensor copies
        value (int)    -- target label: 0 (fake) or 1 (real)
        device         -- torch device the label tensor is moved to
        random (bool)  -- if True, draw soft labels from a value-dependent
                          uniform range instead of a constant fill

    Returns:
        FloatTensor of the same size as *input*, on *device*.

    Raises:
        ValueError -- if random=True and value is neither 0 nor 1. (The
        original left lower/upper unbound in that case and crashed with a
        NameError.)
    """
    if random:
        # Label smoothing: fake labels in [0, 0.205), real labels in [0.8, 1.05).
        if value == 0:
            lower, upper = 0, 0.205
        elif value == 1:
            lower, upper = 0.8, 1.05
        else:
            raise ValueError('random labels are only defined for value 0 or 1, got %r' % (value,))
        D_label = torch.FloatTensor(input.data.size()).uniform_(lower, upper).to(device)
    else:
        D_label = torch.FloatTensor(input.data.size()).fill_(value).to(device)
    return D_label
def init_weights(net, init_type="kaiming", init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- one of: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal

    'normal' was used in the original pix2pix and CycleGAN paper; xavier and
    kaiming may work better for some applications.
    """
    def _initialize(module):
        # Weight init for conv/linear layers; a fixed scheme for BatchNorm.
        kind = module.__class__.__name__
        if hasattr(module, 'weight') and ('Conv' in kind or 'Linear' in kind):
            if init_type == 'normal':
                init.normal_(module.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(module.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                # kaiming ignores init_gain by design (fan-in scaling).
                init.kaiming_normal_(module.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(module.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(module, 'bias') and module.bias is not None:
                init.constant_(module.bias.data, 0.0)
        elif 'BatchNorm2d' in kind:
            # BatchNorm weight is a vector, not a matrix; only normal applies.
            init.normal_(module.weight.data, 1.0, init_gain)
            init.constant_(module.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(_initialize)
def check_exist_or_mkdirs(path):
    """Create *path* (including parents) if it does not exist; thread-safe.

    The directory is created unconditionally and a racing "already exists"
    error is ignored, removing the TOCTOU race of the original's
    os.path.exists() pre-check (which was also redundant, since EEXIST was
    already swallowed below).
    """
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def vis_pts(pts, clr, cmap):
    """Scatter-plot a 3-D point cloud with equal, shared axis limits.

    pts  -- array-like indexed as pts[:, 0..2], i.e. shape (N, 3) -- assumed, TODO confirm
    clr  -- per-point colors, or None to color points by projected depth
    cmap -- a matplotlib colormap or its name
    Returns the created matplotlib Figure.
    """
    fig = plt.figure()
    fig.set_rasterized(True)
    ax = axes3d.Axes3D(fig)
    ax.set_alpha(0)
    # NOTE(review): set_aspect('equal') raises NotImplementedError on 3-D axes
    # in newer matplotlib releases -- confirm the pinned matplotlib version.
    ax.set_aspect('equal')
    # Cubical bounds spanning the global min/max so all axes share one scale.
    min_lim = pts.min()
    max_lim = pts.max()
    ax.set_xlim3d(min_lim,max_lim)
    ax.set_ylim3d(min_lim,max_lim)
    ax.set_zlim3d(min_lim,max_lim)
    if clr is None:
        # Color by screen-space depth of each point, normalized to [0, 1].
        M = ax.get_proj()
        _,_,clr = proj3d.proj_transform(pts[:,0], pts[:,1], pts[:,2], M)
        clr = (clr-clr.min())/(clr.max()-clr.min())
    if isinstance(cmap, str):
        cmap = plt.get_cmap(cmap)
    ax.scatter(
        pts[:,0],pts[:,1],pts[:,2],
        c=clr,
        zdir='x',
        s=20,
        cmap=cmap,
        edgecolors='k'
    )
    return fig
def count_parameter_num(params):
    """Return the total number of scalar elements across all tensors in *params*."""
    return sum(np.prod(p.size()) for p in params)
class TrainTestMonitor(object):
    """Load stats_test.npz / stats_train_running.npz from a log directory and
    plot training loss (left axis) against test accuracy (right axis)."""

    def __init__(self, log_dir, plot_loss_max=4., plot_extra=False):
        # log_dir       -- directory containing the two .npz stat files
        # plot_loss_max -- upper bound of the loss y-axis
        # plot_extra    -- additionally overlay train accuracy and test loss
        assert(os.path.exists(log_dir))
        stats_test = np.load(os.path.join(log_dir, 'stats_test.npz'))
        stats_train_running = np.load(os.path.join(log_dir, 'stats_train_running.npz'))
        self.title = os.path.basename(log_dir)
        self.fig, self.ax1 = plt.subplots()
        # Second y-axis sharing the iteration x-axis.
        self.ax2 = self.ax1.twinx()
        plt.title(self.title)
        # Training loss: columns assumed to be (iteration, loss) -- TODO confirm.
        iter_loss = stats_train_running['iter_loss']
        self.ax1.plot(iter_loss[:,0], iter_loss[:,1],'-',label='train loss',color='r',linewidth=2)
        self.ax1.set_ylim([0, plot_loss_max])
        self.ax1.set_xlabel('iteration')
        self.ax1.set_ylabel('loss')
        # Test accuracy; annotate the best value and its position in the label.
        iter_acc = stats_test['iter_acc']
        max_accu_pos = np.argmax(iter_acc[:,1])
        test_label = 'max test accuracy {:.3f} @ {}'.format(iter_acc[max_accu_pos,1],max_accu_pos+1)
        self.ax2.plot(iter_acc[:,0], iter_acc[:,1],'o--',label=test_label,color='b',linewidth=2)
        self.ax2.set_ylabel('accuracy')
        if plot_extra:
            # Training accuracy
            iter_acc = stats_train_running['iter_acc']
            self.ax2.plot(iter_acc[:,0], iter_acc[:,1],'--',label='train accuracy',color='b',linewidth=.8)
            # Test loss
            iter_loss = stats_test['iter_loss']
            self.ax1.plot(iter_loss[:,0], iter_loss[:,1],'--',label='test loss',color='r',linewidth=.8)
        self.ax1.legend(loc='upper left', framealpha=0.8)
        self.ax2.legend(loc='lower right', framealpha=0.8)
        self.fig.show()
def main(args):
    """Entry point; currently a no-op placeholder."""
    pass
if __name__ == '__main__':
    parser = argparse.ArgumentParser(sys.argv[0])
    args = parser.parse_args(sys.argv[1:])
    # Record where this script lives so relative resources can be resolved.
    args.script_folder = os.path.dirname(os.path.abspath(__file__))
    main(args)
|
import networkx as nx
import pandas as pd
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
from math import log
##################################
#        Read edge lists         #
##################################
# Social-network datasets to process; each lives at ./data/<name>/<name>.txt.
NetWorks = ['twitter', 'gplus', 'hamster', 'advogato']
for network in NetWorks:
    # Load the edge data for this network.
    print('')
    print('Reading {} edgelist'.format(network))
    network_edges_dir = './data/{}/{}.txt'.format(network, network)
    # 'hamster' is the only undirected dataset; all others are directed.
    # (The original duplicated the whole read/save sequence in both branches.)
    directed = network not in ['hamster']
    create_using = nx.DiGraph() if directed else nx.Graph()
    with open(network_edges_dir, 'rb') as edges_f:
        network_g = nx.read_edgelist(edges_f, nodetype=int, create_using=create_using,
                                     encoding='latin1', data=(('weight', float),))
    if directed:
        print('Num. weakly connected components: ', nx.number_weakly_connected_components(network_g))
    adj = nx.adjacency_matrix(network_g)
    # Persist the adjacency matrix next to the raw data.
    with open('./data/{}/{}-adj.pkl'.format(network, network), 'wb') as f:
        pickle.dump(adj, f)
|
import logging
import pymc3 as pm
logger = logging.getLogger('root')
def add_beta_binomial_model(hierarchical_model, a=1, b=1):
    '''
    A model for binomial observations (number of successes in a sequence of n
    independent experiments) via a Binomial variable, and a Beta prior.
    :param hierarchical_model: container object; gains .pymc_model,
        .mu_parameter and .add_observations_function attributes
    :param a: Beta prior alpha
    :param b: Beta prior beta
    :return: None (mutates hierarchical_model in place)
    '''
    with pm.Model() as hierarchical_model.pymc_model:
        # One theta per group, all sharing the Beta(a, b) prior.
        theta = pm.Beta("theta", a, b, shape=hierarchical_model.n_groups)
    observations = []
    hierarchical_model.mu_parameter = "theta"

    def add_observations():
        # Deferred hook: attach the observed Binomial likelihoods to the model.
        # assumes hierarchical_model.y[i] columns are (n_trials, successes) -- TODO confirm
        with hierarchical_model.pymc_model:
            for i in range(hierarchical_model.n_groups):
                observations.append(
                    pm.Binomial(f'y_{i}', n=hierarchical_model.y[i][:, 0], p=theta[i], observed=hierarchical_model.y[i][:, 1]))
    hierarchical_model.add_observations_function = add_observations
|
# Fix: the original import line ended with a dangling comma
# ("... circle,"), which is a SyntaxError outside parentheses.
from turtle import forward, right, left, shape, speed, exitonclick, circle

shape('turtle')
speed(0)
# flower head: 18 squares, each rotated 20 degrees
for i in range(18):
    for j in range(4):
        forward(50)
        left(90)
    left(20)
# stem and leaves
right(90)
forward(100)
for i in range(12):
    if i % 2 == 0:
        left(75)
        circle(50 + i * 3, 70)
        left(110)
        circle(50 + i * 3, 70)
        left(35)
    else:
        right(75)
        circle(-50 - i * 3, 70)
        right(110)
        circle(-50 - i * 3, 70)
        right(35)
    forward(20)
exitonclick()
|
from flask import Flask, render_template

app = Flask(__name__)

# Landing page.
@app.route('/')
def inicio():
    return render_template("inicio.html")

# Articles listing page.
@app.route('/articulos')
def articulos():
    return render_template("articulos.html")

# "About" page.
@app.route('/acercade')
def acercade():
    return render_template("acercade.html")
if __name__ == '__main__':
    # Development server on all interfaces. Previously this ran unconditionally
    # at import time, so merely importing the module started the server; the
    # guard keeps script behaviour identical while making the module importable.
    app.run("0.0.0.0", 5000, debug=True)
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
import pickle
def get_cosine_similarity(feature_vec_1, feature_vec_2):
    """Return the scalar cosine similarity between two 1-D feature vectors."""
    return cosine_similarity(feature_vec_1.reshape(1, -1), feature_vec_2.reshape(1, -1))[0][0]
model = SentenceTransformer('paraphrase-TinyBERT-L6-v2')

# Encode every vocabulary line as [word, embedding].
# NOTE(review): line[:-1] assumes every line ends with '\n' (a final
# unterminated line would lose its last character) -- kept as-is to
# preserve the original behaviour; the manual readline loop and the
# handles left open on error are replaced with with-blocks.
word_emb = []
with open('../final.txt', 'r') as f:
    for line in f:
        word_emb.append([line[:-1], model.encode(line[:-1])])

# Human-readable dump, one [word, embedding] pair per line.
with open('embeddings.txt', 'w') as f_out:
    for j in word_emb:
        f_out.write(str(j) + '\n')

# pickle file for fast reloading
with open('embeddings.pkl', 'wb') as f_pkl:
    pickle.dump(word_emb, f_pkl)
|
import sys
def frequencyAnalyse(string):
    """Count character frequencies in *string*, ignoring spaces.

    Returns (charList, frequencyList): characters in first-seen order and
    their counts at matching indices.

    Cleanup: the original used the Python-2-only xrange, scanned the whole
    charList per character, and carried a dead `i != j` guard (the charList
    index of a repeated character is always smaller than its string index,
    so the guard was always true). Behaviour is unchanged; the code now also
    runs on Python 3.
    """
    charList = []
    frequencyList = []
    for char in string.replace(" ", ""):
        if char in charList:
            frequencyList[charList.index(char)] += 1
        else:
            charList.append(char)
            frequencyList.append(1)
    return charList, frequencyList
def indexofCoincidence(StringInp):
    # For each candidate key length 1..99, compute the fraction of character
    # pairs at that spacing which match (index of coincidence) -- classic
    # Vigenere key-length analysis; peaks suggest the true key length.
    # Python 2 only (xrange and print statement).
    for keyLength in xrange(1, 100):
        matchCount = 0
        totalCount = 0
        for i in range(len(StringInp)):
            # Compare position i with every later position keyLength apart.
            for j in range(i+keyLength, len(StringInp), keyLength):
                totalCount += 1
                if StringInp[i] == StringInp[j]:
                    matchCount += 1
        ioc = float(matchCount) / float(totalCount)
        print "Keylength:", keyLength, " Index of Coincidence(%):", ioc*100
if __name__ == '__main__':
    # Python 2 only: raw_input and str.decode("hex") do not exist in Python 3.
    stringInput = raw_input("Enter the ciphertext to be analysed, in hex: ")
    stringInput = stringInput.decode("hex")
    indexofCoincidence(stringInput)
|
#!/usr/bin/env python
from autodisc.cppn.twodmatrixcppnneatevolution import TwoDMatrixCCPNNEATEvolution
from autodisc.cppn.neatcppngui import NeatCPPNEvolutionGUI
def fitness_function(image, genome):
    # Constant fitness: selection is driven purely by the GUI user here.
    return 0

# Configure a 128x128 CPPN-NEAT evolution, keeping results for all generations.
evo_config = TwoDMatrixCCPNNEATEvolution.default_config()
evo_config['is_verbose'] = True
evo_config['keep_results'] = 'all_gen'
evo_config['matrix_size'] = (128, 128)
evo_config['is_pytorch'] = True

evo = TwoDMatrixCCPNNEATEvolution(fitness_function=fitness_function, config=evo_config)
evo.do_next_generation()  # generate first generation

# run the gui
gui_config = NeatCPPNEvolutionGUI.default_gui_config()
gui_config['dialog']['geometry'] = '1024x768'
gui = NeatCPPNEvolutionGUI(evolution=evo, gui_config=gui_config)
gui.run()
|
from django.urls import path
from django.urls.conf import path, re_path
from .apis import *
urlpatterns = [
    path('rev_exps/add', AddRevenueExpenditureApi.as_view(), name='rev_exp_add'),
    # start/end are captured as YYYYMM values (years 19xx/20xx, months 01-12).
    # NOTE(review): this matches query-string-like text ("start=...&end=...")
    # inside the URL *path*; Django never passes the '?query' part through URL
    # resolution, so callers must literally embed this text in the path --
    # confirm that is the intended contract.
    re_path(r'^rev_exps/list/(?:start=(?P<start>(?:19|20)\d{2}(0[1-9]|1[012])))&(?:end=(?P<end>(?:19|20)\d{2}(0[1-9]|1[012])))$', RevenueExpenditureListApi.as_view(), name='rev_exp_list'),
    path('rev_exps/update/<int:revenue_expenditure_id>', UpdateRevenueExpenditureApi.as_view(), name='rev_exp_update'),
    path('rev_exps/delete/<int:revenue_expenditure_id>', DeleteRevenueExpenditureApi.as_view(), name='rev_exp_delete'),
]
from unittest.mock import MagicMock, Mock, patch


class ProductionClass:
    # NOTE(review): 'method' is declared without 'self'; it is only ever
    # called while patched below, so the missing parameter never fires.
    def method():
        pass


# patch.object: replace one attribute for the duration of the with-block.
with patch.object(ProductionClass, "method", return_value=None) as mock_method:
    thing = ProductionClass()
    thing.method(1, 2, 3)
    mock_method.assert_called_once_with(1, 2, 3)
    # mock_method.assert_called_once_with(1, 2)

# ---
# patch.dict: temporarily swap a dict's contents; restored on exit.
foo = {"key": "value"}
original = foo.copy()
with patch.dict(foo, {"newkey": "newvalue"}, clear=True):
    assert foo == {"newkey": "newvalue"}
assert foo == original

# ---
# MagicMock supports configuring dunder methods such as __str__ directly.
mock = MagicMock()
mock.__str__.return_value = "foobarbaz"
print(str(mock))
mock.__str__.assert_called_with()

# ---
# A plain Mock needs the dunder attached manually.
mock = Mock()
mock.__str__ = Mock(return_value="wheeeeeee")
print(str(mock))
|
import platform
import sys
import warnings
from setuptools import Extension
from setuptools import setup
# Require Python 3.6+ (the message string is runtime data, left untouched).
if sys.version_info < (3, 6):
    raise RuntimeError('当前ctpbee_api只支持python36以及更高版本/ ctpbee only support python36 and highly only ')

runtime_library_dir = []
long_description = ""

# Per-platform C++17 compiler/linker flags for the CTP extension modules.
# NOTE(review): this check uses platform.uname().system while the ext_modules
# selection below uses platform.system() -- equivalent values, but worth unifying.
if platform.uname().system == "Windows":
    compiler_flags = [
        "/MP", "/std:c++17",  # standard
        "/O2", "/Ob2", "/Oi", "/Ot", "/Oy", "/GL",  # Optimization
        "/wd4819",  # 936 code page,
        "/DNOMINMAX"
    ]
    extra_link_args = []
else:
    compiler_flags = [
        "-std=c++17",  # standard
        "-O3",  # Optimization
        "-Wno-delete-incomplete", "-Wno-sign-compare", "-pthread",
    ]
    extra_link_args = ["-lstdc++"]
    # Resolve bundled shared libraries relative to the extension at runtime.
    runtime_library_dir = ["$ORIGIN"]
# CTP market-data extension (links the "_se" vendor libraries).
vnctpmd = Extension(
    "ctpbee_api.ctp.vnctpmd",
    [
        "ctpbee_api/ctp/vnctp/vnctpmd/vnctpmd.cpp",
    ],
    include_dirs=[
        "ctpbee_api/ctp/include",
        "ctpbee_api/ctp/vnctp",
    ],
    language="cpp",
    define_macros=[],
    undef_macros=[],
    library_dirs=["ctpbee_api/ctp/libs", "ctpbee_api/ctp"],
    libraries=["thostmduserapi_se", "thosttraderapi_se", ],
    extra_compile_args=compiler_flags,
    extra_link_args=extra_link_args,
    depends=[],
    runtime_library_dirs=runtime_library_dir,
)
# CTP trader extension.
vnctptd = Extension(
    "ctpbee_api.ctp.vnctptd",
    [
        "ctpbee_api/ctp/vnctp/vnctptd/vnctptd.cpp",
    ],
    include_dirs=[
        "ctpbee_api/ctp/include",
        "ctpbee_api/ctp/vnctp",
    ],
    define_macros=[],
    undef_macros=[],
    library_dirs=["ctpbee_api/ctp/libs",
                  "ctpbee_api/ctp",
                  ],
    libraries=["thostmduserapi_se", "thosttraderapi_se"],
    extra_compile_args=compiler_flags,
    extra_link_args=extra_link_args,
    runtime_library_dirs=runtime_library_dir,
    depends=[],
    language="cpp",
)
# CTP-mini trader extension (links the non-"_se" vendor libraries).
mini_td = Extension(
    "ctpbee_api.ctp_mini.CTdApi",
    [
        "ctpbee_api/ctp_mini/vnmini/vnminitd/vnminitd.cpp",
    ],
    include_dirs=[
        "ctpbee_api/ctp_mini/include",
        "ctpbee_api/ctp_mini/vnmini",
        "ctpbee_api/ctp_mini/vnmini/vnminitd",
    ],
    define_macros=[],
    undef_macros=[],
    library_dirs=["ctpbee_api/ctp_mini/libs",
                  "ctpbee_api/ctp_mini",
                  ],
    libraries=["thostmduserapi", "thosttraderapi"],
    extra_compile_args=compiler_flags,
    extra_link_args=extra_link_args,
    runtime_library_dirs=runtime_library_dir,
    depends=[],
    language="cpp",
)
# CTP-mini market-data extension.
mini_md = Extension(
    "ctpbee_api.ctp_mini.CMdApi",
    [
        "ctpbee_api/ctp_mini/vnmini/vnminimd/vnminimd.cpp",
    ],
    include_dirs=[
        "ctpbee_api/ctp_mini/include",
        "ctpbee_api/ctp_mini/vnmini",
        "ctpbee_api/ctp_mini/vnmini/vnminimd",
    ],
    define_macros=[],
    undef_macros=[],
    library_dirs=["ctpbee_api/ctp_mini/libs",
                  "ctpbee_api/ctp_mini",
                  ],
    libraries=["thostmduserapi", "thosttraderapi"],
    extra_compile_args=compiler_flags,
    extra_link_args=extra_link_args,
    runtime_library_dirs=runtime_library_dir,
    depends=[],
    language="cpp",
)
# Native extensions are only built on Linux; Windows ships prebuilt binaries
# and macOS has no vendor API (warning string is runtime data, left untouched).
if platform.system() == "Windows":
    ext_modules = []
elif platform.system() == "Darwin":
    warnings.warn("因为官方并没有发布基于mac的api, 所以当前ctpbee并不支持mac下面的ctp接口")
    ext_modules = []
else:
    ext_modules = [vnctptd, vnctpmd, mini_td, mini_md]
pkgs = ['ctpbee_api.ctp', "ctpbee_api", "ctpbee_api.ctp_mini"]
setup(
    name='ctpbee_api',
    # setuptools expects the version as a string; the original passed the
    # float 0.2, which is only tolerated via coercion and breaks strict
    # metadata validation.
    version='0.2',
    description="single CTP API support, From VNPY",
    author='somewheve',
    long_description=long_description,
    author_email='somewheve@gmail.com',
    url='https://github.com/ctpbee/ctpbee_api',
    license="MIT",
    packages=pkgs,
    install_requires=[],
    platforms=["Windows", "Linux"],
    include_package_data=True,
    package_dir={'ctpbee_api': 'ctpbee_api'},
    package_data={'ctpbee_api': ['ctp/*', "ctp_mini/*"]},
    ext_modules=ext_modules,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ]
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple model for path recursive walk.
Run script: dick_path_scanner PATH
PATH - can be absolute or relative
"""
import os
class RecursivePathWalker(object):
    """Recursive directory walker exposing lazy listings of paths, files and dirs."""

    def __init__(self, path):
        # Root directory the walks start from.
        self.path = path

    def enumerate_path(self):
        """
        Yield the full path of every file under the root, recursively.
        :return: generator of path to files
        """
        for dir_path, _dir_names, file_names in os.walk(self.path):
            for file_name in file_names:
                yield os.path.join(dir_path, file_name)

    def enumerate_files(self):
        """
        Yield the bare name of every file under the root, recursively.
        :return: generator of files from path
        """
        for _dir_path, _dir_names, file_names in os.walk(self.path):
            for file_name in file_names:
                yield file_name

    def enumerate_dirs(self):
        """
        Yield the bare name of every subdirectory under the root, recursively.
        :return: generator of subdir
        """
        for _dir_path, dir_names, _file_names in os.walk(self.path):
            for dir_name in dir_names:
                yield dir_name
if __name__ == '__main__':
    import sys
    # -h/--help: print the module docstring (usage) and exit.
    if '-h' in sys.argv or '--help' in sys.argv:
        print(__doc__)
        sys.exit(1)
    # Exactly one positional argument (the path) is required.
    if not len(sys.argv) == 2:
        print('PATH is mandatory')
        print(__doc__)
        sys.exit(1)
    PATH = sys.argv[1]
    path_walker = RecursivePathWalker(PATH)
    print('\n--**-- START --**--\n')
    print('1) Recursive listing of all paths in a dir\n')
    for path in path_walker.enumerate_path():
        print(path)
    print('\n2) Recursive listing of all files in a dir\n')
    for file_name in path_walker.enumerate_files():
        print(file_name)
    print('\n3) Recursive listing of all dirs in a dir\n')
    for dir_name in path_walker.enumerate_dirs():
        print(dir_name)
    print('\n--**-- THE END --**--\n')
|
from enum import Enum
class Architecture(Enum):
    """CPU architectures a job can target."""
    amd64 = 0
    armv7 = 1
class ResultStatus(Enum):
    """Outcome of a job execution attempt."""
    Completed = 0
    Declined = 1
    JobDescriptionError = 2
    JobNotFound = 3
    MemoryExceeded = 4
    StorageExceeded = 5
    InstructionsExceeded = 6
    BandwidthExceeded = 7
    # NOTE(review): misspelling of "ExceptionOccurred"; renaming would break
    # existing callers, so it is left as-is.
    ExceptionOccured = 8
    DirectoryUnavailable = 9
class Party(Enum):
    """The two sides of a job agreement."""
    ResourceProvider = 0
    JobCreator = 1
class Verdict(Enum):
    """Mediation verdict on a disputed job result."""
    ResultNotFound = 0
    TooMuchCost = 1
    WrongResults = 2
    CorrectResults = 3
    InvalidResultStatus = 4
class Reaction(Enum):
    """Response to a result or offer."""
    Accepted = 0
    Rejected = 1
    # Leading underscore because 'None' is a reserved word in Python.
    _None = 2
class EtherTransferCause(Enum):
    """Reason attached to an ether transfer in the marketplace protocol."""
    PostJobOffer = 0
    PostResourceOffer = 1
    CancelJobOffer = 2
    CancelResOffer = 3
    Punishment = 4
    Mediation = 5
    FinishingJob = 6
    FinishingResource = 7
    PostMatch = 8
    MediatorAvailability = 9
|
"""Defines URL patterns for learning_logs."""
from django.urls import path
from . import views
# Route table: query/listing pages, sale CRUD, and creation forms.
urlpatterns = [
    # Home page
    path('', views.index, name='index'),
    # Query Menu
    path('dblists/', views.dblists, name='dblists'),
    # List Clients
    path('dblists/clients/', views.clients, name='clients'),
    # List Products
    path('dblists/products/', views.products, name='products'),
    # List Sales
    path('dblists/sales/', views.sales, name='sales'),
    # All sales of today (paginated)
    path('dblists/td_sales/<int:dia>/<int:page>/', views.todays_sales, name='td_sales'),
    # Sales of the month by day
    path('dblists/mn_sales/<int:month>', views.months_sales, name='mn_sales'),
    # Sales of the year by month
    path('dblists/yr_sales/', views.years_sales, name='yr_sales'),
    # List Movements in a Sale
    path('dblists/sales/<int:sale_id>/', views.sale, name='sale'),
    # Edit or delete movements in a sale
    path('dblists/sales/<int:sale_id>/edit/', views.edit_sale, name='edit_sale'),
    # Deletes Sale
    path('dblists/sales/<int:sale_id>/edit/delete', views.delete_sale, name='delete_sale'),
    # Choose what to create
    path('lancar/', views.create, name='creation'),
    # Create Client
    path('lancar/client/', views.create_client, name='client_creation'),
    # Create Family
    path('lancar/family/', views.create_family, name='family_creation'),
    # Create Product
    path('lancar/product/', views.create_product, name='product_creation'),
    # Create Sale
    path('lancar/sale/', views.create_sale, name='sale_creation'),
]
# Algorithm 1: single pass, run-length encode by hand.
def str_compress1(string):
    """Run-length encode *string*: 'abbbb' -> 'a1b4'.

    Fixed: the original indexed string[0] unconditionally and raised
    IndexError on empty input; '' now encodes to ''. Commented-out
    string-concatenation leftovers were removed.
    """
    if not string:
        return ''
    result = []
    current = string[0]
    count = 1
    for s in string[1:]:
        if s == current:
            count += 1
        else:
            result.append(current)
            result.append(str(count))
            current = s
            count = 1
    # Flush the final run.
    result.append(current)
    result.append(str(count))
    return ''.join(result)
# Algorithm 2: lean on itertools.groupby to find the runs.
from itertools import groupby


def str_compress2(string):
    """Run-length encode *string* ('abbbb' -> 'a1b4') via groupby runs."""
    pieces = []
    for char, run in groupby(string):
        run_length = sum(1 for _ in run)
        pieces.append('%s%d' % (char, run_length))
    return ''.join(pieces)
if __name__ == '__main__':
    # Smoke test: both algorithms should print 'a1b4f2c3d3c2'.
    s = 'abbbbffcccdddcc'
    print(str_compress1(s))
    print(str_compress2(s))
|
#!/usr/bin/env python3
"""
Spooloff Oracle data file parser to csv file
"""
from optparse import OptionParser
import fnmatch
import logging
import os
import re
def gen_find(filepattern, top):
    """Yield full paths of files directly inside *top* matching *filepattern*.

    Only the first os.walk() entry (the top directory itself) is inspected,
    so the scan is deliberately non-recursive.

    Fixed: the original bound path/filelist via a `for ...: break` loop, so a
    missing or unreadable *top* (os.walk yields nothing) left them unbound and
    crashed with NameError; now the generator simply yields nothing.
    """
    walker = os.walk(top)
    try:
        path, _, filelist = next(walker)
    except StopIteration:
        return
    for name in fnmatch.filter(filelist, filepattern):
        yield os.path.join(path, name)
def gen_open_files(filenames):
    """Lazily open each file in *filenames*, yielding open handles.

    Each file is opened inside a with-block so it is closed even when the
    consumer abandons the generator mid-iteration (the original only closed
    a file when the generator was resumed normally, leaking the handle
    otherwise). Also fixes the log-message typo "or process" -> "for process"
    and uses lazy %-style logging args.
    """
    for name in filenames:
        with open(name) as f:
            logging.info("Open file %s for process", name)
            yield f
def gen_cat(sources):
    """Concatenate open file objects, yielding (line, source_name) pairs."""
    logging.info("Starting processing the file")
    for source in sources:
        source_name = str(source.name)
        yield from ((row, source_name) for row in source)
def get_mapped_value(iterator):
    """Map a (line, filename) pair to {filename: cleaned_line}.

    Separator lines (starting with a newline or '-') are dropped by returning
    None. Whitespace runs after commas are collapsed and outer spaces
    stripped. The original wrapped this in a `for i in range(len(...))` loop
    that only ever inspected element 0 and returned on the first pass --
    the loop is removed; behaviour is unchanged (including returning None
    for an empty pair).
    """
    if not iterator:
        return None
    line, name = iterator[0], iterator[1]
    if line.startswith("\n") or line.startswith("-"):
        return None
    return {name: re.sub(r",\s+", ",", line).strip(" ")}
def cur_val_check(cv, lv):
    # Handle continuation rows in the spooled CSV data: when the current line
    # *cv* has an empty first field, glue its last field onto the previous
    # line *lv*. Returns (is_continuation, merged_or_passthrough_line):
    #   (True, merged)  -- cv continued lv; caller should replace lv's entry
    #   (False, "")     -- cv has a single field only; unusable, drop it
    #   (False, cv)     -- normal data line, passed through unchanged
    if not cv.split(",")[0]:
        fv = len(cv.split(","))  # NOTE(review): computed but never used
        if not lv:
            lv = cv
        # Strip the newline so the merged line stays on one row.
        lv = re.sub(r"\n", "", lv)
        res = lv + " " + cv.split(",")[-1]
        return True, res
    if len(cv.split(",")) <= 1:
        return False, ""
    return False, cv
def get_line_for_parse(line):
    # Fold the (line, filename) stream into {filename: [cleaned rows]}.
    # Continuation rows (flag=True from cur_val_check) replace the previous
    # row in that file's list with the merged line.
    last_value = ""
    ret = {}
    # get_mapped_value returns {filename: cleaned_line} or None (separators).
    strline = map(get_mapped_value, line)
    for item in strline:
        if isinstance(item, dict):
            # Single-entry dict: unpack its only key/value pair.
            for k, v in item.items():
                fname = k
                current_value = v
            flag, rt = cur_val_check(current_value, last_value)
            last_value = v
            if flag:
                # Continuation: drop the previous row; the merged row
                # (rt) is appended below in its place.
                ret.setdefault(fname, []).pop(-1)
            ret.setdefault(fname, []).append(rt)
    return ret
def writer_to_csv(dt):
    # Write one CSV file per source key of *dt* into ./out/.
    counter = 0
    for k, v in dt.items():
        counter += 1
        # Sanitize the key (originally a file path) into a usable file name.
        k = re.sub(r"[\'./]", "", str(k))
        # NOTE(review): r"\[|" alternates '[' with the empty pattern, so it
        # only removes '[' characters; possibly r"[\[\]]" (strip both
        # brackets) was intended -- confirm before changing.
        k = re.sub(r"\[|", "", str(k))
        if not os.path.exists("./out"):
            logging.info("Creating dir ./out/")
            os.mkdir("./out/")
        with open(file="./out/" + k + "_out_" + ".csv", encoding="utf-8", mode="w+") as fn:
            logging.info("Starting saving files to path ./out/%s" % k + "_out_" + ".csv")
            fn.writelines(v)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    op = OptionParser()
    op.add_option("-d", "--dir", action="store", type=str, help="Dir with spool offs Oracle data files. Default=./",
                  default="./")
    op.add_option("-m", "--filemask", action="store", type=str, help="Files mask like *.txt. Default=*.txt",
                  default="*.txt")
    op.add_option("-l", "--log", action="store", type=str, help="Log filename.", default="app_spool_parser.log")
    # op.description("Example python(version) oraclespooloffparser.py -d <directr> ")
    (opts, args) = op.parse_args()
    logging.basicConfig(filename=opts.log, level=logging.INFO,
                        format='[%(asctime)s] %(levelname).1s %(message)s', datefmt='%Y.%m.%d %H:%M:%S')
    if opts.dir:
        try:
            logging.info("Starting processing files for dir %s" % opts.dir)
            # Lazy pipeline: find files -> open -> concatenate lines -> parse -> write CSVs.
            filenames = gen_find(opts.filemask, opts.dir)
            txtfiles = gen_open_files(filenames)
            txtline = gen_cat(txtfiles)
            line = get_line_for_parse(txtline)
            writer_to_csv(line)
        except Exception as e:
            logging.exception("No files in %s", opts.dir)
|
# win10 python3.10 maya2018
# python module
import os,subprocess,re
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askdirectory,askopenfilenames
from tkinter.scrolledtext import ScrolledText
# local module
import deadlineSubmission
import configFile
# extra module
import windnd
def generateCmdStruct():
    '''
    Loop over every Maya file and build the mel-to-cmd command lists.
    Returns 3 lists: [0] Maya files, [1] cmd lines for running mayabatch
    locally, [2] cmd lines for submitting to Deadline.
    '''
    mayaInstallPathCorrect= correctWinPath(mayaInstallPathEntry.get()+'/bin')
    outputPath = correctWinPath(outputPathEntry.get())
    melCommand = melCommandScrolledText.get(1.0,END).strip().replace('\n','').replace('\r','').replace('"',r'\"') # collapse multi-line mel into one line
    # cmd treats "<>" as I/O redirection, so escape them to "^<^>"; the two
    # lists below map each Maya render token to its cmd-escaped form 1:1.
    mayaMelMatch=['<Scene>','<RenderLayer>','<Camera>','<RenderPassFileGroup>','<RenderPass>','<RenderPassType>','<Extension>','<Version>']
    mayaMelMatchToCmd=['^<Scene^>','^<RenderLayer^>','^<Camera^>','^<RenderPassFileGroup^>','^<RenderPass^>','^<RenderPassType^>','^<Extension^>','^<Version^>']
    for i in range(len(mayaMelMatch)):
        if re.search(re.escape(mayaMelMatch[i]),melCommand,re.I):
            melCommand = re.sub(re.escape(mayaMelMatch[i]),mayaMelMatchToCmd[i],melCommand,flags=re.I)
    # The mel code may contain {###.abc}, marking the output file format,
    # e.g. abc (this extraction could be cleaned up).
    try:fileExt=melCommand.split("{###.")[1].split("}",1)[0]
    except:fileExt=''
    # Collect all Maya files from the ';'-separated entry field.
    mayaFullNameFilterList=mayaFilesEntry.get().split(';')
    mayaFullNameList=[]
    for i in mayaFullNameFilterList:
        files = filterFileExt(i.strip(), ('.ma','.mb'))
        if files:
            [mayaFullNameList.append(j) for j in files]
    # Fix up each Maya file path (forward slashes, quoting of spaces).
    mayaFullNameList = [correctWinPath(i) for i in mayaFullNameList]
    # Build the per-file command lines.
    localExecuteCmdList=[]
    submitDeadlineCmdList=[]
    for i in mayaFullNameList:
        mayaName = os.path.splitext(i)[0].split('/')[-1].lstrip('"')
        # replace {###.abc} to outputName.ext, use export ABC file
        melCommandCorrect = melCommand.replace('{###.'+fileExt+'}',f'{outputPath}/{mayaName}.{fileExt}')
        localExecuteCmd=f'''mayabatch.exe -file {i} -command "{melCommandCorrect}"'''
        localExecuteCmdList.append(localExecuteCmd)
        submitDeadlineCmd=f'''{mayaInstallPathCorrect}/mayabatch.exe -file {i} -command "{melCommandCorrect}"'''
        submitDeadlineCmdList.append(submitDeadlineCmd)
    return mayaFullNameList,localExecuteCmdList,submitDeadlineCmdList,
def ExecuteLocalCmd(execute=False):
    '''
    Run the generated commands on this machine, or just print them (for testing).
    '''
    mayaInstallPath=mayaInstallPathEntry.get()+'/bin'
    if execute:
        for i in generateCmdStruct()[1]:
            subprocess.run(i,cwd=mayaInstallPath,shell=True,encoding="utf-8",check=False)
    else:
        for i in generateCmdStruct()[1]:
            print(i)
    doneMessage(tk)
def ExecuteCmdToDeadline(execute=False):
    '''
    Submit the generated commands to Deadline, or just print them (for testing).
    '''
    deadlineInstallPath = deadlineInstallPathEntry.get()
    outputPath = correctWinPath(outputPathEntry.get())
    # Build the command structure once. The original called generateCmdStruct()
    # up to three times per loop iteration, re-reading the GUI and re-deriving
    # every command list each time.
    mayaFiles, _localCmds, deadlineCmds = generateCmdStruct()
    if execute:
        for mayaFile, deadlineCmd in zip(mayaFiles, deadlineCmds):
            mayaName = os.path.splitext(os.path.basename(mayaFile))[0]
            deadlineSubmission.quickCmdSubmit(deadlineInstallPath, outputPath, mayaName, deadlineCmd)
    else:
        for deadlineCmd in deadlineCmds:
            print(deadlineCmd)
    doneMessage(tk)
def ExecuteArnoldToDeadline(execute=False):
    '''
    Run the local command for each file and submit an Arnold job to Deadline,
    or just print the commands (for testing).
    '''
    deadlineInstallPath=deadlineInstallPathEntry.get()
    mayaInstallPath=mayaInstallPathEntry.get()+'/bin'
    if execute:
        # NOTE(review): generateCmdStruct() re-reads the GUI and rebuilds all
        # lists on every call inside this loop; hoisting a single call out of
        # the loop would avoid the repeated work.
        for i in range(len(generateCmdStruct()[0])):
            subprocess.run(generateCmdStruct()[1][i],cwd=mayaInstallPath,shell=True,encoding="utf-8",check=False)
            deadlineSubmission.quickArnoldSubmit(deadlineInstallPath,mayaInstallPathEntry.get()+'/bin',generateCmdStruct()[0][i])
    else:
        for i in range(len(generateCmdStruct()[0])):
            print(generateCmdStruct()[2][i])
    doneMessage(tk)
def correctWinPath(path):
    '''
    Normalise a path for Windows cmd use: 1) backslashes become forward
    slashes; 2) any component containing a space is wrapped in double quotes.
    '''
    components = os.path.abspath(path).split('\\')
    quoted = ['"%s"' % part if ' ' in part else part for part in components]
    return '/'.join(quoted)
def filterFileExt(path=r'c:/a.txt', fileFilterExt=('.txt', '.mp4')):
    '''
    If *path* is a directory, return the full paths of its direct (one-level)
    files whose extension is in *fileFilterExt*; if *path* is a file name with
    a matching extension, return [path].

    Changes: the mutable list default argument is now a tuple (same membership
    semantics, no shared-state risk), and the previously implicit None return
    for a non-matching file is now an empty list -- callers in this module
    only truth-test the result, so this is compatible and safe to iterate.

    Output: list of matching file paths ([] when nothing matches).
    '''
    if os.path.isdir(path):
        entries = [os.path.abspath(path) + '/' + name for name in os.listdir(path)
                   if os.path.isfile(path + '/' + name)]
        return [entry for entry in entries if os.path.splitext(entry)[1] in fileFilterExt]
    if os.path.splitext(path)[1] in fileFilterExt:
        return [path]
    return []
def about(mainTk):
    # "About" dialog, centred over the main window. The user-facing label
    # text is runtime data and intentionally left untranslated.
    top=Toplevel()
    mainWidth=mainTk.winfo_width()
    mainHight=mainTk.winfo_height()
    mainXPos=mainTk.winfo_x()
    mainYPos=mainTk.winfo_y()
    ToplevelWidth=350
    ToplevelHight=230
    # Centre the popup relative to the parent window.
    ToplevelXPos=(mainWidth-ToplevelWidth)/2+mainXPos
    ToplevelYPos=(mainHight-ToplevelHight)/2+mainYPos
    top.geometry( "%dx%d+%d+%d" % (ToplevelWidth,ToplevelHight,ToplevelXPos,ToplevelYPos))
    Label(top,justify='left',text='说明:\n1. 此脚本导出文件的格式,比如导出abc是 {###.abc}\n所以mel里面不要有冲突关键符"{###."和"\\n"\n2. 直接执行mel,记得最后加上“ file -s; ”').grid(row=0,sticky='w')
    Label(top,justify='left',text='').grid(row=1,sticky='w')
    Label(top,justify='left',text=r'制作:天雷动漫').grid(row=2,sticky='w')
    Label(top,justify='left',text=r'测试环境:win10 python3.9 maya2018').grid(row=3,sticky='w')
    Label(top,justify='left',text='源码:\nhttps://github.com/handierchan/MayaExecuteMel\nhttps://gitee.com/handierchan/MayaExecuteMel').grid(row=4,sticky='w')
    Button(top,text='退出',command=lambda:top.destroy()).grid(row=5,sticky='w')
def doneMessage(mainTk):
    # "Done" popup, centred over the main window and kept on top.
    top=Toplevel()
    mainWidth=mainTk.winfo_width()
    mainHight=mainTk.winfo_height()
    mainXPos=mainTk.winfo_x()
    mainYPos=mainTk.winfo_y()
    ToplevelWidth=150
    ToplevelHight=100
    # Centre the popup relative to the parent window.
    ToplevelXPos=(mainWidth-ToplevelWidth)/2+mainXPos
    ToplevelYPos=(mainHight-ToplevelHight)/2+mainYPos
    top.geometry( "%dx%d+%d+%d" % (ToplevelWidth,ToplevelHight,ToplevelXPos,ToplevelYPos))
    top.attributes('-topmost',True)
    Message(top,text='执行完成,查看是否成功').pack()
    Button(top,text='退出',command=lambda:top.destroy()).pack()
def errorMessage(mainTk):
    # "Error" popup, centred over the main window and kept on top.
    top=Toplevel()
    mainWidth=mainTk.winfo_width()
    mainHight=mainTk.winfo_height()
    mainXPos=mainTk.winfo_x()
    mainYPos=mainTk.winfo_y()
    ToplevelWidth=150
    ToplevelHight=100
    # Centre the popup relative to the parent window.
    ToplevelXPos=(mainWidth-ToplevelWidth)/2+mainXPos
    ToplevelYPos=(mainHight-ToplevelHight)/2+mainYPos
    top.geometry( "%dx%d+%d+%d" % (ToplevelWidth,ToplevelHight,ToplevelXPos,ToplevelYPos))
    top.attributes('-topmost',True)
    Message(top,text='执行出错!!').pack()
    Button(top,text='退出',command=lambda:top.destroy()).pack()
def tkGUIPosition(tkinter,addWidth=10,addHight=10):
    # Fix the window size (plus a small margin) and centre it on screen.
    tkinter.resizable(0,0)
    tkinter.update()
    tkGUIWidth = tkinter.winfo_width()
    tkGUIHeigth = tkinter.winfo_height()
    screenWidth = tkinter.winfo_screenwidth()
    screenHeight = tkinter.winfo_screenheight()
    tkinter.geometry("%dx%d+%d+%d"%(tkGUIWidth+addWidth,tkGUIHeigth+addHight,(screenWidth-tkGUIWidth)/2,(screenHeight-tkGUIHeigth)/2))
if __name__=='__main__':
    # Entry point: a Tk GUI that runs MEL commands on Maya scene files either
    # locally or by submitting jobs to a Deadline render farm.
    softwareName='MayaExecuteMel'
    tk=Tk()
    tk.title(softwareName)
    # String variables backing the path entries, pre-filled with defaults.
    mayaInstallPathVar=StringVar(tk,value=r'C:/Program Files/Autodesk/Maya2018')
    deadlineInstallPathVar=StringVar(tk,value=r'C:/Program Files/Thinkbox/Deadline10')
    mayaFilesVar=StringVar(tk)
    outputPathVar=StringVar(tk)
    def defaultParameters():
        # Restore every input (paths, file list, MEL text) to its default.
        mayaInstallPathVar.set(r'C:/Program Files/Autodesk/Maya2018')
        deadlineInstallPathVar.set(r'C:/Program Files/Thinkbox/Deadline10')
        mayaFilesVar.set('')
        outputPathVar.set('')
        melCommandScrolledText.delete(1.0,END)
    def selectMayaFiles(tkVar):
        # File picker for one or more Maya scenes; stored joined with '; '.
        selectPath=askopenfilenames(filetypes=[('Maya Files',('*.ma;*.mb'))])
        tkVar.set('; '.join(selectPath))
    def selectoutputPath(tkVar):
        # Directory picker for the render/output location.
        selectPath=askdirectory()
        tkVar.set(selectPath)
    ### UI layout (original comment: 界面)
    Label(tk, text='Maya Install Path').grid(row=0, column=0, sticky='e',ipadx=10)
    mayaInstallPathEntry = Entry(tk, textvariable=mayaInstallPathVar)
    mayaInstallPathEntry.grid(row=0, column=1, sticky='w',ipadx=150)
    defaultButton=Button(tk, text='Default',width=8,command=lambda:defaultParameters())
    defaultButton.grid(row=0, column=2, sticky='w')
    Label(tk, text='Deadline Install Path').grid(row=1, column=0, sticky='e',ipadx=10)
    deadlineInstallPathEntry = Entry(tk, textvariable=deadlineInstallPathVar)
    deadlineInstallPathEntry.grid(row=1, column=1, sticky='w',ipadx=150)
    Label(tk, text='Maya Files/Path').grid(row=2, column=0, sticky='e',ipadx=10)
    mayaFilesEntry = Entry(tk, textvariable=mayaFilesVar)
    mayaFilesEntry.grid(row=2, column=1, sticky='w',ipadx=250)
    mayaFilesButton=Button(tk, text='Select', width=8,command=lambda:selectMayaFiles(mayaFilesVar))
    mayaFilesButton.grid(row=2, column=2, sticky='w')
    Label(tk, text='Output Path').grid(row=3, column=0, sticky='e',ipadx=10)
    outputPathEntry = Entry(tk, textvariable=outputPathVar)
    outputPathEntry.grid(row=3, column=1, sticky='w',ipadx=250)
    outputPathButton=Button(tk, text='Select', width=8,command=lambda:selectoutputPath(outputPathVar))
    outputPathButton.grid(row=3, column=2, sticky='w')
    Label(tk, text='Mel Command').grid(row=4, column=0, sticky='ne',ipadx=10)
    melCommandScrolledText = ScrolledText(tk,width='80',height='15',wrap='word')
    # melCommandScrolledText.insert(1.0,'aaaaaa')
    melCommandScrolledText.grid(row=4,column=1,sticky='we',columnspan=2)
    melCommandScrolledText.focus()
    # Warning label (Chinese text): 1. Maya file paths must not contain
    # Chinese characters or spaces; 2. the MEL code must not contain Chinese
    # characters or comments.
    Label(tk, text='(1.Maya 文件路径不支持中文和空格;2.代码不支持中文和注释)',fg='SaddleBrown').grid(row=5, column=1, sticky='w')
    # Action buttons; ExecuteLocalCmd / ExecuteCmdToDeadline /
    # ExecuteArnoldToDeadline are presumably defined earlier in this file.
    localConvertButton = Button(tk,text='Local Render',fg='green',width=15,command=lambda:ExecuteLocalCmd(1))
    localConvertButton.grid(row=6,column=1,sticky='w')
    cmdToDeadlineButton=Button(tk,text='CMD to Deadline',fg='green',width=20,command=lambda:ExecuteCmdToDeadline(1))
    cmdToDeadlineButton.grid(row=6,column=1,sticky='w',padx=130)
    arnoldToDeadlineButton=Button(tk,text='Arnold to Deadline',fg='green',width=20,command=lambda:ExecuteArnoldToDeadline(1))
    arnoldToDeadlineButton.grid(row=6,column=1,sticky='e',padx=130)
    aboutButton=Button(tk,text='About',command=lambda:about(tk))
    aboutButton.grid(row=6, column=2, sticky='e')
    ### Save/Load History Config
    def getHistoryConfig():
        # Snapshot of every field; persisted on exit, restored on launch.
        historyConfig={
            'MayaInstallPath':mayaInstallPathEntry.get(),
            'DeadlineInstallPath':deadlineInstallPathEntry.get(),
            'MayaFiles':mayaFilesVar.get(),
            'OutputPath':outputPathVar.get(),
            'melCommand':melCommandScrolledText.get(1.0, END),
        }
        return historyConfig
    def quitWindow():
        # Best-effort save of the current field values, then full teardown.
        try:configFile.configDictToFile(configFile.createAppDataPath(softwareName,'')+'/history.txt',getHistoryConfig())
        except:pass
        tk.quit()
        tk.destroy()
        exit()
    # Restore the previous session's field values if a history file exists.
    try:
        configDict=configFile.configFileToDict(configFile.createAppDataPath(softwareName,'')+'/history.txt')
        mayaInstallPathVar.set(configDict.get('MayaInstallPath'))
        deadlineInstallPathVar.set(configDict.get('DeadlineInstallPath'))
        mayaFilesVar.set(configDict.get('MayaFiles'))
        outputPathVar.set(configDict.get('OutputPath'))
        melCommandScrolledText.insert(1.0,configDict.get('melCommand'))
    except:pass
    ### Drag-and-drop: copy dropped paths into the fields
    ### (original comment: 鼠标拖动复制字符)
    def mayaInstallPathEntry_MouseDrag(files):
        if os.path.isdir(files[0]): mayaInstallPathVar.set(files[0])
    def deadlineInstallPathEntry_MouseDrag(files):
        if os.path.isdir(files[0]): deadlineInstallPathVar.set(files[0])
    def mayaFilesEntry_MouseDrag(files):
        # Keep only real files; dropped directories are ignored.
        filesFilters=[i for i in files if os.path.isfile(i)]
        filesList='; '.join((i for i in filesFilters))
        mayaFilesVar.set(filesList)
    def outputPathEntry_MouseDrag(files):
        if os.path.isdir(files[0]): outputPathVar.set(files[0])
    def melCommandScrolledText_MouseDrag(files):
        # Replace the MEL box contents with the dropped file's text
        # (empty text on read error).
        with open(files[0], "r", encoding='utf-8') as r:
            try:text=r.read()
            except:text=''
        melCommandScrolledText.delete(1.0,END)
        melCommandScrolledText.insert(1.0,text)
    windnd.hook_dropfiles(mayaInstallPathEntry,func=mayaInstallPathEntry_MouseDrag,force_unicode=1)
    windnd.hook_dropfiles(deadlineInstallPathEntry,func=deadlineInstallPathEntry_MouseDrag,force_unicode=1)
    windnd.hook_dropfiles(mayaFilesEntry,func=mayaFilesEntry_MouseDrag,force_unicode=1)
    windnd.hook_dropfiles(outputPathEntry,func=outputPathEntry_MouseDrag,force_unicode=1)
    windnd.hook_dropfiles(melCommandScrolledText,func=melCommandScrolledText_MouseDrag,force_unicode=1) # NOTE(review): original comment warns the GUI may hang/crash when the text widget is too small -- unverified
    # Button hover color feedback
    def SetBGColor(event):
        event.widget.config(bg='DarkSeaGreen')
    def ReturnBGColor(event):
        event.widget.config(bg='SystemButtonFace')
    for i in [mayaFilesButton,outputPathButton,localConvertButton,cmdToDeadlineButton,arnoldToDeadlineButton]:
        i.bind("<Enter>", SetBGColor)
        i.bind("<Leave>", ReturnBGColor)
    tkGUIPosition(tk,addWidth=30,addHight=10)
    tk.protocol("WM_DELETE_WINDOW",quitWindow)
    tk.mainloop()
|
from package_template import increment
class TestApp:
    """Unit tests for the package_template module."""

    def test_increment(self):
        """increment() should add exactly one to its argument."""
        # Fix: removed the unused local `arg = 0` left over from an earlier
        # draft of this test.
        assert increment(0) == 1
def cached_method(fnc):
    """Decorator memoizing a method's return value keyed by its positional args.

    The cache dict is exposed as ``result._cache`` for inspection/clearing.

    NOTE(review): the cache key does not include ``self``, so every instance
    of the class shares one cache, and it grows without bound -- confirm that
    callers expect per-class rather than per-instance caching.
    """
    cache = {}

    def result(self, *args):
        try:
            return cache[args]
        except KeyError:
            # Fix: the original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and masked unrelated errors.
            value = fnc(self, *args)
            cache[args] = value
            return value

    result._cache = cache
    return result
class OpenStruct(dict):
    """A dict whose entries are also readable/writable as attributes."""

    def __init__(self, *args, **kws):
        super().__init__(*args, **kws)
        # Aliasing __dict__ to the mapping itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
|
#!/usr/bin/env python3
"""Render Rosette API dependency parse trees as SVG via Graphviz"""
import argparse
import os
import re
import subprocess
import sys
import urllib
from operator import itemgetter, methodcaller
from getpass import getpass
EXTERNALS = ('rosette_api',)
try:
from rosette.api import API, DocumentParameters
except ImportError:
message = '''This script depends on the following modules:
{}
If you are missing any of these modules, install them with pip3:
$ pip3 install {}'''
print(
message.format('\n\t'.join(EXTERNALS), ' '.join(EXTERNALS)),
file=sys.stderr
)
sys.exit(1)
# Default public endpoint of the Rosette API REST service.
DEFAULT_ROSETTE_API_URL = 'https://api.rosette.com/rest/v1/'
# Dependency relationship label marking a sentence root in the ADM output.
ROOT = 'root'
# Graphviz DOT templates: one node per token, one labeled edge per dependency.
NODE = '{index} [label="{token}"]\n'
EDGE = '{governorTokenIndex} -> {dependencyTokenIndex} [label="{relationship}"]'
# Shared graph styling: directed open arrowheads, compact box-shaped nodes.
STYLE = '''
edge [dir="forward", arrowhead="open", arrowsize=0.5]
node [shape="box", height=0]
'''
def request(content, endpoint, api, language=None, uri=False, **kwargs):
    """Call a Rosette API endpoint for the given content and return its JSON.

    content:  document text, or a URI when `uri` is true
    endpoint: Rosette API endpoint name, e.g. 'entities' or
              'syntax_dependencies'
              (see https://developer.rosette.com/features-and-functions)
    api:      a rosette.api.API instance
              (e.g., API(user_key=<key>, service_url=<url>))
    language: optional ISO 639-2 T language code; when None the service
              auto-detects the language
    uri:      treat `content` as a URI whose document should be fetched and
              processed by the service
    kwargs:   extra keyword arguments forwarded to the endpoint method
              (e.g., facet='lemmas' for the 'morphology' endpoint)

    When the api's 'output' URL parameter has been set to 'rosette', the
    result is an A(nnotated) D(ata) M(odel): a dict holding the document
    content, annotations over it (e.g. 'token' and 'dependency' items under
    response['attributes']), and document metadata; otherwise it is the
    plain endpoint JSON (e.g. response['sentences'], response['tokens']).
    """
    parameters = DocumentParameters()
    field = 'contentUri' if uri else 'content'
    parameters[field] = content
    parameters['language'] = language
    # Dispatch dynamically to api.<endpoint>(parameters, **kwargs).
    return methodcaller(endpoint, parameters, **kwargs)(api)
def escape(token):
    """Backslash-escape characters with special meaning in Graphviz labels."""
    return re.sub(r'([\[\]()"\\])', r'\\\1', token)
def extent(obj):
    """Return (startOffset, endOffset) of a dict-like object, -1 when absent."""
    start = obj.get('startOffset', -1)
    end = obj.get('endOffset', -1)
    return start, end
def tokens(adm):
    """Return the ADM's token items ordered by their character offsets."""
    items = adm['attributes']['token']['items']
    return sorted(items, key=extent)
def dependencies(adm):
    """Return the ADM's dependency edges ordered by (governor, dependent) index."""
    edges = adm['attributes']['dependency']['items']
    by_indices = itemgetter('governorTokenIndex', 'dependencyTokenIndex')
    return sorted(edges, key=by_indices)
def deps_to_graph(adm, index_labels=False):
    """Create a digraph whose nodes are tokens and edges are dependencies"""
    # Sentence roots get synthetic nodes labeled S1, S2, ... with negative
    # indices so they can never collide with real (non-negative) token indices.
    sentence_index = -1
    digraph = 'digraph G{{{}'.format(STYLE)
    # One Graphviz node per token; optionally prefix "(i) " to show the
    # original token order.
    for i, token in enumerate(tokens(adm)):
        index_label = '({}) '.format(i) if index_labels else ''
        token_text = '{}{}'.format(index_label, escape(token['text']))
        digraph += NODE.format(index=i, token=token_text)
    for edge in dependencies(adm):
        if edge['relationship'] == ROOT:
            # Root edges come in with governorTokenIndex == -1; emit a fresh
            # per-sentence node and re-point the edge at it.
            digraph += '{} [label="S{}"]'.format(sentence_index, -sentence_index)
            # NOTE: mutates the ADM's edge dict in place.
            edge['governorTokenIndex'] = sentence_index
            sentence_index -= 1
        digraph += EDGE.format(**edge) + '\n'
    digraph += '}\n'
    return digraph
def make_svg(digraph):
    """Render a Graphviz DOT string to SVG (requires the `dot` executable).

    digraph: DOT-language source text
    Returns the SVG document as a string.
    Raises Exception when `dot` is not installed or rendering fails.
    """
    try:
        process = subprocess.Popen(
            ['dot', '-Tsvg'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
    except OSError:
        message = '''Cannot find dot which is required to create the SVG.
(You can install dot from the Graphviz package: http://graphviz.org/)'''
        raise Exception(message)
    svg, stderr = process.communicate(digraph)
    # Surface any stderr output, but only treat a nonzero exit status as
    # failure. Fix: the original raised on ANY stderr output, which turned
    # dot's non-fatal warnings into hard errors.
    if stderr:
        print(stderr, file=sys.stderr)
    if process.returncode != 0:
        message = 'Failed to create an svg representation from string: {}'
        raise Exception(message.format(digraph))
    return svg
def get_content(content, uri=False):
    """Load content from a file, stdin, or pass literal text / a URI through.

    content: None (read stdin), a path to an existing file (read it), or
             literal text / a URI
    uri:     when true, %-escape the content so it is safe to send as a URI
    Returns the resolved content string.
    """
    # Fix: the module only does `import urllib`, which does not reliably make
    # the urllib.parse submodule available; import it explicitly here.
    import urllib.parse
    if content is None:
        content = sys.stdin.read()
    elif os.path.isfile(content):
        with open(content, mode='r') as f:
            content = f.read()
    # Rosette API may balk at non-Latin characters in a URI so we get urllib
    # to %-escape it ('/' and ':' stay literal). Unquote first so input that
    # is already escaped is not double-escaped.
    if uri:
        unquoted = urllib.parse.unquote(content)
        content = urllib.parse.quote(unquoted, '/:')
    return content
def dump(data, filename):
    """Print data (plus a trailing newline) to the named file, or to stdout
    when filename is None."""
    if filename is None:
        print(data, file=sys.stdout)
        return
    with open(filename, mode='w') as out:
        print(data, file=out)
if __name__ == '__main__':
    # CLI entry point: parse options, request a dependency parse from the
    # Rosette API, render it with Graphviz, and write the SVG out.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=__doc__
    )
    parser.add_argument(
        '-i', '--input',
        help=(
            'Path to a file containing input data (if not specified data is '
            'read from stdin)'
        ),
        default=None
    )
    parser.add_argument(
        '-u',
        '--content-uri',
        action='store_true',
        help='Specify that the input is a URI (otherwise load text from file)'
    )
    parser.add_argument(
        '-o', '--output',
        help=(
            'Path to a file where SVG will be written (if not specified data '
            'is written to stdout)'
        ),
        default=None
    )
    parser.add_argument(
        '-k', '--key',
        help='Rosette API Key',
        default=None
    )
    parser.add_argument(
        '-a', '--api-url',
        help='Alternative Rosette API URL',
        default=DEFAULT_ROSETTE_API_URL
    )
    parser.add_argument(
        '-l', '--language',
        help=(
            'A three-letter (ISO 639-2 T) code that will override automatic '
            'language detection'
        ),
        default=None
    )
    parser.add_argument(
        '-b', '--label-indices',
        action='store_true',
        help=(
            'Add token index labels to show the original token order; '
            'this can help in reading the trees, but it adds visual clutter'
        )
    )
    args = parser.parse_args()
    # Get the user's Rosette API key: environment variable first, then the
    # -k flag, finally an interactive prompt.
    key = (
        os.environ.get('ROSETTE_USER_KEY') or
        args.key or
        getpass(prompt='Enter your Rosette API key: ')
    )
    # Instantiate the Rosette API client; output=rosette requests the full
    # ADM representation that deps_to_graph() consumes.
    api = API(user_key=key, service_url=args.api_url)
    api.set_url_parameter('output', 'rosette')
    content = get_content(args.input, args.content_uri)
    adm = request(
        content,
        'syntax_dependencies',
        api,
        language=args.language,
        uri=args.content_uri
    )
    # DOT -> SVG -> file/stdout.
    dump(make_svg(deps_to_graph(adm, args.label_indices)), args.output)
|
import random as rd
from tkinter import *
class Boss():
    """A simple enemy with hit points, normal/special damage, and a weapon."""

    def __init__(self):
        self.HP = 20              # current hit points; clamped to >= 0
        self.damage = 2           # normal attack damage
        self.special = 5          # special attack damage (rare)
        self.name = "Toby"
        self.weapon = "Great sword"

    def set_HP(self, HP):
        """Set hit points, clamped so they never go negative.

        Fix: the original duplicated `self.HP = HP` in both branches of the
        negative check; max() expresses the clamp in one step.
        """
        self.HP = max(0, HP)

    def get_HP(self):
        """Return current hit points (the original's if/else returned the
        same value on both branches)."""
        return self.HP

    def get_damage(self):
        """Return damage for one attack: 4-in-50 (~8%) chance of a special hit."""
        if rd.randint(1, 50) < 5:
            return self.special
        return self.damage

    def get_weapon(self):
        """Return the boss's weapon name."""
        return self.weapon

    def get_name(self):
        """Return the boss's name."""
        return self.name
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.goals.run_python_source import PythonSourceFieldSet
from pants.backend.python.goals.run_python_source import rules as run_rules
from pants.backend.python.providers.pyenv.custom_install.rules import RunPyenvInstallFieldSet
from pants.backend.python.providers.pyenv.custom_install.rules import (
rules as pyenv_custom_install_rules,
)
from pants.backend.python.providers.pyenv.custom_install.target_types import PyenvInstall
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.build_graph.address import Address
from pants.core.goals.run import RunRequest
from pants.engine.process import InteractiveProcess
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.testutil.rule_runner import RuleRunner, mock_console
@pytest.fixture
def named_caches_dir(tmp_path):
    """Per-test directory used as Pants' named-caches root."""
    return "{}/named_cache".format(tmp_path)
@pytest.fixture
def rule_runner(named_caches_dir) -> RuleRunner:
    # RuleRunner wired with the run / pyenv-custom-install / dependency-
    # inference rule sets and the two target types the tests use; the
    # named-caches dir is redirected into the per-test tmp directory so the
    # pyenv install lands there.
    return RuleRunner(
        rules=[
            *run_rules(),
            *pyenv_custom_install_rules(),
            *dependency_inference_rules.rules(),
            *target_types_rules.rules(),
            QueryRule(RunRequest, (PythonSourceFieldSet,)),
            QueryRule(RunRequest, (RunPyenvInstallFieldSet,)),
        ],
        target_types=[
            PythonSourcesGeneratorTarget,
            PyenvInstall,
        ],
        bootstrap_args=[
            f"--named-caches-dir={named_caches_dir}",
        ],
    )
def run_run_request(
    rule_runner: RuleRunner,
    target: Target,
) -> str:
    """Install CPython 3.9.16 via the pyenv custom-install goal, then run
    `target` under that interpreter and return its captured stdout."""
    args = [
        (
            "--backend-packages=["
            + "'pants.backend.python',"
            + "'pants.backend.python.providers.experimental.pyenv',"
            + "'pants.backend.python.providers.experimental.pyenv.custom_install',"
            + "]"
        ),
        "--source-root-patterns=['src']",
    ]
    # Run the install
    install_target = rule_runner.get_target(
        Address(target_name="pants-pyenv-install", spec_path="")
    )
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    run_request = rule_runner.request(RunRequest, [RunPyenvInstallFieldSet.create(install_target)])
    # The install goal takes the version to install as a CLI argument.
    run_process = InteractiveProcess(
        argv=run_request.args + ("3.9.16",),
        env=run_request.extra_env,
        input_digest=run_request.digest,
        run_in_workspace=True,
        immutable_input_digests=run_request.immutable_input_digests,
        append_only_caches=run_request.append_only_caches,
    )
    with mock_console(rule_runner.options_bootstrapper) as mocked_console:
        rule_runner.run_interactive_process(run_process)
        # Echo the captured output to aid debugging when the assert fails.
        print(mocked_console[1].get_stdout().strip())
        print(mocked_console[1].get_stderr().strip())
        # Sanity check: the install must report the new interpreter path.
        assert "versions/3.9.16/bin/python" in mocked_console[1].get_stdout().strip()
    # Now run the actual target under the freshly installed interpreter.
    run_request = rule_runner.request(RunRequest, [PythonSourceFieldSet.create(target)])
    run_process = InteractiveProcess(
        argv=run_request.args,
        env=run_request.extra_env,
        input_digest=run_request.digest,
        run_in_workspace=True,
        immutable_input_digests=run_request.immutable_input_digests,
        append_only_caches=run_request.append_only_caches,
    )
    with mock_console(rule_runner.options_bootstrapper) as mocked_console:
        rule_runner.run_interactive_process(run_process)
        return mocked_console[1].get_stdout().strip()
def test_custom_install(rule_runner, named_caches_dir):
    """End-to-end: the pyenv-installed 3.9.16 interpreter lives under the
    named-caches dir and is the interpreter that executes the source file."""
    rule_runner.write_files(
        {
            "src/app.py": dedent(
                """\
                import os.path
                import sys
                import sysconfig
                print(sysconfig.get_config_var("prefix"))
                print(sys.version.replace("\\n", " "))
                """
            ),
            "src/BUILD": "python_sources(interpreter_constraints=['==3.9.16'])",
        }
    )
    target = rule_runner.get_target(Address("src", relative_file_path="app.py"))
    stdout = run_run_request(rule_runner, target)
    prefix_dir, version = stdout.splitlines()
    # The interpreter prefix must point into the pyenv named cache...
    assert prefix_dir.startswith(f"{named_caches_dir}/pyenv")
    # ...and the running interpreter must be the requested version.
    assert "3.9.16" in version
|
import socket

# UDP receiver: print datagrams arriving on port 5002 until a client sends
# 'bye' (case-insensitive), then close the socket.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('', 5002))  # all interfaces, port 5002
try:
    while True:
        # Fix: the payload was misleadingly named `date` and decoded up to
        # three times per datagram; decode once into `text`.
        data, addr = s.recvfrom(1024)  # blocking; up to 1024 bytes per datagram
        text = data.decode()
        print('received message:{0} from PORT {1} on {2}'.format(text, addr[1], addr[0]))
        if text.lower() == 'bye':
            break
finally:
    # Fix: close the socket even if an exception escapes the loop.
    s.close()
# demo01_series.py  Demonstrates the pandas Series object.
import numpy as np
import pandas as pd

# --- Creating Series objects ---
ary = np.array(['zs', 'ls', 'ww', 'zl'])
# The index parameter supplies custom labels.
s = pd.Series(ary, index=['s01', 's02', 's03', 's04'])
print(s)
# From a dict: the keys become the index.
s = pd.Series({'s01':'zs', 's02':'ls', 's03':'ww'})
print(s)
# From a scalar: the value is broadcast across the whole index.
s = pd.Series(5, index=np.arange(100))
print(s)

# --- Accessing Series elements ---
s = pd.Series(ary, index=['s01', 's02', 's03', 's04'])
# Fix: use .iloc for positional access -- plain s[1] / s[[0, 2]] on a
# string-labeled Series is deprecated and removed in pandas 3.0.
print(s.iloc[1], s['s02'])
print(s.iloc[1:3])
print(s['s02':'s03'])  # label slices include both endpoints
mask = [True, False, False, True]
print(s[mask])
print(s.iloc[[0, 2]])
print(s[['s01', 's03']])
# Reverse the four elements, three equivalent ways.
print(s.iloc[::-1])
print(s.iloc[[3, 2, 1, 0]])
print(s[['s04', 's03', 's02', 's01']])
print(s.index)
print(s.values)

# --- Datetime-typed data ---
print('-' * 40)
dates = pd.Series(['2011', '2011-02', '2011-03-01',
                   '2011/04/01', '2011/05/01 01:01:01', '01 Jun 2011'])
# Fix: these strings use several different formats; pandas >= 2.0 refuses to
# infer a single format for them, so ask for per-element parsing explicitly.
dates = pd.to_datetime(dates, format='mixed')
print(dates)
print(dates.dt.weekday)
delta = dates - pd.to_datetime('01-01-2011')
print(delta.dt.days)

# --- Generating date ranges ---
dates = pd.date_range('20201001', periods=7)
print(dates)
dates = pd.date_range('20201001', periods=7, freq='B')  # business days only
print(dates)
|
#!/usr/bin/env python3
import argparse
import os
import pyjetty.alihfjets.hf_data_io as hfdio
from pyjetty.mputils import perror, pinfo, pwarning, treewriter
from pyjetty.mputils import JetAnalysisWithRho
import fastjet as fj
import fjcontrib
import fjext
import fjtools
import ROOT
ROOT.gROOT.SetBatch(True)
class HFAnalysisInvMass(hfdio.HFAnalysis):
    # HF analysis task: writes D0-candidate and jet observables into a ROOT
    # tree, one output file per task (named '<name>.root').
    def __init__(self, **kwargs):
        self.fout = None
        super(HFAnalysisInvMass, self).__init__(**kwargs)
        # One recreate-mode output ROOT file per task, named after the task.
        self.fout = ROOT.TFile(self.name+'.root', 'recreate')
        self.fout.cd()
        # self.hinvmass = ROOT.TH1F('hinvmass', 'hinvmass', 400, 1.5, 2.5)
        # self.hinvmass.Sumw2()
        # self.hinvmasspt = ROOT.TH2F('hinvmasspt', 'hinvmasspt', 400, 1.5, 2.5, 50, 2, 12)
        # self.hinvmasspt.Sumw2()
        self.tw = treewriter.RTreeWriter(tree_name='d0', fout=self.fout)
        # jet stuff: tracks selected in 0.15 < pt < 100 and |eta| < 0.9;
        # jets in 5 < pt < 100, kept fiducially inside the acceptance with an
        # R-dependent eta margin.
        max_eta = 0.9
        self.parts_selector = fj.SelectorPtMin(0.15) & fj.SelectorPtMax(100.0) & fj.SelectorAbsEtaMax(max_eta)
        jet_R0 = 0.4
        self.jet_selector = fj.SelectorPtMin(5.0) & fj.SelectorPtMax(100.0) & fj.SelectorAbsEtaMax(max_eta - 1.05 * jet_R0)
        self.jarho = JetAnalysisWithRho(jet_R=jet_R0, jet_algorithm=fj.antikt_algorithm, particle_eta_max=max_eta)
    def analysis(self, df):
        # Per-event callback; df is the candidate dataframe for one event.
        # self.fj_parts / self.fj_Dcands / self.fj_DcandsGhosts are presumably
        # prepared by the hfdio.HFAnalysis base class -- TODO confirm.
        if len(df) <= 0:
            return
        # Debug printout: compare each D candidate with its ghost copy.
        for i, dv in enumerate(self.fj_Dcands):
            print(i, self.fj_Dcands[i].phi(), '=?', self.fj_DcandsGhosts[i].phi(), self.fj_Dcands[i].perp(), '=?', self.fj_DcandsGhosts[i].perp())
        print(len(self.fj_parts))
        #for d in df:
        #	print (df[d])
        # _ipDmatches = fjtools.matched_Reta(d0c, self.fj_parts)
        _parts = self.parts_selector(self.fj_parts)
        self.jarho.analyze_event(_parts)
        _jets = self.jet_selector(self.jarho.jets)
        self.tw.fill_branch('j', _jets)
        self.tw.fill_branch('D0cand', self.fj_Dcands)
        # Distance of every D candidate to every selected jet.
        _dRs = [[j.delta_R(dc) for dc in self.fj_Dcands] for j in _jets]
        self.tw.fill_branch('jDdR', _dRs, do_enumerate=True)
        # Copy every candidate-level column; skip per-particle columns.
        for c in df.columns:
            if 'Particle' in c:
                continue
            self.tw.fill_branch(c, df[c].values)
        self.tw.fill_tree()
    def finalize(self):
        # Flush the tree and close the output file.
        self.fout.Write()
        self.fout.Close()
        pinfo(self.fout.GetName(), 'written.')
if __name__ == '__main__':
    # CLI entry point: configure the D0 candidate selections and run the
    # analysis over a list of input files.
    parser = argparse.ArgumentParser(description='D0 analysis on alice data', prog=os.path.basename(__file__))
    parser.add_argument('-f', '--flist', help='file list to process', type=str, default=None, required=True)
    parser.add_argument('-n', '--nfiles', help='max n files to process', type=int, default=0, required=False)
    parser.add_argument('-o', '--output', help="output name / file name in the end", type=str, default='test_hfana')
    args = parser.parse_args()
    hfaio = hfdio.HFAnalysisIO()
    # Candidate selection summary (original note):
    # (pt_cand > 2, |z_vtx_reco| < 10, pt_prong(0,1) > 0.5, |eta| < 0.8, |TPC_sigma| < 3, |TOF_sigma| < 3 OR TOF_sigma < -900)
    hfa = HFAnalysisInvMass(name = args.output)
    hfa.add_selection_range('pt_cand', 2, 1e3)
    hfa.add_selection_range_abs('z_vtx_reco', 10)
    hfa.add_selection_range('pt_prong0', 0.5, 1e3)
    hfa.add_selection_range('pt_prong1', 0.5, 1e3)
    hfa.add_selection_range_abs('eta_cand', 0.8)
    # PID selections: TPC and TOF n-sigma cuts for both prong hypotheses.
    hfa.add_selection_range_abs('nsigTPC_Pi_0', 3)
    hfa.add_selection_range_abs('nsigTPC_Pi_1', 3)
    hfa.add_selection_range_abs('nsigTPC_K_0', 3)
    hfa.add_selection_range_abs('nsigTPC_K_1', 3)
    hfa.add_selection_range_abs('nsigTOF_Pi_0', 3)
    hfa.add_selection_range_abs('nsigTOF_Pi_1', 3)
    hfa.add_selection_range_abs('nsigTOF_K_0', 3)
    hfa.add_selection_range_abs('nsigTOF_K_1', 3)
    # Topological cuts kept for reference but currently disabled:
    # hfa.add_selection_range('d_len', 0.05, 1e3)
    # hfa.add_selection_range('dca', 0.02, 1e3)
    #hfa.add_selection_range('cos_p_xy', 0, 0.99)
    #hfa.add_selection_range('d_len_xy', 0.1, 1e3)
    #hfa.add_selection_range('dca', 0.01, 1e3)
    # hfa.add_selection_range_abs('imp_par_prong0', -0.01)
    # hfa.add_selection_range_abs('imp_par_prong1', -0.01)
    hfaio.add_analysis(hfa)
    # hfaio.load_file("./AnalysisResults.root")
    # hfaio.execute_analyses()
    hfaio.execute_analyses_on_file_list(args.flist, args.nfiles)
    hfa.finalize()
# Singly linked list node and digit-wise addition of list-encoded numbers.
class Node:
    """One linked-list node holding a value and a pointer to the next node."""

    def __init__(self, data, next=None):
        self.data = data
        self.next = next


def add_two_linked_list(node0, node1, carry_over=0):
    """Add two numbers stored as linked lists, least-significant digit first.

    Returns a new list in the same digit order, or None once both inputs and
    the carry are exhausted.
    """
    if node0 is None and node1 is None and not carry_over:
        return None
    digit0 = node0.data if node0 is not None else 0
    digit1 = node1.data if node1 is not None else 0
    column_sum = digit0 + digit1 + carry_over
    rest0 = node0.next if node0 is not None else None
    rest1 = node1.next if node1 is not None else None
    carry = 1 if column_sum >= 10 else 0
    return Node(column_sum % 10, add_two_linked_list(rest0, rest1, carry))
|
import numpy as np
import matplotlib.pyplot as plt  # NOTE(review): imported but never used below

# Interactive demo: correlate an input signal s1 with a response signal s2 by
# time-reversing s2, then repeatedly multiplying and circularly shifting.
n1 = int(input('length of s1: '))
n2 = int(input('length of s2: '))
s1 = []
s2 = []
# Read the two integer sequences element by element.
for i in range(n1):
    i1 = input('enter input signal: ')
    s1.append(int(i1))
for i in range(n2):
    i2 = input('enter response signal: ')
    s2.append(int(i2))
s3 = np.flip(s2,0)  # time-reversed response signal
length = len(s1)+len(s2)-1  # length of the full (linear) correlation
# Zero-pad to the common length: s1 aligned right, flipped s2 aligned left.
s1_zeros = length - len(s1)
s3_zeros = length - len(s3)
temp_s1 = np.concatenate((np.zeros(s1_zeros),s1))
temp_s2 = np.concatenate((s3, np.zeros(s3_zeros)))
print('temp_s1 (pure input): ', temp_s1)
print('temp_s2 (pure input): ', temp_s2)
mul = 0
out = np.zeros(length)
# For each output sample, multiply the aligned vectors elementwise and sum;
# between samples, rotate temp_s1 one position for the next alignment.
for i in range(length):
    if(i==0):
        mul = temp_s1*temp_s2
        print('temp_s2 (when i==0): ', temp_s2)
        print('temp_s1 (when i==0): ', temp_s1)
        out[i] = sum(mul)
    else:
        temp_s1 = np.concatenate((temp_s1[1:],temp_s1[:1]))#left shift, not sure how to do right shift (original author note)
        print('temp_s2: ', temp_s2)
        print('temp_s1: ', temp_s1)
        mul = temp_s1*temp_s2
        out[i] = sum(mul)
print(out)
|
import codecs

# Read one line of text and print its ROT13 transformation.
plain_text = input("Enter a String: ")
cipher_text = codecs.encode(plain_text, "rot13")
print("Ciphered Text: ", cipher_text)
from reviewapp import create_app
from reviewapp.review import get_python_review,get_mobile_all
from reviewapp.sentiment_analys import go
from reviewapp.dostoevsky_analysys import dostoevsky_run

# Build the application and run the Dostoevsky sentiment analysis inside an
# application context (the analysis presumably needs app-bound resources
# such as the database -- confirm against reviewapp).
app = create_app()
with app.app_context():
    dostoevsky_run()
    #get_python_review()
|
# github code - Support Vector Machine Kernels example
# includes matplotlib graphing plots and sample output
#
# SupportVectorMachineKernels.py
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
####
# build our SVM class. self allows us to use variable
# or functions within the def, that's why we want it
# to be an oject which will be reuseable
#
# The __init__ function get's called only once when we create
# create a new instance of the SVM class
class Support_Vector_Machine:
    """A from-scratch linear SVM.

    fit() finds a max-margin separating hyperplane (w, b) by a brute-force,
    progressively refined grid search; predict() classifies a point by
    sign(x . w + b). With visualization=True, data, predictions, and
    hyperplanes are drawn with matplotlib.
    """

    def __init__(self, visualization=True):
        # Runs once per instance; set up plotting machinery only if asked.
        self.visualization = visualization
        self.colors = {1: 'r', -1: 'b'}  # class label -> plot color
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)  # 1x1 grid, plot #1

    def fit(self, data):
        """Train on `data`, a dict mapping class label (+1/-1) to an array of
        feature vectors. Stores the optimum in self.w and self.b."""
        self.data = data
        # opt_dict maps ||w|| -> [w, b] for every (w, b) satisfying the
        # margin constraint yi * (xi . w + b) >= 1 on all training points.
        opt_dict = {}
        # Sign transforms applied to w so all four quadrants are explored.
        transforms = [[1, 1],
                      [-1, 1],
                      [-1, -1],
                      [1, -1]]
        all_data = []
        for yi in self.data:
            for featureset in self.data[yi]:
                for feature in featureset:
                    all_data.append(feature)
        self.max_feature_value = max(all_data)
        self.min_feature_value = min(all_data)
        all_data = None  # release; recomputed on each fit() call
        # Coarse-to-fine step schedule. The passes must run sequentially:
        # each finer pass starts near the previous pass's optimum. Smaller
        # final steps buy precision at rapidly growing cost.
        step_sizes = [self.max_feature_value * 0.1,
                      self.max_feature_value * 0.01,
                      self.max_feature_value * 0.001, ]
        # b is swept over a wider range (x5) and a coarser step (x5) than w.
        b_range_multiple = 5
        b_multiple = 5
        # First corner to cut: start w well outside the data's value range.
        latest_optimum = self.max_feature_value * 10
        for step in step_sizes:
            w = np.array([latest_optimum, latest_optimum])
            # The objective is convex, so once we step past the minimum
            # (w[0] goes negative) this pass is done.
            optimized = False
            while not optimized:
                for b in np.arange(-1 * (self.max_feature_value * b_range_multiple),
                                   self.max_feature_value * b_range_multiple,
                                   step * b_multiple):
                    for transformation in transforms:
                        w_t = w * transformation
                        found_option = True
                        # Check yi * ((xi . w) + b) >= 1 on every sample.
                        # This full scan is the naive SVM's weak point (SMO
                        # exists to avoid it). Fix: break out on the first
                        # violation instead of scanning the remaining data
                        # (the original flagged this with "add a break here
                        # later" but never did).
                        for i in self.data:
                            for xi in self.data[i]:
                                yi = i
                                if not yi * (np.dot(w_t, xi) + b) >= 1:
                                    found_option = False
                                    break
                            if not found_option:
                                break
                        if found_option:
                            opt_dict[np.linalg.norm(w_t)] = [w_t, b]
                if w[0] < 0:
                    optimized = True  # this pass is "step optimized"
                    print("Optimized a step")
                else:
                    # e.g. w = [5,5] with step = 1 -> w becomes [4,4]
                    w = w - step
            # Keep the smallest-magnitude w found so far, then restart the
            # next (finer) pass slightly above it.
            norms = sorted([n for n in opt_dict])
            opt_choice = opt_dict[norms[0]]
            self.w = opt_choice[0]
            self.b = opt_choice[1]
            latest_optimum = opt_choice[0][0] + step * 2
        # Report the margin value of every training point; support vectors
        # sit close to 1.
        for i in self.data:
            for xi in self.data[i]:
                yi = i
                print(xi, ':', yi * (np.dot(self.w, xi) + self.b))

    def visualize(self):
        """Plot the training data, both support-vector hyperplanes, and the
        decision boundary. Cosmetic only; does not affect results."""
        # Fix: the original read the module-level global `data_dict` here,
        # silently coupling the class to one particular script variable;
        # use the fitted training data instead.
        [[self.ax.scatter(x[0], x[1], s=100, color=self.colors[i]) for x in self.data[i]]
         for i in self.data]

        def hyperplane(x, w, b, v):
            # Solve w . (x, y) + b = v for y at a given x.
            return (-w[0] * x - b + v) / w[1]

        # Limit the plotted x-range to slightly beyond the data.
        data_range = (self.min_feature_value * 0.9, self.max_feature_value * 1.1)
        hyp_x_min = data_range[0]
        hyp_x_max = data_range[1]
        # Positive support vector hyperplane: (w . x + b) = 1
        psv1 = hyperplane(hyp_x_min, self.w, self.b, 1)
        psv2 = hyperplane(hyp_x_max, self.w, self.b, 1)
        self.ax.plot([hyp_x_min, hyp_x_max], [psv1, psv2], 'b')
        # Negative support vector hyperplane: (w . x + b) = -1
        nsv1 = hyperplane(hyp_x_min, self.w, self.b, -1)
        nsv2 = hyperplane(hyp_x_max, self.w, self.b, -1)
        self.ax.plot([hyp_x_min, hyp_x_max], [nsv1, nsv2], 'r')
        # Decision boundary: (w . x + b) = 0
        db1 = hyperplane(hyp_x_min, self.w, self.b, 0)
        db2 = hyperplane(hyp_x_max, self.w, self.b, 0)
        self.ax.plot([hyp_x_min, hyp_x_max], [db1, db2], 'y--')
        plt.show()

    def predict(self, features):
        """Classify one point as sign(features . w + b); 0 means exactly on
        the boundary. Also plots the point when visualization is enabled."""
        classification = np.sign(np.dot(np.array(features), self.w) + self.b)
        if (classification != 0) and self.visualization:
            self.ax.scatter(features[0], features[1],
                            s=200, c=self.colors[classification], marker='*')
        return classification
# Training data: two linearly separable classes keyed by label (-1 / +1).
data_dict = {-1:np.array([[1,7],
                          [2,8],
                          [3,8],]),
             1:np.array([[5,1],
                         [6,-1],
                         [7,3],]) }
# MAIN: create / init an instance of our Support_Vector_Machine class
# (plays the role sklearn's `clf` would).
svm = Support_Vector_Machine()
svm.fit(data = data_dict)
# Points to classify once the separating hyperplane has been found.
predict_us = [[0,10],
              [1,3],
              [3,4],
              [3,5],
              [5,5],
              [5,6],
              [6,-5],
              [5,8]]
print("Start of predictions:")
for p in predict_us:
    svm.predict(p)
print("End of predictions:")
svm.visualize()
################## End ################
# Example output:
#
# Optimized a step
# Optimized a step
# Optimized a step
# [5 1] : 1.016
# [ 6 -1] : 1.688
# [7 3] : 1.016
# [1 7] : 1.224
# [2 8] : 1.224
# [3 8] : 1.0
# Start of predictions:
# End of predictions:
|
# Generated by Django 2.0.1 on 2018-01-23 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the IoT device table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='IoT',
            fields=[
                # The device serial number doubles as the primary key.
                ('serial_no', models.CharField(max_length=50, primary_key=True, serialize=False, unique=True)),
                # Vehicle plate the device is mounted on; optional but unique.
                ('plate_no', models.CharField(blank=True, max_length=50, unique=True)),
                ('is_active', models.BooleanField()),
            ],
        ),
    ]
|
from functools import singledispatch
from typing import Tuple
from ._parse import (
AST,
Program,
StatementList,
ExprStatement,
ReturnStatement,
FunctionDefinition,
FunctionCall,
IntLiteral,
BinopExpr
)
# x86-64 System V registers used by the emitted assembly.
RET_REG = 'rax'   # function return value
ARG1_REG = 'rdi'  # first integer/pointer argument
# TODO track arithmetic stack pointer during compilation, error if stack
# overflow
ARITH_STACK_SIZE = 100 # QWORDs (1 QWORD = 8 BYTEs)
ARITH_STACK_LABEL = 'arithstack'  # .data label of the arithmetic stack
ARITH_REG_1 = 'r9'   # scratch register holding the left operand
ARITH_REG_2 = 'r10'  # scratch register holding the right operand
SP = 'r8'            # dedicated arithmetic-stack pointer
# Names of the push/pop macros defined in the emitted preamble.
PUSH = 'push_arith'
POP = 'pop_arith'
POP_NO_ARG = 'pop_arith_no_arg'
def join_lines(*lines: str) -> str:
    """Concatenate the given lines with newline separators (no trailing newline)."""
    separator = '\n'
    return separator.join(lines)
@singledispatch
def compile_ast(ast: AST) -> str:
    """Compile an AST node to assembly text.

    Concrete node types are handled by the `compile_ast.register` overloads
    below; reaching this base implementation means no overload exists for the
    node's type, so fail with a diagnostic naming the offending type (the
    original raised a bare, message-less ValueError).
    """
    raise ValueError(f'no compiler registered for AST node type {type(ast).__name__!r}')
@compile_ast.register
def compile_program(ast: Program) -> str:
    """Compile a whole Program to one NASM/YASM source string.

    Emits the preamble (extern declarations and the arithmetic-stack
    push/pop macros), the compiled statement list, and a .data section
    reserving the arithmetic stack.
    """
    return join_lines(
        'bits 64',
        '',
        # TODO remove extern declarations after we support #include, so that
        # source files are required to #include a header with declarations for
        # the print functions.
        f'extern printd',
        '',
        # https://www.tortall.net/projects/yasm/manual/html/nasm-multi-line-macros.html
        f'%macro {PUSH} 1',
        f'add {SP}, 8',
        f'mov QWORD [{SP}], %1',
        '%endmacro',
        '',
        f'%macro {POP} 1',
        f'mov QWORD %1, [{SP}]',
        POP_NO_ARG,
        '%endmacro',
        '',
        f'%define {POP_NO_ARG} sub {SP}, 8',
        '',
        compile_ast(ast.statement_list),
        '',
        'section .data',
        # Reserve ARITH_STACK_SIZE zeroed QWORDs for the arithmetic stack.
        f'{ARITH_STACK_LABEL} times {ARITH_STACK_SIZE} dq 0',
    ) + '\n'
@compile_ast.register
def compile_statement_list(ast: StatementList) -> str:
    """Compile each statement in order and join the results with newlines."""
    return join_lines(*(compile_ast(stmt) for stmt in ast.statements))
@compile_ast.register
def compile_expr_statement(ast: ExprStatement) -> str:
    """Evaluate the expression, then discard its value from the arith stack."""
    expr_code = compile_ast(ast.expr)
    return join_lines(expr_code, POP_NO_ARG)
@compile_ast.register
def compile_return_statement(ast: ReturnStatement) -> str:
    """Evaluate the return expression, move it to the return register, and ret."""
    expr_code = compile_ast(ast.expr)
    return join_lines(expr_code, f'{POP} {RET_REG}', 'ret')
@compile_ast.register
def compile_function_definition(ast: FunctionDefinition) -> str:
    """Emit a label followed by the compiled body.

    `main` additionally gets a `global` export and initializes the dedicated
    arithmetic-stack pointer before its body runs.
    """
    # TODO check function has a return statement and returns correct type
    # (during semantic validation)
    header = f'{ast.func_name}:'
    compiled_body = compile_ast(ast.body)
    if ast.func_name == 'main':
        lines: Tuple[str, ...] = (
            'global main',
            header,
            f'mov {SP}, {ARITH_STACK_LABEL}',
            compiled_body,
        )
    else:
        lines = (header, compiled_body)
    return join_lines(*lines)
@compile_ast.register
def compile_function_call(ast: FunctionCall) -> str:
    """Compile a call: optional single argument, SP saved across the call,
    and the return-register value pushed onto the arithmetic stack."""
    # TODO refactor (maybe so that compiler functions can just yield code or
    # something)
    pieces = []
    if ast.arg is not None:
        # Evaluate the argument, then move it into the first-argument register.
        pieces.append(compile_ast(ast.arg))
        pieces.append(f'{POP} {ARG1_REG}')
    pieces.extend([
        f'push {SP}',
        f'call {ast.func_name}',
        f'pop {SP}',
        # Note that this line is useless (but should be harmless) for calls
        # to void functions.
        f'{PUSH} {RET_REG}',
    ])
    return join_lines(*pieces)
@compile_ast.register
def compile_int_literal(ast: IntLiteral) -> str:
    """Push the literal's value onto the arithmetic stack."""
    return ' '.join((PUSH, str(ast.value)))
@compile_ast.register
def compile_binop_expr(ast: BinopExpr) -> str:
    """Compile left then right operand (pushed in that order), then the operator."""
    left_code = compile_ast(ast.left)
    right_code = compile_ast(ast.right)
    return join_lines(left_code, right_code, compile_binop(ast.binop))
def compile_binop(binop: str) -> str:
    """Compile a binary operator: pop both operands, apply, push the result.

    Operands are popped right-then-left, so ARITH_REG_1 holds the left operand
    and ARITH_REG_2 the right. Raises KeyError for an unsupported operator.
    Now uses join_lines for consistency with the rest of this module (the
    original hand-rolled '\\n'.join here only).
    """
    op_code = {
        '+': f'add {ARITH_REG_1}, {ARITH_REG_2}',
        '-': f'sub {ARITH_REG_1}, {ARITH_REG_2}',
        '*': f'imul {ARITH_REG_1}, {ARITH_REG_2}',
        # division: https://stackoverflow.com/a/45508617
        # signed division: https://stackoverflow.com/a/9073207
        '/': join_lines(
            f'mov rax, {ARITH_REG_1}',
            'cqo',
            f'idiv {ARITH_REG_2}',
            f'mov {ARITH_REG_1}, rax'
        ),
    }[binop]
    return join_lines(
        f'{POP} {ARITH_REG_2}',
        f'{POP} {ARITH_REG_1}',
        op_code,
        f'{PUSH} {ARITH_REG_1}'
    )
|
import random
import os
import yaml
from models import Pair
CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config.yml')
def choose_reciever(giver, recievers):
    """Pick a random receiver for *giver* who is neither the giver nor their partner.

    Retries (recursively) on an invalid pick. Raises Exception when the only
    receiver left is invalid so the caller can restart the whole pairing.
    """
    choice = random.choice(recievers)
    if giver.partner == choice.name or giver.name == choice.name:
        # BUG FIX: was `len(recievers) is 1` — identity comparison with an int
        # literal, which only works by CPython's small-int caching accident.
        if len(recievers) == 1:
            raise Exception('Only one reciever left, try again')
        return choose_reciever(giver, recievers)
    return choice
def create_pairs(g, r):
    """Build a complete giver->receiver pairing; restart from scratch on a dead end.

    Works on copies so the caller's lists are untouched. choose_reciever raises
    when the last remaining receiver is invalid, in which case the whole
    assignment is retried.
    """
    givers = g[:]
    recievers = r[:]
    pairs = []
    for giver in givers:
        # BUG FIX: the original bare `except:` wrapped the whole body, so it
        # also swallowed KeyboardInterrupt/SystemExit and masked real errors
        # in remove()/Pair(). Narrow the handler and shrink the try body to
        # the one call that legitimately raises.
        try:
            reciever = choose_reciever(giver, recievers)
        except Exception:
            return create_pairs(g, r)
        recievers.remove(reciever)
        pairs.append(Pair(giver, reciever))
    return pairs
def parse_yaml(yaml_path=CONFIG_PATH):
    """Load the YAML config at *yaml_path* and return the parsed object.

    FIXES: the original leaked the file handle (`yaml.load(open(...))`) and
    called yaml.load without a Loader, which is deprecated and unsafe on
    untrusted input; safe_load restricts parsing to plain data types.
    """
    with open(yaml_path) as config_file:
        return yaml.safe_load(config_file)
def parse_email(template):
    """Parse HTML email from given template.

    Returns the template file's entire contents as a single string. The
    original accumulated the file line-by-line with readline() and closed the
    handle manually; a context manager plus read() is equivalent and also
    closes the file on error.
    """
    with open(template, 'r') as template_file:
        return template_file.read()
|
import datetime
import sys
def next_day(date_str):
    """Return the day after *date_str* ("YYYY-MM-DD") as a zero-padded string.

    Prints an error message and returns None for malformed input or
    impossible dates, keeping the original's specific diagnostics.
    FIXES: the original used a pure `% 4` leap-year rule (wrong for e.g.
    1900) and duplicated the month/day rollover logic across deeply nested
    branches; the datetime module handles all of that correctly.
    """
    parts = date_str.split("-")
    if len(parts) != 3 or not all(p.isdigit() for p in parts):
        print("请输入正确的日期")
        return
    year, month, day = (int(p) for p in parts)
    if not (1 <= month <= 12 and 1 <= day <= 31):
        print("请输入正确的日期")
        return
    try:
        current = datetime.date(year, month, day)
    except ValueError:
        # Mirror the original's per-case messages for impossible dates.
        if month == 2 and day == 29:
            print("平年2月没有29日")
        elif month == 2 and day == 30:
            print("2月没有30号")
        else:
            print("除大月外的月份没有30号")
        return
    following = current + datetime.timedelta(days=1)
    return f"{following.year}-{following.month:02d}-{following.day:02d}"
def prev_day(date_str):
    """Return the day before *date_str* ("YYYY-MM-DD") as a zero-padded string.

    Prints an error message and returns None for malformed input.
    FIXES: the original crashed with TypeError when stepping back from the
    1st of May/July/October/December (`str(data[1] - 1)` on a string), used a
    pure `% 4` leap-year rule, and silently accepted impossible dates such as
    "2020-04-31". datetime handles the calendar correctly.
    """
    parts = date_str.split("-")
    if len(parts) != 3 or not all(p.isdigit() for p in parts):
        print("请输入正确的日期")
        return
    year, month, day = (int(p) for p in parts)
    try:
        current = datetime.date(year, month, day)
    except ValueError:
        print("请输入正确的日期")
        return
    previous = current - datetime.timedelta(days=1)
    return f"{previous.year}-{previous.month:02d}-{previous.day:02d}"
# Simple REPL: read one date per line from stdin until a blank line,
# printing the previous and next day for each.
while True:
    line = sys.stdin.readline()
    line = line.strip()
    if line == '':
        break
    print('前一天:', prev_day(line))
    print('后一天:', next_day(line))
# Workshop records to persist: numeric id, workshop name, and year.
data_list = [
    {"id": 10001, "wname": "python", "year": "2001"},
    {"id": 10002, "wname": "UI", "year": "2002"},
    {"id": 10004, "wname": "AI", "year": "2003"}
]
try:
    # Serialize every record as one "id,wname,year" line, then write them all.
    lines = [f'{record["id"]},{record["wname"]},{record["year"]}\n'
             for record in data_list]
    with open('ws.txt', 'w') as file:
        file.writelines(lines)
except Exception as e:
    print(str(e))
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# TypeError session
from ..exceptions import DigitError, IntError, RealError, ComplexError, \
BoolError, BytesError, StringError, DictError, \
ListError, TupleError, ProtocolUnbound
# AttributeError session
from ..exceptions import FormatError, UnsupportedCall
# IOError session
from ..exceptions import FileError
# IndexError session
from ..exceptions import ProtocolNotFound
# ValueError session
from ..exceptions import VersionError, IndexNotFound
# Deprecated / Base classes
from ..exceptions import BaseError
|
# NOTE: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>> e00cedd...) whose two sides were byte-identical.
# The conflict markers were removed, keeping a single copy — without this the
# module is a SyntaxError and nothing in it can run.
from django.test import TestCase, override_settings
from django.core import mail
from django.urls import reverse
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
import tempfile
from django.conf import settings
from .models import User, Group, Post


class EmailTest(TestCase):
    def test_email_send(self):
        """Signing up sends exactly one registration-confirmation e-mail."""
        self.client.post('/auth/signup/',
                         {'username': 'terminator', 'email': 'rocki@gmail.com',
                          'password1': 'skynetMyLove', 'password2': 'skynetMyLove'})
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Подтверждение регистрации Yatube')


@override_settings(CACHES=settings.TEST_CACHE)
class TestUserActions(TestCase):
    """Post, follow and comment behaviour for logged-in and anonymous users."""

    def setUp(self):
        self.user = User.objects.create_user(username="TestUser", password="mrRobot")
        self.client.login(username='TestUser', password='mrRobot')
        self.user2 = User.objects.create_user(username="TestUser2", password="mrRobot")

    def test_new_profile(self):
        """A profile page exists immediately after signup."""
        self.client.post('/auth/signup/',
                         {'username': 'terminator', 'email': 'rocki@gmail.com',
                          'password1': 'skynetMyLove', 'password2': 'skynetMyLove'})
        response = self.client.get('/terminator/')
        self.assertEqual(response.status_code, 200)

    # post checks
    def test_new_post(self):
        response = self.client.post(reverse('new_post'), {'text': 'Пост создан'})
        self.assertContains(response, 'Ваша запись была успешно опубликована!')

    def test_new_post_logout(self):
        """Anonymous users are redirected to login when trying to post."""
        self.client.logout()
        response = self.client.post('/new/')
        self.assertRedirects(response, '/auth/login/?next=/new/')

    def test_publish_post(self):
        """A new post shows up on the index, profile, and post pages."""
        text = 'Создаём пост через тест'
        self.client.post(reverse('new_post'), {'text': 'Создаём пост через тест'})
        test_urls = [
            '',
            '/TestUser/',
            '/TestUser/1/',
        ]
        for url in test_urls:
            response = self.client.get(url)
            self.assertContains(response, text)

    def test_edit_post(self):
        """Edited text replaces the old text everywhere the post appears."""
        self.client.post(reverse('new_post'), {'text': 'Создаём пост через тест'})
        text = 'Отредактировал пост'
        self.client.post(reverse('post_edit', args=['TestUser', 1]), {'text': text})
        test_urls = [
            '',
            '/TestUser/',
            '/TestUser/1/',
        ]
        for url in test_urls:
            response = self.client.get(url)
            self.assertContains(response, text)

    def test_delete_post(self):
        """A deleted post vanishes from listings and its page returns 404."""
        text = 'Удалим этот пост'
        self.client.post(reverse('new_post'), {'text': text})
        self.client.get(reverse('post_delete', args=['TestUser', 1]))
        response1 = self.client.get('')
        self.assertNotContains(response1, text)
        response2 = self.client.get('/TestUser/')
        self.assertNotContains(response2, text)
        response3 = self.client.get('/TestUser/1/')
        self.assertEqual(response3.status_code, 404)

    @override_settings(MEDIA_ROOT=tempfile.gettempdir())
    def test_add_image(self):
        """An uploaded image renders an <img> tag; a non-image is rejected."""
        self.group = Group.objects.create(title='TestGroup', slug='testgroup', description='Test')
        with open('posts/test_media/image.png', 'rb') as fp:
            self.client.post('/new/', {'group': '1', 'text': 'Test', 'image': fp})
        test_urls = [
            '',
            '/TestUser/',
            '/TestUser/1/',
            '/group/testgroup/'
        ]
        for url in test_urls:
            response = self.client.get(url)
            self.assertContains(response, '<img')
        with open('posts/test_media/not_image.txt', 'rb') as fp:
            response = self.client.post('/new/', {'group': '1', 'text': 'Dont publish', 'image': fp})
        response = self.client.get('')
        self.assertNotContains(response, 'Dont publish')

    # follow checks
    def test_follow_unfollow(self):
        # follow
        self.client.get('/TestUser2/follow/')
        response = self.client.get('/TestUser/')
        self.assertEqual(response.context['follows'], 1)
        # unfollow
        self.client.get('/TestUser2/unfollow/')
        response = self.client.get('/TestUser/')
        self.assertEqual(response.context['follows'], 0)

    def test_follow_feed(self):
        # the followed author's post is visible in the feed
        self.post = Post.objects.create(text='Подписан', author=self.user2)
        self.client.get('/TestUser2/follow/')
        response = self.client.get('/follow/')
        self.assertContains(response, 'Подписан')
        # and hidden again after unfollowing
        self.client.get('/TestUser2/unfollow/')
        response = self.client.get('/follow/')
        self.assertNotContains(response, 'Подписан')

    def test_add_comment(self):
        # an authenticated user can comment
        self.client.post('/new/', {'text': 'Test'})
        self.client.post('/TestUser/1/comment/', {'text': 'авторизован'})
        response = self.client.get('/TestUser/1/')
        self.assertContains(response, 'авторизован')
        # an anonymous user cannot
        self.client.logout()
        self.client.post('/TestUser/1/comment/', {'text': 'не авторизован'})
        response = self.client.get('/TestUser/1/')
        self.assertNotContains(response, 'не авторизован')


class TestServer(TestCase):
    def test_cache(self):
        """The index page fragment is cached after the first hit."""
        key = make_template_fragment_key('index_page', [1])
        self.assertFalse(cache.get(key))
        self.client.get('')
        self.assertTrue(cache.get(key))

    def test_get_404(self):
        response = self.client.get('/404/')
        self.assertEqual(response.status_code, 404)
|
#!/usr/bin/python
import numpy as np
import math
from roboclaw import *
# Commanded chassis velocities; overwritten every loop iteration.
vx = 0
vy = 0
w = 0
theta = 0
wheel_radius = 3.0
# Inverse-kinematics matrix for a 3-wheel omni base: maps body-frame
# (vx, vy, w) to individual wheel angular velocities. The 0.5/0.866 entries
# look like sin/cos of 120-degree wheel spacing; 6.9282/4/8 presumably encode
# the chassis geometry — TODO confirm against the mechanical drawings.
M = 1.0/wheel_radius*np.matrix([[-0.5,0.866,(0.866*6.9282+0.5*4)],[-0.5,-0.866,(0.866*6.9282+.5*4)],[1,0,8]])
R = np.matrix([[],[],[]])  # placeholder; replaced by rotation_matrix() each loop
# Empirical speed-scaling constants (units unclear) — TODO confirm.
magic_number = 2**13
back_number = 2**12.8

def rotation_matrix(theta):
    """Return the 3x3 planar rotation matrix for heading *theta* (radians)."""
    R = np.matrix([[math.cos(theta),math.sin(theta),0],[-math.sin(theta),math.cos(theta),0],[0,0,1]])
    return R

# NOTE: Python 2 syntax below (print statement). Control loop: transform the
# desired body velocities into wheel speeds and command the RoboClaw boards.
while True:
    #receive vx,vy,w through JSON
    vx = 0
    vy = 10
    w = 1
    theta = 0*math.pi/180
    R = rotation_matrix(theta)
    V = [[vx],[vy],[w]]
    final = M*R*V
    omega1 = int(final[0]*magic_number)
    omega2 = int(final[1]*magic_number)
    omega3 = int(final[2]*back_number)
    print omega1, omega2, omega3
    # Motors 1 and 2 live on controller address 128; the third wheel is M2 on
    # address 129. Speeds are negated — presumably a wiring/orientation fix.
    SetM1Speed(128,-1*omega1)
    SetM2Speed(128,-1*omega2)
    SetM2Speed(129,-1*omega3)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
import sys  # duplicated in the original; kept byte-identical (doc-only pass)
import traceback
from django.core.management import setup_environ
# Make the project importable, then bind Django settings
# (setup_environ is the pre-Django-1.4 bootstrap API; Python 2 script).
sys.path.append("/home/bao/public_html/")
from bao import settings
setup_environ(settings)
from bao.athaliana.models import Syntelog
import csv
# Bulk-load syntelog rows from CSV, logging progress every 1000 records.
reader = csv.DictReader(open("data/data.csv"))
for i, row in enumerate(reader):
    if i % 1000 == 0: print >>sys.stderr, i, "records loaded"
    try:
        Syntelog.objects.get_or_create(**row)
    except:
        # NOTE(review): bare except hides the real error; it only echoes the row.
        print row
|
# Print the average of the multiples of 3 in the inclusive range [A, B].
A = int(input())
B = int(input())
# Renamed from `list` (which shadowed the builtin); a comprehension replaces
# the manual append loop.
multiples = [j for j in range(A, B + 1) if j % 3 == 0]
if multiples:
    print(sum(multiples) / len(multiples))
else:
    # The original crashed with ZeroDivisionError when the range contained
    # no multiple of 3; report 0 instead.
    print(0)
# if practice: an age gate followed by a sex prompt.
number = int(input("pls input a number:"))
print("it's ok" if number > 18 else "it's a boy")

sex = input("pls input your sex(man/woman):")
if sex in ("man", "woman"):
    # Both valid answers produce the same message format.
    print("you are %s" % sex)
else:
    print("you input wrong sex")
|
from django.shortcuts import render
from .apps import PricepredictorConfig
from django.http import JsonResponse
from rest_framework.views import APIView
import pandas as pd
class call_model(APIView):
    """Serve the price-prediction form (GET) and run a prediction (POST)."""

    # POST field name -> DataFrame column name, in the column order the
    # regressor was trained on.
    FIELD_TO_COLUMN = (
        ('Income', 'Income'),
        ('Age', 'House Age'),
        ('Rooms', 'Number of Rooms'),
        ('Bedrooms', 'Number of Bedrooms'),
        ('Population', 'Area Population'),
    )

    def get(self, request):
        """Render the input form."""
        if request.method == 'GET':
            return render(request, 'index.html')

    def post(self, request):
        """Predict a house price from the submitted form fields."""
        if request.method != 'POST':
            return None
        # Build a single-row DataFrame with the training column order.
        frame_data = {
            column: [int(request.POST.get(field))]
            for field, column in self.FIELD_TO_COLUMN
        }
        df = pd.DataFrame(frame_data,
                          columns=[column for _, column in self.FIELD_TO_COLUMN])
        prediction = PricepredictorConfig.regressor.predict(df)[0]
        response = {'Price': int(prediction)}
        print(prediction)
        return render(request, 'index.html', response)
"""This module contains a function to plot graph to analyze the grade change over time"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#author: Muhe Xie
#netID: mx419
#date: 11/26/2015
def generate_line_graph(df_data, plot_title):
    """Plot the A/B/C grade counts over time and save the figure as a PDF.

    df_data: DataFrame indexed by date with 'A', 'B', 'C' count columns.
    plot_title: borough name used in the title and the output file name.
    """
    plt.figure(figsize=(15, 10))
    # One dashed line per grade; colors/labels match the original plot.
    for grade, line_style in (('A', 'r--'), ('B', 'y--'), ('C', 'k--')):
        plt.plot_date(df_data.index, df_data[grade], line_style,
                      linewidth=2, label='GRADE ' + grade)
    plt.legend(loc='best')
    plt.title('grade improvement of ' + plot_title)
    # 'staten island' contains a space, so its file name is shortened.
    suffix = 'staten' if plot_title == 'staten island' else plot_title
    plt.savefig('grade_improvement_' + suffix + '.pdf')
    return
|
# Esta función proporciona soporte a los parámetros de la consola:
def parseator():
    """Build the command-line parser and return the parsed arguments."""
    import argparse
    parser = argparse.ArgumentParser()
    # String-valued filters, added uniformly.
    string_options = (
        ("-Nombre", "Muestra los resultados del nombre elegido"),
        ("-Empresa", "Muestra los resultados de la empresa elegida"),
        ("-Genero", "Muestra los resultados del género elegido"),
        ("-Mejores", "Muestra los mejores resultados"),
    )
    for flag, description in string_options:
        parser.add_argument(flag, type=str, help=description)
    parser.add_argument("-Tamaño", type=int, help="Modula la cantidad de filas del dataset final")
    parser.add_argument("-Pdf", action='store_true', help="Genera un PDF")
    return parser.parse_args()
Hello atgiugu!
Hello World!
|
import os
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
PI = np.pi
class Actor(nn.Module):
    """Policy network mapping normalized 4-feature states to bounded actions."""

    def __init__(self, input_size, output_size, order=1, lr=0.001):
        super(Actor, self).__init__()
        # Output scale: tanh lands in [-1, 1], stretched to [-pi/9, pi/9].
        self._out_gain = np.pi / 9
        # self._norm_matrix = 0.1 * torch.tensor([2, 1, 10, 10], dtype=torch.float32)
        # Per-feature input scaling applied before the network.
        self._norm_matrix = 0.1 * torch.tensor([1, 1, 1, 1], dtype=torch.float32)
        # Three-layer MLP: ELU hidden activations, tanh head.
        self.layers = nn.Sequential(
            nn.Linear(input_size, 256),
            nn.ELU(),
            nn.Linear(256, 256),
            nn.ELU(),
            nn.Linear(256, output_size),
            nn.Tanh(),
        )
        self.opt = torch.optim.Adam(self.parameters(), lr=lr)
        self._initialize_weights()
        # zero state value
        self._zero_state = torch.tensor([0.0, 0.0, 0.0, 0.0])

    def forward(self, x):
        """Return the bounded action for a batch of states x: [batch, features]."""
        normalized = x * self._norm_matrix
        return self._out_gain * self.layers(normalized)

    def _initialize_weights(self):
        """Xavier-uniform weights and zero biases for every linear layer."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                init.xavier_uniform_(module.weight)
                init.constant_(module.bias, 0.0)

    def loss_function(self, utility, p_V_x, f_xu):
        """Mean Hamiltonian: utility + <dV/dx, f(x, u)> averaged over the batch."""
        hamiltonian = utility + torch.diag(p_V_x @ f_xu.T)
        return torch.mean(hamiltonian)

    def predict(self, x):
        """Forward pass returned as a detached numpy array."""
        return self.forward(x).detach().numpy()

    def save_parameters(self, logdir):
        """Save the state dict to <logdir>/actor.pth."""
        torch.save(self.state_dict(), os.path.join(logdir, "actor.pth"))

    def load_parameters(self, load_dir):
        """Load the state dict from <load_dir>/actor.pth."""
        self.load_state_dict(torch.load(os.path.join(load_dir, 'actor.pth')))
class Critic(nn.Module):
    """
    NN of value approximation
    """

    def __init__(self, input_size, output_size, order=1, lr=0.001):
        super(Critic, self).__init__()
        # Four-layer MLP; the ReLU head keeps the predicted value non-negative.
        self.layers = nn.Sequential(
            nn.Linear(input_size, 256),
            nn.ELU(),
            nn.Linear(256, 256),
            nn.ELU(),
            nn.Linear(256, 256),
            nn.ELU(),
            nn.Linear(256, output_size),
            nn.ReLU(),
        )
        # Per-feature input scaling.
        self._norm_matrix = 0.1 * torch.tensor([2, 5, 10, 10], dtype=torch.float32)
        self.opt = torch.optim.Adam(self.parameters(), lr=lr)
        self._initialize_weights()
        # Cosine-annealed learning rate down to 1e-6 over 5000 steps.
        self.lrScheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.opt, T_max=5000, eta_min=1e-6)
        # zero state value
        self._zero_state = torch.tensor([0.0, 0.0, 0.0, 0.0])

    def predict(self, state):
        """Value estimate as a detached numpy array, shape [batch, 1]."""
        return self.forward(state).detach().numpy()

    def forward(self, x):
        """Return the value for a batch of states x: [batch, features]."""
        scaled = x * self._norm_matrix
        return self.layers(scaled)

    def _initialize_weights(self):
        """Xavier-uniform weights and zero biases for every linear layer."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                init.xavier_uniform_(module.weight)
                init.constant_(module.bias, 0.0)

    def save_parameters(self, logdir):
        """Save the state dict to <logdir>/critic.pth."""
        torch.save(self.state_dict(), os.path.join(logdir, "critic.pth"))

    def load_parameters(self, load_dir):
        """Load the state dict from <load_dir>/critic.pth."""
        self.load_state_dict(torch.load(os.path.join(load_dir, 'critic.pth')))
import numpy as np
import sys
import seaborn as sb
import scipy.stats as stats
import matplotlib.pyplot as plt
import pickle
import argparse
import pymc3 as pm
import pathlib
# import some cool fortran modules that are precompiled and fast
from model_paths import fortran_path, base_path, data_path
sys.path.append(fortran_path)
sys.path.append(base_path)
from inference import *
from ForcingModel import ForcingModel
from model_wrapper_v2 import *
import argparse
def kge(mod, obs):
    """Kling-Gupta efficiency of a modeled series *mod* against observations *obs*.

    KGE = 1 - sqrt((r-1)^2 + (a-1)^2 + (b-1)^2), where r is the linear
    correlation, a the std-dev ratio mod/obs, and b the mean ratio mod/obs.
    Perfect agreement yields 1.0 (Gupta et al., 2009).
    """
    # mean ratio
    b = np.mean(mod) / np.mean(obs)
    # variability ratio — BUG FIX: was np.std(mod) / np.std(mod), i.e. always 1,
    # which silently ignored variability errors in the model.
    a = np.std(mod) / np.std(obs)
    # corr coeff: off-diagonal entry of the 2x2 correlation matrix
    r = np.corrcoef(mod, obs)[0, 1]
    kgeval = 1 - np.sqrt((r - 1.)**2 + (a - 1.)**2 + (b - 1)**2)
    return kgeval
# End helper functions
# ---- command-line interface ----
parser = argparse.ArgumentParser()
parser.add_argument("trace", type=str)
parser.add_argument("prior", type=str)
parser.add_argument("--param_file", type=str, help="parameter (yml) file", default="./prior_parameters/param.yml")
parser.add_argument("--silent", type=bool, help="yes/no to log output", default=True)
parser.add_argument("--output_loc", type=str, help="output location directory", default="./")
args = parser.parse_args()
# get important stuff
# Trace file names encode the simulation window: <stem>_<start>_<end>.<ext>
trace = pathlib.Path(args.trace)
prior = pathlib.Path(args.prior)
start_date = trace.name.split("_")[1]
end_date = trace.name.split("_")[2].split(".")[0]
print(start_date, end_date)
# open up the trace and priors (pickled sampling results)
with open(prior, 'rb') as buff:
    prior_dict = pickle.load(buff)
with open(trace, 'rb') as buff:
    trace = pickle.load(buff)  # NOTE: rebinds `trace` from Path to trace object
# pm.traceplot(trace)
# get all of the trace points...
# configure model
nlayers = 100  # number of elevation layers for the wrapped model
model = model_wrapper()
model.read_parameter_files()
model.create_forcings(start_date, end_date, nlayers)
model.substitute_arguments([],[])
# open the parameter dictionaries...
# NOTE(review): `yaml` is not imported in this file; presumably brought in by
# a star-import above (e.g. `from inference import *`) — confirm.
with open(args.param_file, "r") as p:
    dct = yaml.load(p, yaml.FullLoader)
# update initial conditions
model.hydro_model_args.update(dct['initial_conditions'])
elev_areas = model.elevation_bins_areas.reshape(nlayers,1)
# DO the prior precipitation
def p_from_dict(trace_dict, dct, model):
    """Run the model once per sample and collect total precipitation.

    trace_dict: mapping of parameter name -> array of sampled values
        (one entry per posterior/prior sample).
    dct: parameter spec with 'snow' and 'hydro' sections naming the keys
        to pull from trace_dict.
    model: configured model_wrapper instance — mutated in place each run.
    Returns a list of summed 'qin' values, one per sample.
    """
    precip_list = []
    # Sample count = length of any one parameter's value array.
    len_of_dict = len(trace_dict[[k for k in trace_dict][0]])
    for i in range(len_of_dict):
        update_snow = {}
        for key in dct['snow'].keys():
            update_snow[key] = trace_dict.get(key)[i]
        update_hydro = {}
        for key in dct['hydro'].keys():
            update_hydro[key] = trace_dict.get(key)[i]
        # now run the model..
        model.snow_model_args.update(update_snow)
        model.hydro_model_args.update(update_hydro)
        precip_list.append(model.runmain([])['qin'].sum())
    return precip_list
# Evaluate precipitation totals for posterior (trace) and prior samples.
inferred_precip_list = p_from_dict(trace, dct, model)
prior_precip_list = p_from_dict(prior_dict, dct, model)
# write out the precipitation
inf_p_name="inferred_precip_%s-%s.pkl"%(start_date, end_date)
p_p_name="prior_precip_%s-%s.pkl"%(start_date, end_date)
# write the files..
with open(inf_p_name, "wb") as handle:
    pickle.dump(inferred_precip_list, handle)
with open(p_p_name, "wb") as handle:
    pickle.dump(prior_precip_list, handle)
|
from PyQt5 import QtWidgets, QtCore
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from sklearn.decomposition import PCA
from matplotlib.ticker import MaxNLocator
from matplotlib import pyplot as plt
import sys
import math
import numpy as np
from neural_net import ObservableNet
class Window(QtWidgets.QWidget):
    def __init__(self):
        """Build and train the observed network, then assemble the Qt UI."""
        super().__init__()
        # Placeholders populated later by initialize_observable_net / the UI.
        self.observable_net = self.grad_displays = \
            self.weight_displays = self.weights = self.gradients = self.displays = None
        self.l1_from = self.l1_to = self.l2_from = self.l2_to = self.step = self.adjust_value = \
            self.grad_vectors = self.weight_vectors = None
        self.layer = 0          # index of the currently displayed layer
        self.epochs = 10        # training epochs (also the time-axis length)
        self.delete_x = 0
        self.delete_y = 0
        self.initialize_observable_net()
        self.ad_value = None    # user-entered min/max adjustment value
        self.vis = 'gradient'   # current visualization mode
        self.figure = Figure()
        self.canvas = FigureCanvasQTAgg(self.figure)
        self.toolbar = NavigationToolbar2QT(self.canvas, self)
        self.init_ui()
        self.update_texts()
    def initialize_observable_net(self):
        """Create a 5-layer MLP on 784 inputs, train it, and cache its
        weight/gradient time vectors plus the derived display matrices."""
        observable_net = ObservableNet(784)
        # Fixed seeds keep runs comparable across restarts.
        observable_net.add_layer(512, name='hidden', seed=5034)
        observable_net.add_layer(256, name='hidden2', seed=6456)
        observable_net.add_layer(128, name='hidden3', seed=7675)
        observable_net.add_layer(64, name='hidden4', seed=8345)
        observable_net.add_layer(10, name='output', activation='linear', seed=997)
        observable_net.train(self.epochs)
        self.observable_net = observable_net
        self.weights = observable_net.weights
        self.gradients = observable_net.gradients
        # One time-vector array per layer (5 layers), for both properties.
        self.grad_vectors = [observable_net.create_time_vectors
                             ('gradient', layer) for layer in range(5)]
        self.weight_vectors = [observable_net.create_time_vectors
                               ('weight', layer) for layer in range(5)]
        self.grad_displays = self.create_displays(self.grad_vectors)
        self.weight_displays = self.create_displays(self.weight_vectors)
    def init_ui(self):
        """Lay out the window: plot canvas on the left, settings on the right."""
        self.setMinimumHeight(500)
        self.setMinimumWidth(1000)
        main_group = QtWidgets.QGroupBox('Visualization Settings')
        setting_layout = QtWidgets.QVBoxLayout()
        setting_layout.setAlignment(QtCore.Qt.AlignTop)
        # Settings panel takes at most ~29% of the window width.
        main_group.setMaximumWidth(int(self.width() / 3.5))
        main_group.setLayout(setting_layout)
        h_box = QtWidgets.QHBoxLayout()
        center = QtWidgets.QGroupBox()
        left = QtWidgets.QVBoxLayout()
        left.addWidget(self.toolbar)
        left.addWidget(self.canvas)
        center.setLayout(left)
        self.init_settings(setting_layout)
        h_box.addWidget(center)
        h_box.addWidget(main_group)
        self.setLayout(h_box)
        self.plot(d_first=True)
        self.show()
    def change_layer(self, value):
        """Combo-box callback: switch the displayed layer and redraw."""
        self.layer = value
        self.update_texts()
        self.plot()
    def update_texts(self):
        """Reset the from/to range fields to span the whole current layer."""
        self.l1_from.setText('0')
        self.l2_from.setText('0')
        self.l1_to.setText(str(self.grad_displays[self.layer].shape[0]))
        # Each display column packs `epochs` time steps per unit, hence the division.
        self.l2_to.setText(str(int(self.grad_displays[self.layer].shape[1] / self.epochs)))
    def change_to_grad(self):
        """Radio-button callback: show gradients and redraw."""
        self.vis = 'gradient'
        self.plot()
    def change_to_weight(self):
        """Radio-button callback: show weights and redraw."""
        self.vis = 'weight'
        self.plot()
    def adjust_mm(self):
        """'Adjust' button slot: use the typed value as the manual color
        range (raises ValueError if the field is not a float)."""
        self.ad_value = float(self.adjust_value.text())
        self.plot()
    def change_to_combined(self):
        """Slot for the (currently disabled) combined gradient+weight view."""
        self.vis = 'combined'
        self.plot()
def create_displays(self, time_vectors):
displays = list()
for layer in range(len(time_vectors)):
display = list()
for row in time_vectors[layer]:
# row = MinMaxScaler().fit_transform(row)
x = row.flatten()
display.append(x)
displays.append(np.array(display))
return displays
    def init_settings(self, layout):
        """Populate the settings panel: property radio buttons, layer
        selector, neuron-range inputs, epoch step, and manual color range."""
        vis_label = QtWidgets.QLabel('Properties:')
        layout.addWidget(vis_label)
        buttons = QtWidgets.QGroupBox()
        h_box = QtWidgets.QHBoxLayout()
        buttons.setLayout(h_box)
        layout.addWidget(buttons)
        first = QtWidgets.QRadioButton('Gradients')
        first.toggle()  # gradients is the default view
        first.toggled.connect(self.change_to_grad)
        h_box.addWidget(first)
        second = QtWidgets.QRadioButton('Weights')
        second.toggled.connect(self.change_to_weight)
        h_box.addWidget(second)
        # Combined view is currently disabled (wiring commented out below).
        third = QtWidgets.QRadioButton('Combination')
        #third.toggled.connect(self.change_to_combined)
        #h_box.addWidget(third)
        layer_label = QtWidgets.QLabel('Layer:')
        layout.addWidget(layer_label)
        layer_selection = QtWidgets.QComboBox()
        layout.addWidget(layer_selection)
        # Build a human-readable name per layer transition.
        layer_items = list()
        for item in self.weights['layer'].unique():
            if item == 0:
                layer_items.append('Input - Hidden 1')
            elif item == len(self.weights['layer'].unique()) - 1:
                layer_items.append('Hidden ' + str(item) + ' - Output')
            else:
                layer_items.append('Hidden ' + str(item) + ' - Hidden ' + str(item + 1))
        layer_selection.addItems(layer_items)
        layer_selection.currentIndexChanged.connect(self.change_layer)
        # Y-axis neuron range: "from : to" line edits.
        l1_selection = QtWidgets.QGroupBox()
        l1_selection_box = QtWidgets.QHBoxLayout()
        self.l1_from = l1_from_selection = QtWidgets.QLineEdit('0')
        self.l1_to = l1_to_selection = QtWidgets.QLineEdit('784')
        l1_selection.setLayout(l1_selection_box)
        l1_selection_box.addWidget(l1_from_selection)
        l1_selection_box.addWidget(QtWidgets.QLabel(':'))
        l1_selection_box.addWidget(l1_to_selection)
        # X-axis neuron range.
        l2_selection = QtWidgets.QGroupBox()
        l2_selection_box = QtWidgets.QHBoxLayout()
        self.l2_from = l2_from_selection = QtWidgets.QLineEdit('0')
        self.l2_to = l2_to_selection = QtWidgets.QLineEdit('512')
        l2_selection.setLayout(l2_selection_box)
        l2_selection_box.addWidget(l2_from_selection)
        l2_selection_box.addWidget(QtWidgets.QLabel(':'))
        l2_selection_box.addWidget(l2_to_selection)
        layout.addWidget(QtWidgets.QLabel('Show Neurons:'))
        layout.addWidget(QtWidgets.QLabel('Neurons of Y-Axis Layer:'))
        layout.addWidget(l1_selection)
        layout.addWidget(QtWidgets.QLabel('Neurons of X-Axis Layer:'))
        layout.addWidget(l2_selection)
        layout.addWidget(QtWidgets.QLabel('Step:'))
        self.step = QtWidgets.QLineEdit('1')  # epoch subsampling step
        layout.addWidget(self.step)
        apply_button = QtWidgets.QPushButton('Apply')
        apply_button.pressed.connect(self.change_values)
        layout.addWidget(apply_button)
        # Manual symmetric color-range value consumed by adjust_mm().
        self.adjust_value = QtWidgets.QLineEdit('')
        layout.addWidget(self.adjust_value)
        adjust = QtWidgets.QPushButton('Adjust')
        adjust.pressed.connect(self.adjust_mm)
        layout.addWidget(adjust)
def change_values(self):
self.plot(l1_from=int(self.l1_from.text()),
l1_to=int(self.l1_to.text()), l2_from=int(self.l2_from.text()), l2_to=int(self.l2_to.text()),
step=int(self.step.text()))
self.ad_value = None
def get_display(self, vis, l1_from, l1_to, l2_from, l2_to, step, remove_first=False):
if vis == 'gradient':
display = self.grad_displays[self.layer]
else:
display = self.weight_displays[self.layer]
if self.vis=='weight':
self.delete_x = np.min(display)
self.delete_y = np.max(display)
else:
max_value = np.abs(np.max(display))
min_value = np.abs(np.min(display))
max_value = min_value = np.max([min_value, max_value])
max_value = np.sign(np.max(display)) * max_value
min_value = np.sign(np.min(display)) * min_value
self.delete_x = min_value
self.delete_y = max_value
if l1_from is None:
l1_from = 0
if l2_from is None:
l2_from = 0
if l1_to is None:
l1_to = display.shape[0]
if l2_to is None:
l2_to = display.shape[1] / self.epochs
if 0 <= l1_to <= display.shape[0] and 0 <= l2_to <= display.shape[1] / self.epochs \
and 0 <= l1_from <= display.shape[0] and l1_to >= l1_from \
and l2_to >= l2_from and 0 <= l2_to <= display.shape[1] / self.epochs:
if l1_from == l1_to:
l1_to = l1_to + 1
if l2_from == l2_to:
l2_to = l2_to + 1
display = display[l1_from:l1_to, l2_from * self.epochs:int(l2_to) * self.epochs]
new_len = self.epochs
if 1 < step < self.epochs:
display = np.delete(display,
[i for i in range(int(display.shape[1])) if i % step != 0],
axis=1)
new_len = math.ceil(new_len / step)
if remove_first:
display = np.delete(display,
[i for i in range(int(display.shape[1])) if i % new_len == 0],
axis=1)
return display
def plot(self, l1_from=0, l1_to=None, l2_from=0, l2_to=None, step=1, d_first=False):
self.figure.clear()
ax = self.figure.add_subplot(111)
ax.clear()
plt.tick_params(labelsize=20)
if self.layer == len(self.grad_displays) - 1:
ax.set_xlabel('Output Layer')
else:
ax.set_xlabel('Hidden Layer ' + str(self.layer + 1))
if self.layer > 0:
ax.set_ylabel('Hidden Layer ' + str(self.layer))
else:
ax.set_ylabel('Input Layer')
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
v_min = v_max = None
if self.vis == 'gradient':
cmap = 'RdBu_r'
else:
cmap = 'viridis'
if self.vis is not 'combined':
display = self.get_display(self.vis, l1_from, l1_to, l2_from, l2_to, step)
if self.ad_value is None:
if self.vis == 'gradient':
max_value = np.abs(np.max(display))
min_value = np.abs(np.min(display))
max_value = min_value = np.max([min_value, max_value])
max_value = np.sign(np.max(display)) * max_value
min_value = np.sign(np.min(display)) * min_value
v_min = min_value
v_max = max_value
else:
v_max = self.ad_value
v_min = (-1) * self.ad_value
display = ax.imshow(display, aspect='auto', cmap=cmap, interpolation='None',
extent=[0, int(len(display[0]) / self.epochs), len(display), 0], vmin=v_min,
vmax=v_max)
cb = self.figure.colorbar(display)
if self.vis == 'gradient':
cb.set_label('Gradient')
else:
cb.set_label('Weight')
else:
display_1 = self.get_display('gradient', l1_from, l1_to, l2_from, l2_to, step)
display_2 = self.get_display('weight', l1_from, l1_to, l2_from, l2_to, step)
display_1 = ax.imshow(display_1, aspect='auto', cmap='RdBu', interpolation='None',
extent=[0, int(len(display_1[0]) / self.epochs), len(display_1), 0], vmax=v_max,
vmin=v_min)
cb = self.figure.colorbar(display_1, shrink=0.5)
cb.set_label('gradient')
display_2 = ax.imshow(display_2, aspect='auto', cmap='PuOr', interpolation='None',
extent=[0, int(len(display_2[0]) / self.epochs), len(display_2), 0], vmin=v_min,
vmax=v_max)
cb_2 = self.figure.colorbar(display_2, shrink=0.5)
cb_2.set_label('weight')
self.canvas.draw()
def visualize_time_vectors(self, layer):
vectors = list()
for row in self.time_vectors[layer]:
for vector in row:
vectors.append(vector)
representations = PCA().fit_transform(vectors)
plt.scatter(representations[:, 0], representations[:, 1])
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    # Keep a reference so the window is not garbage-collected immediately.
    app_window = Window()
    sys.exit(app.exec_())
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# ventanas_Tkinter_curso.py
#
from Tkinter import *
# Root window; className is used as the window-manager class/title hint.
root = Tk(className ="Mi primera GUI")
svalue = StringVar() # holds the entry's text as a string
w = Entry(root,textvariable=svalue) # single-line entry widget bound to svalue
w.pack()
def act():
    # Button callback: echo the entry's current content to stdout.
    print "por pantalla"
    print '%s' % svalue.get()
foo = Button(root,text="Clikear", command=act) # button wired to act()
foo.pack()
root.mainloop()
|
import pygame
from GameParameter import clock
from StartMenu import StartMenu
from SelectMenu import SelectMenu
def start_menu():
    """Run the start-menu loop; hand off to character selection on choice 1."""
    screen = StartMenu()
    running = True
    while running:
        # A QUIT event closes the window and ends the loop.
        if any(event.type == pygame.QUIT for event in pygame.event.get()):
            running = False
        screen.draw()
        pygame.display.flip()
        clock.tick(30)  # cap the menu at 30 FPS
        result = screen.get_result()
        if result != -1:  # -1 means "no choice made yet"
            running = False
        if result == 1:   # choice 1 opens the character-selection screen
            select_chr()
def select_chr():
    """Run the character-selection loop until the window is closed."""
    screen = SelectMenu()
    running = True
    while running:
        # Drain the event queue; QUIT ends the loop.
        if any(event.type == pygame.QUIT for event in pygame.event.get()):
            running = False
        screen.draw()
        pygame.display.flip()
if __name__ == '__main__':
    pygame.init()
    # 1120x720 window shared by all menu screens.
    size = width, height = 1120, 720
    display = pygame.display.set_mode(size)
    start_menu()
    pygame.quit()
from __future__ import print_function
# import requests
import csv
import os
import re
# AWS Requirements
import boto3
from botocore.exceptions import ClientError
from botocore.vendored import requests
dynamodb = boto3.client('dynamodb')
def update_saved_info(phone_number, zip_code):
    """Create or refresh the phone-number -> ZIP mapping in DynamoDB.

    Inserts a new row for unknown numbers; updates the stored ZIP only when
    it actually changed.  Errors are logged and swallowed (best-effort).
    """
    key = {'phone_number': {'S': phone_number}}
    try:
        response = dynamodb.get_item(TableName="aqi_db", Key=key)
    except ClientError as e:
        print(e.response['Error']['Message'])
        return
    if "Item" not in response:
        # First contact from this number: insert a fresh row.
        dynamodb.put_item(TableName="aqi_db",
                          Item={'phone_number': {'S': phone_number},
                                'zip_code': {'S': zip_code}})
    elif response['Item']['zip_code']['S'] != zip_code:
        # BUG FIX: compare the attribute's string value, not the {'S': ...}
        # wrapper dict (the old comparison was always unequal, forcing an
        # unnecessary write on every message).
        dynamodb.update_item(TableName="aqi_db", Key=key,
                             UpdateExpression="set zip_code = :z",
                             ExpressionAttributeValues={":z": {"S": zip_code}})
def check_if_phone_number_saved(phone_number):
    """Return the stored DynamoDB item for *phone_number*, or None.

    None is returned both when the number is unknown and when the lookup
    fails (the error is printed).
    """
    try:
        response = dynamodb.get_item(TableName="aqi_db",
                                     Key={'phone_number': {'S': phone_number}})
    except ClientError as e:
        print(e.response['Error']['Message'])
        return None
    return response.get("Item")
def get_AirNow_api_key():
    """Read the AirNow API key from the Lambda environment (raises KeyError
    if the 'airnow_api_key' variable is not configured)."""
    return os.environ['airnow_api_key']
def get_AQI(zip_code):
    """Fetch the current PM2.5 observation for *zip_code* from AirNow.

    Returns (AQI, category name, hour observed) as strings, or None when
    the ZIP is invalid or no PM2.5 row is present in the CSV response.
    """
    if not is_valid_zip_code(zip_code):
        return None
    url = ('http://www.airnowapi.org/aq/observation/zipCode/current/'
           '?format=text/csv&zipCode=%s&distance=10&API_KEY=%s'
           % (zip_code, get_AirNow_api_key()))
    response_body = requests.get(url).text
    for row in csv.DictReader(response_body.splitlines()):
        # Only PM 2.5 is reported for now; Ozone could be added later.
        if row['ParameterName'] == "PM2.5":
            return (row['AQI'], row['CategoryName'], row['HourObserved'])
    return None
def format_AQI(aqi_report, zip_code):
    """Render an AQI report as an SMS-friendly one-liner.

    *aqi_report* is the (AQI, category, hour) tuple from get_AQI, or None
    when no data was available for the ZIP code.
    """
    if aqi_report is None:  # identity test instead of ``== None``
        return 'Sorry, we could not get the latest AQI data for ZIP code: %s :(' % (zip_code)
    return '%s:00:00 - Air Quality: %s, PM 2.5: %s in %s' % (
        aqi_report[2], aqi_report[1], aqi_report[0], zip_code)
# Compiled once at import time: 5 digits, optionally '-' + 4 digits (ZIP+4).
_ZIP_CODE_RE = re.compile('^[0-9]{5}(?:-[0-9]{4})?$')

def is_valid_zip_code(zip_code):
    """Return True if *zip_code* looks like a US ZIP or ZIP+4 code.

    Previously the regex was recompiled on every call and a Match/None was
    returned; callers only use the result's truthiness, so returning a bool
    is backward-compatible.
    """
    return _ZIP_CODE_RE.match(zip_code) is not None
def lambda_handler(event, context):
    """Twilio-webhook Lambda entry point.

    The texted message body is treated as a ZIP code; replies with a TwiML
    message containing the current AQI for that ZIP.
    """
    print("Received event: " + str(event))
    zip_code = event['Body']
    phone_number = event['From'][3:]  # strip the leading country-code prefix
    aqi_report = get_AQI(zip_code)
    update_saved_info(phone_number, zip_code)
    body = format_AQI(aqi_report, zip_code)
    return ('<?xml version="1.0" encoding="UTF-8"?>'
            '<Response><Message><Body>%s</Body></Message></Response>' % (body))
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from registration import views
from registration.views import *
from registration.models import *
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
urlpatterns = [
    # Landing page: user sign-up form.
    url(r'^$', UserRegistrationView.as_view(), name='user_signup'),
    # Static "registration succeeded" page.
    # NOTE(review): the pattern has no trailing '$', so any path starting
    # with 'user/success/' matches — confirm that is intended.
    url(r'^user/success/', TemplateView.as_view(template_name='success.html'),
        name='page')
]
|
def row_weights(array):
    """Split alternating people into two teams and return their weights.

    People at even (0-based) positions form team 1, odd positions team 2;
    returns the tuple (team1_total, team2_total).
    """
    team_one = sum(array[::2])
    team_two = sum(array[1::2])
    return (team_one, team_two)
'''
Scenario
Several people are standing in a row divided into two teams.
The first person goes into team 1, the second goes into team 2,
the third goes into team 1, and so on.
Task
Given an array of positive integers (the weights of the people),
return a new array/tuple of two integers, where the first one is the
total weight of team 1, and the second one is the total weight of team 2.
Notes
Array size is at least 1.
All numbers will be positive.
Input >> Output Examples
1- rowWeights([13, 27, 49]) ==> return (62, 27)
Explanation:
The first element 62 is the total weight of team 1, and the second element 27
is the total weight of team 2.
2- rowWeights([50, 60, 70, 80]) ==> return (120, 140)
Explanation:
The first element 120 is the total weight of team 1, and the second element 140
is the total weight of team 2.
'''
|
from flask import Flask, jsonify, request, Blueprint
from ..models.models import (
Users, get_all_users, get_user_by_id, update_admin_status, get_menu, get_username, get_user_orders,
get_orders, get_order_by_id, insert_response)
from ..controllers import (registration_controller,
login_controller, menu_controller, orders_controller)
from ..controllers.menu_controller import admin_required
from flask_jwt_extended import jwt_required, get_jwt_identity
from flasgger import swag_from
JSON_MIME_TYPE = 'application/json'
admin = Blueprint("admin_route", __name__)
users = Blueprint("user_route", __name__)
auth = Blueprint("authentication", __name__)
@auth.route("/signup", methods=['POST'])
@swag_from('../docs/signup.yml')
def signup():
    """Register a new user account (JSON body required)."""
    if request.content_type == JSON_MIME_TYPE:
        return registration_controller.register_user()
    return jsonify({"Error": "Invalid content_type"}), 400
@auth.route("/login", methods=['POST'])
@swag_from('../docs/login.yml')
def login():
    """Authenticate a user and issue a token (JSON body required)."""
    if request.content_type == JSON_MIME_TYPE:
        return login_controller.signin()
    return jsonify({"Error": "Invalid content_type"}), 400
@admin.route("/users", methods=['GET'])
@swag_from('../docs/users.yml')
@admin_required
def get_users():
    """Admin-only: list every registered user."""
    return jsonify({"users": get_all_users()}), 200
@admin.route("/menu", methods=['POST'])
@swag_from('../docs/meals/add_meal.yml')
@admin_required
def add_meal():
    """Admin-only: add a meal option to the menu (JSON body required)."""
    if request.content_type == JSON_MIME_TYPE:
        return menu_controller.add_meal_option()
    return jsonify({"Error": "Invalid content_type"}), 400
@admin.route("/menu/<int:meal_id>", methods=['PUT'])
@swag_from('../docs/meals/edit_menu.yml')
@admin_required
def edit_meal(meal_id):
    """Admin-only: edit an existing meal option (JSON body required)."""
    if request.content_type == JSON_MIME_TYPE:
        return menu_controller.update_meal_option(meal_id)
    return jsonify({"Error": "Invalid content_type"}), 400
@admin.route("/menu/<int:meal_id>/delete", methods=['DELETE'])
@swag_from('../docs/meals/delete_meal.yml')
@admin_required
def delete_meal(meal_id):
    """Admin-only: remove one meal option from the menu."""
    return menu_controller.remove_meal(meal_id)
@admin.route("/menu", methods=['GET'])
@swag_from('../docs/meals/admin_view_menu.yml')
@admin_required
def view_menu():
    """Admin-only: return the full menu."""
    return jsonify({"menu": get_menu()}), 200
@users.route("/menu", methods=['GET'])
@jwt_required
@swag_from('../docs/user_view_menu.yml')
def see_menu():
    """Return the available menu to an authenticated user."""
    return jsonify({"menu": get_menu()}), 200
@users.route("/orders", methods=['POST'])
@jwt_required
@swag_from('../docs/orders/add_order.yml')
def place_order():
    """Place an order for the authenticated user (JSON body required)."""
    if request.content_type != JSON_MIME_TYPE:
        return jsonify({"Error": "Invalid content_type"}), 400
    # Resolve the JWT identity to a username before recording the order.
    return orders_controller.make_order(get_username(get_jwt_identity()))
@users.route("/orders", methods=['GET'])
@jwt_required
@swag_from('../docs/orders/view_orders.yml')
def view_orders():
    """Return the authenticated user's own orders."""
    username = get_username(get_jwt_identity())
    return jsonify({"Your orders": get_user_orders(username)}), 200
@admin.route("/orders", methods=['GET'])
@swag_from('../docs/orders/view_all_orders.yml')
@admin_required
def get_all_orders():
    """Admin-only: list every order ever placed."""
    return jsonify({"orders": get_orders()}), 200
@admin.route("/orders/<int:order_id>", methods=['GET'])
@swag_from('../docs/orders/view_one.yml')
@admin_required
def get_one_order(order_id):
    """Admin-only: return one order by id, or 404 when it does not exist."""
    # Fetch once instead of twice (the original queried the store for the
    # existence check and again for the response body).
    one_order = get_order_by_id(order_id)
    if one_order is None:
        return jsonify({"error": "Order not found!"}), 404
    return jsonify({"order": one_order}), 200
@admin.route("/orders/<int:order_id>", methods=['PUT'])
@swag_from('../docs/orders/update_status.yml')
@admin_required
def update_status(order_id):
    """Admin-only: set an order's status (Processing/Cancelled/Complete)."""
    if get_order_by_id(order_id) is None:
        return jsonify({"error": "order not found!"}), 404
    status = request.json.get("status")
    if status not in ["Processing", "Cancelled", "Complete"]:
        return jsonify({"error": "Add correct status"}), 405
    insert_response(status, order_id)
    # Re-fetch so the response reflects the freshly written status.
    return jsonify({"current_status": status}, {"order": get_order_by_id(order_id)}), 200
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis"
# Import libraries
import os
import sys
import cv2
import math
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage.measure import block_reduce
from matplotlib.ticker import FuncFormatter
def to_percent(y, position):
    """
    Matplotlib tick formatter: render fraction *y* as a percentage label.
    Input: y (tick value), position (required by FuncFormatter, unused)
    Output: formatted tick string
    """
    label = str(100 * y)
    # Escape the percent sign when TeX rendering is active.
    if matplotlib.rcParams['text.usetex'] is True:
        return label + r'$\%$'
    return label + '%'
def plot_histogram(msen, error, filename):
    """
    Plot a normalized histogram of per-pixel MSEN values for one scene.
    Input: msen (per-pixel MSEN values), error (PEPN percentage for the
    title), filename (scene name for the title)
    Output: none (shows the figure)
    """
    # NOTE(review): ``normed`` was removed in matplotlib 3.x; newer
    # versions require ``density=True`` instead.
    plt.hist(msen, bins = 25, normed = True)
    # Format y ticks as percentages via the to_percent helper.
    formatter = FuncFormatter(to_percent)
    plt.gca().yaxis.set_major_formatter(formatter)
    plt.xlabel('MSEN value')
    plt.ylabel('Number of Pixels')
    plt.title("Histogram of scene %s. \n Percentage of Erroneous Pixels in Non-occluded areas (PEPN): %d %%" % (filename, error))
    plt.show()
def plot_motion(frame, motion, N):
    """
    Plot a downsampled motion vector field on top of *frame*.
    Input: frame (background image), motion (3-channel flow image,
    presumably KITTI-style u/v/valid encoding — TODO confirm), N (block
    size for downsampling)
    Output: none (shows the figure)
    """
    u = []
    v = []
    c1, c2, c3 = motion.shape
    # Average NxN blocks so the quiver plot stays readable.
    motion = block_reduce(motion, block_size=(N, N, 1), func=np.mean)
    c1, c2, c3 = motion.shape
    x, y = np.meshgrid(np.arange(0, c2, 1), np.arange(0, c1, 1))
    # Decode flow as (value - 2^15) / 64, with an extra /200 display scale.
    # NOTE(review): the ``== 1`` validity test compares the block-averaged
    # channel 0 to exactly 1, which rarely holds after np.mean — confirm
    # whether channel 0 is really the validity mask and whether the check
    # should be ``>`` or applied before block_reduce.
    motion_u = [(((float)(motion[:, :, 0].flat[pixel]) - math.pow(2, 15)) / 64.0)/200.0 if motion[:, :, 0].flat[pixel] == 1 else 0
                for pixel in range(0, motion[:, :, 0].size)]
    motion_v = [(((float)(motion[:, :, 1].flat[pixel]) - math.pow(2, 15)) / 64.0)/200.0 if motion[:, :, 0].flat[pixel] == 1 else 0
                for pixel in range(0, motion[:, :, 0].size)]
    u = np.reshape(motion_u, (c1, c2))
    v = np.reshape(motion_v, (c1, c2))
    # Resize the frame to the downsampled grid and overlay the arrows.
    img = resize(frame, (c1, c2))
    plt.imshow(img)
    Q = plt.quiver(x, y, u, v, pivot='mid', units='inches', scale_units='inches')
    plt.show(Q)
|
# coding=utf-8
import smtplib
from email.mime.text import MIMEText
class mailSender(object):
    """Minimal SMTP mail sender with hard-coded server credentials."""

    def __init__(self):
        # SMTP endpoint and account; replace the placeholders before use.
        self.server = 'smtp.domain.com'
        self.username = '发信人名称'
        self.password = 'password'
        self.port = 25
        self.sender = 'admin@domain.com'

    def send(self, toList, subject, body):
        """Send a plain-text UTF-8 mail to every address in *toList*.

        Best-effort: any failure is printed and swallowed.
        """
        msg = MIMEText(body, 'plain', 'utf-8')
        msg['From'] = self.sender
        msg['To'] = ','.join(toList)
        msg['Subject'] = subject
        try:
            smtp_client = smtplib.SMTP()
            smtp_client.connect(self.server, self.port)
            rs = smtp_client.login(self.username, self.password)
            print('登录结果', rs)
            try:
                if rs and rs[0] == 235:  # 235 = SMTP "authentication succeeded"
                    print('登录成功')
                    smtp_client.sendmail(self.sender, toList, msg.as_string())
                else:
                    print('登录失败, code=', rs[0])
            finally:
                # BUG FIX: always close the connection; previously an
                # exception from sendmail skipped close() and leaked the
                # socket.
                smtp_client.close()
        except Exception as e:
            print('发送邮件失败', e)
if __name__ == '__main__':
    mailsender = mailSender()
    # Smoke test: needs real SMTP credentials configured above to succeed.
    mailsender.send(['receiver@domain.com'], 'scrapy测试邮件', 'only for test scrapy')
../gasp/Rmag_aperture_annulus_r_file_median_w1_subplot_date_target.py |
import random
from uuid import UUID
from wacryptolib.cryptainer import CryptainerStorage, dump_cryptainer_to_filesystem, PAYLOAD_CIPHERTEXT_LOCATIONS
class FakeTestCryptainerStorage(CryptainerStorage):
    """Fake class which bypasses encryption and forces filename unicity regardless of datetime, to speed up tests..."""
    # Class-level counter; ``self.increment += 1`` below shadows it with an
    # instance attribute after the first enqueue, so numbering is per instance.
    increment = 0
    def enqueue_file_for_encryption(self, filename_base, payload, **kwargs):
        # Suffix ".000", ".001", ... guarantees unique filenames even when
        # several files are enqueued within the same datetime.
        super().enqueue_file_for_encryption(filename_base + (".%03d" % self.increment), payload, **kwargs)
        self.increment += 1
    def _use_streaming_encryption_for_cryptoconf(self, cryptoconf):
        return self._offload_payload_ciphertext  # Do NOT dig cryptoconf here, it might be all wrong
    def _encrypt_payload_and_stream_cryptainer_to_filesystem(
        self, payload, cryptainer_filepath, cryptainer_metadata, default_keychain_uid, cryptoconf
    ):
        cryptainer = self._encrypt_payload_into_cryptainer(  # No streaming pipeline in this FAKE class!
            payload,
            cryptainer_metadata=cryptainer_metadata,
            default_keychain_uid=default_keychain_uid,
            cryptoconf=cryptoconf,
        )
        dump_cryptainer_to_filesystem(cryptainer_filepath, cryptainer=cryptainer, offload_payload_ciphertext=True)
    def _encrypt_payload_into_cryptainer(self, payload, **kwargs):
        # Fake "encryption": the plaintext payload is stored inline, untouched.
        return dict(
            a=33,
            payload_ciphertext_struct=dict(
                ciphertext_location=PAYLOAD_CIPHERTEXT_LOCATIONS.INLINE, ciphertext_value=payload
            ),
        )
    def _decrypt_payload_from_cryptainer(self, cryptainer, **kwargs):
        # Returns (payload, error_list); decryption is the identity here.
        return cryptainer["payload_ciphertext_struct"]["ciphertext_value"], []
class WildcardUuid:
    """Sentinel that compares equal to any UUID instance.

    Useful when asserting over data trees containing unpredictable UUIDs.
    """
    def __eq__(self, other):
        # Equal to anything that is a UUID, whatever its value.
        return isinstance(other, UUID)
def get_fake_authdevice(device_path):
    """Return a dict representing a fake authentication device."""
    # device_path is expected to support the ``/`` join below (pathlib.Path).
    authdevice = {
        "device_type": "USBSTOR",
        "partition_mountpoint": device_path,
        "partition_label": "TOSHIBA",
        "filesystem_size": 31000166400,
        "filesystem_format": "fat32",
        "authenticator_dir": device_path / ".myauthenticator",
    }
    return authdevice
def random_bool():
    """Return True or False, chosen uniformly at random."""
    return random.choice((True, False))
|
#encoding=utf-8
import os
import sys
import mmap
import time
import socket
import struct
import elb_pb2
from StaticRoute import StaticRoute
from CacheLayer import CacheUnit
class FormatError(Exception):
    """Raised when a UDP reply's header fields are malformed."""
    def __init__(self, value):
        # Keep the offending description accessible to callers.
        self.value = value
    def __str__(self):
        return repr(self.value)
class elbClient:
    """UDP client for the local ELB agent (Python 2).

    Resolves (modid, cmdid) pairs to host:port and reports call results.
    Keeps a local route cache with buffered success reports, and falls back
    to a static route table when the agent's heartbeat is stale.
    """
    def __init__(self):
        # One UDP socket per agent port (8888..8890); requests are sharded
        # by (modid + cmdid) % 3.
        self._socks = [None for i in range(3)]
        try:
            self._socks[0] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socks[1] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self._socks[2] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except socket.error, errMsg:
            print errMsg
            exit(1)
        self.__seq = 0
        # Memory-mapped heartbeat file written by the agent (8-byte epoch).
        self.__f = os.open('/tmp/hb_map.bin', os.O_RDONLY)
        self.__m = mmap.mmap(self.__f, 8, flags = mmap.MAP_SHARED, prot = mmap.PROT_READ, offset = 0)
        self.__staticRoute = StaticRoute()
        self.__agentOff = True
        self.cache = {}  # (modid, cmdid) -> CacheUnit
        self.tsget = 0   # ms timestamp of the last successful host lookup
    def __del__(self):
        # Flush any buffered success reports before tearing down resources.
        for key in self.cache:
            modid, cmdid = key
            self.__batchReportRes(self.cache[(modid, cmdid)])
        for sock in self._socks:
            sock.close()
        os.close(self.__f)
    def agentDie(self):
        # The agent is considered dead when its heartbeat is older than 2s.
        currTs = int(time.time())
        s = self.__m.read(8)
        ts = struct.unpack('q', s)[0]
        self.__m.seek(0)
        return currTs - ts > 2
    def apiGetHost(self, modid, cmdid, timo):
        """Resolve (modid, cmdid) to (0, (ip, port)) or (errcode, message).

        *timo* is the network timeout in milliseconds (clamped to 10..1000).
        """
        if self.agentDie():
            # Agent is down: fall back to the static route table.
            self.__agentOff = True
            staticData = self.__staticRoute.getHost(modid, cmdid)
            if staticData == -1:
                return (-9998, 'no exist')
            return (0, staticData)
        self.__staticRoute.freeData()
        self.__agentOff = False
        #get host from cache
        currTs = int(time.time())
        if (modid, cmdid) not in self.cache:
            # This mod is not cached yet: pull its route from the agent.
            ret, err = self.__getRoute4Cache(modid, cmdid, currTs)
            if ret:
                return (ret, err)
        # Read the entry for this mod from the cache.
        cacheItem = self.cache.get((modid, cmdid), None)
        if cacheItem is None:
            return (-9998, 'no exist')
        # Check whether the cached entry is stale: if the last update was
        # 2s or more ago, re-pull once, flushing any buffered success
        # reports first (if present).
        if currTs - cacheItem.lstUpdTs >= 2:
            self.__batchReportRes(cacheItem)
            self.__getRoute4Cache(modid, cmdid, currTs)
            # Re-read the freshly pulled entry.
            cacheItem = self.cache.get((modid, cmdid), None)
            if cacheItem is None:
                # The agent no longer knows this mod: report "no exist".
                return (-9998, 'no exist')
        # If this mod is not overloaded on the API side, pick a host from
        # the cache; otherwise resolve over the network below.
        if not cacheItem.overload:
            ip, port = cacheItem.getHost()
            self.tsget = int(time.time() * 1000)
            return (0, (ip, port))
        #get host from local network
        # Clamp the timeout to [10, 1000] ms.
        if timo < 10: timo = 10
        if timo > 1000: timo = 1000
        i = (modid + cmdid) % 3
        sock = self._socks[i]
        if self.__seq == 2 ** 31: self.__seq = 0
        #create request
        req = elb_pb2.GetHostReq()
        req.seq = self.__seq
        self.__seq += 1
        req.modid = modid
        req.cmdid = cmdid
        rsp = elb_pb2.GetHostRsp()
        #send request
        # Wire format: int32 message id + int32 body length + protobuf body.
        bodyStr = req.SerializeToString()
        reqStr = struct.pack('i', elb_pb2.GetHostReqId) + struct.pack('i', len(bodyStr)) + bodyStr
        try:
            sock.sendto(reqStr, ('127.0.0.1', 8888 + i))
        except socket.error, errMsg:
            print >> sys.stderr, errMsg
            return (-9999, errMsg)
        try:
            #recv response
            sock.settimeout(timo * 0.001)
            rspStr, addr = sock.recvfrom(65536)
            rspId = struct.unpack('i', rspStr[:4])[0]
            bodyLen = struct.unpack('i', rspStr[4:8])[0]
            if rspId != elb_pb2.GetHostRspId or bodyLen != len(rspStr[8:]):
                raise FormatError('message head format error')
            rsp.ParseFromString(rspStr[8:])
            # Drain stale replies from earlier requests until seq catches up.
            while rsp.seq < req.seq:
                rspStr, addr = sock.recvfrom(65536)
                rspId = struct.unpack('i', rspStr[:4])[0]
                bodyLen = struct.unpack('i', rspStr[4:8])[0]
                # NOTE(review): this check compares against GetHostReqId while
                # the first check uses GetHostRspId — confirm which id is
                # actually intended here.
                if rspId != elb_pb2.GetHostReqId or bodyLen != len(rspStr[8:]):
                    raise FormatError('message head format error')
                rsp.ParseFromString(rspStr[8:])
            if rsp.seq != req.seq:
                print >> sys.stderr, 'request seq id is %d, response seq id is %d' % (req.seq, rsp.seq)
                return (-9999, 'request seq id is %d, response seq id is %d' % (req.seq, rsp.seq))
            elif rsp.retcode != 0:
                errMsg = ''
                if rsp.retcode == -10000:
                    errMsg = 'overload'
                elif rsp.retcode == -9998:
                    errMsg = 'no exist'
                else:
                    errMsg = 'agent error'
                return (rsp.retcode, errMsg)
            else:
                # Convert the signed int32 back to an unsigned IPv4 address.
                ipn = rsp.host.ip
                if ipn < 0:
                    ipn += 2 ** 32
                ips = socket.inet_ntoa(struct.pack('I', ipn))
                self.tsget = int(time.time() * 1000)
                return (0, (ips, rsp.host.port))
        except socket.timeout:
            print >> sys.stderr, 'time out when recvfrom socket'
            return (-9999, 'time out when recvfrom socket')
        except FormatError as e:
            return (-9999, e.value)
    def apiReportRes(self, modid, cmdid, ip, port, res):
        """Report the result (*res*, 0 = success) of a call to ip:port."""
        if self.__agentOff:
            return
        cu = self.cache.get((modid, cmdid), None)
        if cu and not cu.overload:
            if res == 0:
                #report to cache
                cu.report(ip, port)
                return
            else:
                # retcode != 0: immediately flush the previously buffered
                # report state to the agent, if any.
                self.__batchReportRes(cu)
        #report by local network
        i = (modid + cmdid) % 3
        sock = self._socks[i]
        #create request
        req = elb_pb2.ReportReq()
        req.modid = modid
        req.cmdid = cmdid
        req.retcode = res
        if res:# on a failed call, also report the elapsed time in milliseconds
            req.tcost = int(time.time() * 1000) - self.tsget
        #ip is str, but req.host.ip need big-endian number
        ipn = struct.unpack('I', socket.inet_aton(ip))[0]
        if ipn > 2 ** 31 - 1:
            ipn -= 2 ** 32
        req.host.ip = ipn
        req.host.port = port
        bodyStr = req.SerializeToString()
        reqStr = struct.pack('i', elb_pb2.ReportReqId) + struct.pack('i', len(bodyStr)) + bodyStr
        sock.sendto(reqStr, ('127.0.0.1', 8888 + i))
    def apiRegister(self, modid, cmdid):# optional convenience API
        # Retry up to 3 times to warm up the route for (modid, cmdid).
        for i in range(3):
            ret, hostOrEmsg = self.apiGetHost(modid, cmdid, 50)
            if ret == 0:
                break
            time.sleep(0.05)
        return 0 if ret == 0 else -9998
    def __batchReportRes(self, cu):
        # Flush the accumulated per-host success counters of *cu* to the agent.
        if cu.succCnt == 0:
            return
        modid, cmdid = cu.modid, cu.cmdid
        i = (modid + cmdid) % 3
        sock = self._socks[i]
        #create request
        req = elb_pb2.CacheBatchRptReq()
        req.modid = modid
        req.cmdid = cmdid
        for key in cu.succAccum:
            ip, port = key
            if cu.succAccum[key] == 0:
                continue
            #ip is str, but req.host.ip need big-endian number
            ipn = struct.unpack('I', socket.inet_aton(ip))[0]
            if ipn > 2 ** 31 - 1:
                ipn -= 2 ** 32
            h = req.results.add()
            h.ip = ipn
            h.port = port
            h.succCnt = cu.succAccum[key]
            #reset accumulator to 0
            cu.succAccum[key] = 0
        bodyStr = req.SerializeToString()
        reqStr = struct.pack('i', elb_pb2.CacheBatchRptReqId) + struct.pack('i', len(bodyStr)) + bodyStr
        sock.sendto(reqStr, ('127.0.0.1', 8888 + i))
        cu.succCnt = 0
    def __getRoute4Cache(self, modid, cmdid, ts):
        # Pull the current route list of (modid, cmdid) from the agent and
        # merge it into self.cache; *ts* stamps the entry's last update time.
        cacheItem = self.cache.get((modid, cmdid), None)
        req = elb_pb2.CacheGetRouteReq()
        req.modid, req.cmdid = modid, cmdid
        # Send our cached version so the agent can skip unchanged routes.
        req.version = self.cache[(modid, cmdid)].version if cacheItem else -1
        rsp = elb_pb2.CacheGetRouteRsp()
        bodyStr = req.SerializeToString()
        reqStr = struct.pack('i', elb_pb2.CacheGetRouteReqId) + struct.pack('i', len(bodyStr)) + bodyStr
        i = (modid + cmdid) % 3
        sock = self._socks[i]
        #send
        try:
            sock.sendto(reqStr, ('127.0.0.1', 8888 + i))
        except socket.error, errMsg:
            print >> sys.stderr, errMsg
            return (-9999, errMsg)
        try:
            #recv response
            sock.settimeout(0.05)
            rspStr, addr = sock.recvfrom(65536)
            rspId = struct.unpack('i', rspStr[:4])[0]
            bodyLen = struct.unpack('i', rspStr[4:8])[0]
            if rspId != elb_pb2.CacheGetRouteRspId or bodyLen != len(rspStr[8:]):
                raise FormatError('message head format error')
            rsp.ParseFromString(rspStr[8:])
            if rsp.modid != modid or rsp.cmdid != cmdid:
                print >> sys.stderr, 'package content error'
                return (-9999, 'package content error')
            # Reply received.
            if rsp.version == -1:
                #remove cache if exist
                if cacheItem:
                    del self.cache[(modid, cmdid)]
            elif not cacheItem or cacheItem.version != rsp.version:
                #update route
                if not cacheItem:
                    cacheItem = CacheUnit()
                    cacheItem.modid = modid
                    cacheItem.cmdid = cmdid
                    self.cache[(modid, cmdid)] = cacheItem
                cacheItem.overload = rsp.overload
                cacheItem.version = rsp.version
                cacheItem.succCnt = 0
                cacheItem.lstUpdTs = ts
                cacheItem.nodeList = []
                cacheItem.succAccum = {}
                # Add the routing entries from the reply.
                for h in rsp.route:
                    ipn = h.ip
                    if ipn < 0:
                        ipn += 2 ** 32
                    ips = socket.inet_ntoa(struct.pack('I', ipn))
                    cacheItem.nodeList.append((ips, h.port))
                    cacheItem.succAccum[(ips, h.port)] = 0
            else:
                # Same version: only refresh the overload flag and timestamp.
                cacheItem.overload = rsp.overload
                cacheItem.succCnt = 0
                cacheItem.lstUpdTs = ts
        except socket.timeout:
            print >> sys.stderr, 'time out when recvfrom socket'
            return (-9999, 'time out when recvfrom socket')
        except FormatError as e:
            return (-9999, e.value)
        return (0, '')
|
import cv2 as cv
import numpy as np
from basic_functions import read_image, show_images
from luminance_correction import luminance_correction
from copy import deepcopy
def create_eye_map_c(imgYCrCb):
    """
    Calculate the eye map from the chrominance components.
    :param imgYCrCb: image in YCrCb space
    :return: Eye map from chrominance components
    """
    processed_img = deepcopy(imgYCrCb)
    h = imgYCrCb.shape[0]  # image height
    w = imgYCrCb.shape[1]  # image width
    # Per-pixel component maps; channel 1 is Cr and channel 2 is Cb in
    # OpenCV's YCrCb ordering.
    cb_sqr = np.zeros(imgYCrCb.shape)
    cb_over_cr = np.zeros(imgYCrCb.shape)
    cr_neg_sqr = np.zeros(imgYCrCb.shape)
    for i in range(h):
        for j in range(w):
            # NOTE(review): if imgYCrCb is uint8, the ``** 2`` below is
            # evaluated in uint8 and may wrap around before being widened
            # by the float assignment — confirm the intended dtype.
            cb_sqr[i, j] = imgYCrCb[i, j, 2] ** 2
            cb_over_cr[i, j] = imgYCrCb[i, j, 2] / imgYCrCb[i, j, 1]
            cr_neg_sqr[i, j] = (255 - imgYCrCb[i, j, 1]) ** 2
    # Min-max normalize each component map to [0, 255].
    cv.normalize(cb_sqr, cb_sqr, 0, 255, cv.NORM_MINMAX)
    cv.normalize(cb_over_cr, cb_over_cr, 0, 255, cv.NORM_MINMAX)
    cv.normalize(cr_neg_sqr, cr_neg_sqr, 0, 255, cv.NORM_MINMAX)
    # Combine the three maps as a plain average.
    for i in range(h):
        for j in range(w):
            processed_img[i, j] = (cb_sqr[i, j] + cr_neg_sqr[i, j] +
                                   cb_over_cr[i, j]) / 3
    # Histogram-equalize each channel to boost contrast.
    processed_img[:, :, 0] = cv.equalizeHist(processed_img[:, :, 0])
    processed_img[:, :, 1] = cv.equalizeHist(processed_img[:, :, 1])
    processed_img[:, :, 2] = cv.equalizeHist(processed_img[:, :, 2])
    return processed_img
def create_eye_map_y(imgYCrCb):
    """
    Create the eye map from the luminance component.
    Not working yet, probably due to the structuring element.
    :param imgYCrCb: image in YCrCb space
    :return: Eye map from luminance component
    """
    processed_img = deepcopy(imgYCrCb)
    # 3x3 elliptical kernel for grayscale dilation/erosion of channel Y.
    str_el = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    y_dilation = cv.dilate(processed_img[:, :, 0], str_el)
    y_erosion = cv.erode(processed_img[:, :, 0], str_el)
    h = imgYCrCb.shape[0]
    w = imgYCrCb.shape[1]
    for i in range(h):
        for j in range(w):
            # Ratio of local max to (damped) local min highlights dark-in-
            # bright regions; note this assigns the same value to all three
            # channels of the output pixel.
            processed_img[i, j] = 10 * (y_dilation[i, j] / (1 + y_erosion[i, j] / 10))
    return processed_img
def final_c_y_mask(eye_map_c, eye_map_y):
    """
    Join the two eye maps into a single mask.
    :param eye_map_c: C eye map, result of function create_eye_map_c
    :param eye_map_y: Y eye map, result of function create_eye_map_y
    :return: Joined eye maps
    """
    # Pixel-wise AND keeps only regions highlighted by both maps.
    return cv.bitwise_and(eye_map_c, eye_map_y)
if __name__ == '__main__':
    """
    Only for testing; in the final version this must move to main.py.
    """
    img = read_image("data/07-1m.bmp")
    img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    # NOTE(review): the return value is discarded — confirm that
    # luminance_correction modifies img_rgb in place.
    luminance_correction(img_rgb)
    imgYCrCb = cv.cvtColor(img_rgb, cv.COLOR_RGB2YCrCb)
    eye_map_c = create_eye_map_c(imgYCrCb)
    eye_map_y = create_eye_map_y(imgYCrCb)
    final_mask = final_c_y_mask(eye_map_c, eye_map_y)
    show_images([imgYCrCb, eye_map_c, eye_map_y, final_mask], 4, 1)
|
# -*- encoding: UTF-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.db import models
class Category(models.Model):
    """A product category."""

    name = models.CharField(max_length=100, verbose_name=u'Nom de la Catégorie')

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')

    def __unicode__(self):
        label = _('Category')
        return u'%s : %s' % (label, self.name)
class Product(models.Model):
    """A purchasable product belonging to one or more categories."""

    name = models.CharField(max_length=100, verbose_name=u'Nom du produit')
    content = models.TextField(verbose_name=u'Description', blank=True)
    price = models.FloatField(verbose_name=u'Prix d\'achat')
    categories = models.ManyToManyField(Category, verbose_name=u'Catégories')

    class Meta:
        verbose_name = _('Product')
        verbose_name_plural = _('Products')

    def __unicode__(self):
        parts = [self.name, str(self.price), self.get_categories()]
        return u' '.join(parts)

    def get_categories(self):
        """Return a comma-separated listing of this product's category names."""
        names = (category.name for category in self.categories.all())
        return u', '.join(names)
    get_categories.short_description = "Categories"
class ProductImage(models.Model):
    """An image attached to a product."""

    image = models.ImageField(verbose_name=u'Image produit', upload_to='products/images/')
    product = models.ForeignKey('Product', verbose_name=u'Produit associé')

    class Meta:
        verbose_name = _('ProductImage')
        verbose_name_plural = _('ProductImages')

    def __unicode__(self):
        return u', '.join((self.product.name, self.image.url))
class Customer(User):
    """Site user extended with a profile picture."""

    profile_picture = models.ImageField(verbose_name=u'Photo de profil', upload_to='customer/images/')

    class Meta:
        verbose_name = _('Customer')
        verbose_name_plural = _('Customers')

    def __unicode__(self):
        identity = (self.username, self.first_name, self.last_name)
        return u', '.join(identity)
class Cart(models.Model):
    """Shopping cart owned by a customer; aggregates CartItem lines."""

    OPEN, SUBMITTED = ("Open", "Submitted")
    STATUS_CHOICES = (
        (OPEN, "Open - currently active"),
        (SUBMITTED, "Submitted - has been ordered at the checkout"),
    )
    status = models.CharField(verbose_name=u'Status', max_length=128, default=OPEN, choices=STATUS_CHOICES)
    customer = models.ForeignKey('Customer', verbose_name=u'Client')

    class Meta:
        verbose_name = _('Cart')
        verbose_name_plural = _('Carts')

    def get_total(self):
        """Return the sum of every line total in this cart."""
        total = 0
        for item in self.cartitem_set.all():
            total += item.get_total_line()
        return total

    def __unicode__(self):
        # BUG FIX: get_total was referenced without parentheses, so the
        # bound-method object (not the computed total) was stringified.
        return u' ~ '.join([self.customer.username, self.status, str(self.get_total())])
class CartItem(models.Model):
    """One product line (product x quantity) inside a cart."""

    product = models.ForeignKey('Product', verbose_name=u'Produit')
    quantity = models.PositiveIntegerField(verbose_name=u'Quantité')
    cart = models.ForeignKey('Cart', verbose_name=u'Panier')

    class Meta:
        verbose_name = _('CartItem')
        verbose_name_plural = _('CartItems')

    def get_total_line(self):
        """Return the line total: quantity times unit price."""
        return self.quantity * self.product.price

    def __unicode__(self):
        # BUG FIX: get_total_line was referenced without parentheses, so the
        # bound-method object (not the line total) was stringified.
        return u' ~ '.join([self.product.name, str(self.quantity), str(self.get_total_line())])
class Order(models.Model):
    """An order created from a submitted cart."""

    cart = models.ForeignKey('Cart', verbose_name=u'Panier')
    customer = models.ForeignKey('Customer', verbose_name=u'Client')

    class Meta:
        verbose_name = _('Order')
        verbose_name_plural = _('Orders')

    def __unicode__(self):
        details = u' '.join([self.cart.__unicode__(), self.customer.__unicode__()])
        return u'Order :' + details
class Comment(models.Model):
    """Free-text comment left by a customer on a product."""

    product = models.ForeignKey('Product', verbose_name=u'Produit')
    customer = models.ForeignKey('Customer', verbose_name=u'Client')
    text = models.TextField(verbose_name=u'Texte')

    class Meta:
        verbose_name = _('Comment')
        verbose_name_plural = _('Comments')

    def __unicode__(self):
        fields = (self.product.name, self.customer.username, self.text)
        return u' ~ '.join(fields)
|
# -*- coding: utf-8 -*-
from random import uniform, sample
from numpy import *
from copy import deepcopy
import ConnectEndPoint
from SPARQLWrapper import SPARQLWrapper, JSON
# Align FB15k (Freebase) entities with DBpedia entities via sameAs links,
# drop Wikipedia-redirect duplicates, and write the resulting id mappings.

FBdir = "/Users/wenqiangliu/Documents/KG2E/data/FB15k/entity2id.txt"
sp = "\t"
idNum = 0
FBdic = {}  # Freebase entities: key is the entity id, value is a -1 placeholder
with open(FBdir) as fb_file:  # renamed: `file` shadowed the builtin
    print("Begain Reading Freebase File")
    # Iterate lazily instead of readlines() -- no need to hold the whole
    # file in memory; `with` closes it (the explicit close() was redundant).
    for line in fb_file:
        DetailsAndId = line.strip().split(sp)
        # Drop the leading '/' and convert path separators: "/m/xyz" -> "m.xyz"
        FBdic[DetailsAndId[0][1:].replace("/", ".")] = -1
        idNum += 1
print("The Freebase Fils is Done")

DB_FB_dir = "/Users/wenqiangliu/Desktop/freebase_links_en.nt"  # DBpedia-to-Freebase sameAs links
splinks = " "
DBdic = {}  # key: DBpedia entity URI, value: matching Freebase entity
idNum = 0
with open(DB_FB_dir) as links_file:
    print("Begain Extract DBpedia Entity")
    for line in links_file:
        DetailsAndId = line.strip().split(splinks)
        db = DetailsAndId[0][1:-1]   # strip the surrounding <...>
        fb = DetailsAndId[2][28:-1]  # strip the Freebase namespace prefix and '>'
        # Membership test on the dict itself: O(1) instead of scanning .keys()
        if fb in FBdic:
            DBdic[db] = fb
            idNum += 1
print("The DBpedia Entity is ready, the total number of entity is %d" %(len(DBdic)))

endpoint = "http://dbpedia.org/sparql/"
error = 0
print("Remove Duplicates")
# Query DBpedia for wikiPageRedirects; entries redirecting to another kept
# entity are duplicates. Mutate a copy so iteration stays safe.
CopyDBdic = deepcopy(DBdic)
index = 0
for dic in DBdic.keys():
    sparqlString = "select ?p ?o where {<" + dic + "> ?p ?o}"
    index += 1
    if index % 100 == 0:
        print("Processing the %d"%(index))
    biordfsparql = ConnectEndPoint.ConnectEnd(endpoint, sparqlString)
    results = biordfsparql.Connect()
    for result in results["results"]["bindings"]:
        if result["p"]["value"] == "http://dbpedia.org/ontology/wikiPageRedirects":
            if result["o"]["value"] in DBdic:
                # pop with a default so a second redirect hit on the same
                # entity does not raise KeyError
                CopyDBdic.pop(dic, None)
            else:
                error += 1
DBdic = CopyDBdic
print("The length of DBpedia dic is %d" % (len(DBdic)))
print("Remove Duplicates is Done, the error is %d" % (error))

print("Writing Entity")
entity_path = "/Users/wenqiangliu/Documents/KGEmbedding/data/FB15k/entity_DBpedi2id.txt"
sameas_path = "/Users/wenqiangliu/Documents/KGEmbedding/data/FB15k/DB_FB.txt"
entityId = 1
# `with` guarantees both output files are flushed and closed (the originals
# were never closed, risking truncated output).
with open(entity_path, 'w') as entityFile, open(sameas_path, 'w') as SameAsFile:
    for entity in DBdic.keys():
        # BUG FIX: entityId is an int and raised TypeError when concatenated
        # to a str; each record also needs a terminating newline so the file
        # can be read back line-by-line like entity2id.txt.
        entityFile.write(entity + "\t" + str(entityId) + "\n")
        SameAsFile.write(entity + "\t" + DBdic[entity] + "\n")
        entityId += 1
print("All are Done!")
|
from configparser import ConfigParser

# Constant names are written in upper case by convention.
CONFIG_FILE = "config.txt"

# Parse the (GB2312-encoded) configuration file.
parser = ConfigParser()
parser.read(CONFIG_FILE, encoding='gb2312')

# Look up the greeting in the [messages] section and display it.
greeting = parser.get('messages', 'greeting')
print(greeting)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.