'''
Created on Jan 24, 2016
@author: Andrei Padnevici
@note: This is an exercise: 11.1
'''
import re
try:
fName = input("Enter file name: ")
if fName == "": fName = "mbox-short.txt"
file = open(fName)
except OSError:
    print("Invalid file")
    exit()
txt = file.read()
revisions = re.findall(r"New Revision: (\d+)", txt)
if revisions:
    print(sum(float(n) for n in revisions) / len(revisions))
else:
    print("No revisions found")
|
#! /usr/bin/python
import sys
import time
def test01 ():
k = 0
try:
buff = ''
while True:
buff += sys.stdin.read(1)
if buff.endswith('\n'):
print "XXXX: " + buff[:-1]
buff = ''
k = k + 1
except KeyboardInterrupt:
sys.stdout.flush()
pass
print k
def test02 ():
for line in sys.stdin:
        line = line.replace('\n', '')
        print "XXXX: " + line
# test01()
test02()
|
# File copy
'''with open('test.txt','r') as read_f,open('test1.txt','w+') as write_f:
    data=read_f.read()
    data=write_f.write(data)'''
# Image copy
with open('176817195_49.jpg','rb') as img_f,open('123.jpg','wb') as write_img:
    data=img_f.read()
    data=write_img.write(data)
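# A minimal sketch of the same binary copy done in fixed-size chunks, which
# avoids loading a large file entirely into memory (the output filename
# 123_chunked.jpg is made up for this example):
with open('176817195_49.jpg','rb') as src, open('123_chunked.jpg','wb') as dst:
    while True:
        chunk = src.read(64 * 1024)  # 64 KiB at a time
        if not chunk:
            break
        dst.write(chunk)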
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = patterns('',
url(r'^messages/', include('privatemessages.urls')),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('tornado_chat.views',
url(r'^$','log_in_chat' ),
url(r'^registration/$', 'reg'),
)
urlpatterns+=staticfiles_urlpatterns()
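# patterns() and string view references were removed in Django 1.10. A minimal
# replacement sketch in the modern list style (assumes log_in_chat and reg
# exist in tornado_chat.views, matching the strings above; not meant to run
# alongside the module above):
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from tornado_chat.views import log_in_chat, reg

urlpatterns = [
    url(r'^messages/', include('privatemessages.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', log_in_chat),
    url(r'^registration/$', reg),
] + staticfiles_urlpatterns()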
|
#! python3
import pymysql
import csv
db = pymysql.connect(host='localhost', user='mat', password='1980mk**', port=3306, db='spiders')
db.set_charset('utf8mb4')
cursor = db.cursor()
#sql = 'CREATE TABLE IF NOT EXISTS
# maoyan (排名 INT(4), 片名 VARCHAR(100), 演员 VARCHAR(255), 上映时间 VARCHAR(100), 评分 FLOAT(1,1))'
#cursor.execute(sql)
sql = 'ALTER TABLE maoyan DROP 排名'
cursor.execute(sql)
sql = 'ALTER TABLE maoyan ADD 排名 INT(4) PRIMARY KEY AUTO_INCREMENT FIRST'
cursor.execute(sql)
csvfile = open('data.csv', 'r')
reader = csv.reader(csvfile)
for row in reader:
if row[0] == '排名':
continue
data = {
'排名': int(row[0]),
'片名': row[1],
'演员': row[2],
'上映时间': row[3],
'评分': float(row[4])
}
table = 'maoyan'
keys = ', '.join(data.keys())
values = ', '.join(['%s'] * len(data))
sql = 'INSERT INTO {table}({keys}) VALUES ({values}) ON DUPLICATE KEY UPDATE '.format(table=table, keys=keys, values=values)
update = ', '.join(['{key}=%s'.format(key=key) for key in data.keys()])
sql += update
try:
cursor.execute(sql, tuple(data.values())*2)
db.commit()
except Exception as exc:
print(exc)
db.rollback()
sql = 'SELECT * FROM maoyan'
#sql2 = 'SELECT * FROM maoyan WHERE 排名 <= 10'
try:
cursor.execute(sql)
row = cursor.fetchone()
while row:
print(row)
row = cursor.fetchone()
except Exception as exc:
    print(exc)
db.close()
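# A minimal sketch of the same upsert with the cursor managed by a context
# manager, so it is closed even when execute() raises (the helper name
# upsert_row is made up for this example):
def upsert_row(db, table, data):
    keys = ', '.join(data.keys())
    values = ', '.join(['%s'] * len(data))
    update = ', '.join('{key}=%s'.format(key=key) for key in data.keys())
    sql = 'INSERT INTO {table}({keys}) VALUES ({values}) ON DUPLICATE KEY UPDATE {update}'.format(
        table=table, keys=keys, values=values, update=update)
    with db.cursor() as cur:
        cur.execute(sql, tuple(data.values()) * 2)
    db.commit()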
|
from flask import Flask
from flask.ext.babel import Babel, get_translations
from flask.ext.openid import OpenID
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.redis import Redis
from flask.ext.compass import Compass
from . import filters, context_processors, utils, ext
import __builtin__
import pytz
import logging.handlers
redis = Redis()
db = SQLAlchemy()
auth = ext.Auth()
postmark = ext.Postmark()
compass = Compass()
oid = OpenID()
babel = Babel()
def create_app(envar="FLASK_SETTINGS", config_object=None):
app = Flask(__name__)
    if config_object is None:
        app.config.from_envvar(envar)
else:
app.config.from_object(config_object)
if 'local_timezone' not in app.config:
app.config['local_timezone'] = pytz.timezone('Europe/Vienna')
    if 'type2view' not in app.config:
        app.config['type2view'] = {
'meetup': 'meetups.view_doc',
'company': 'companies.view_doc',
}
if 'LOG_FILE' in app.config:
handler = logging.handlers.RotatingFileHandler(app.config['LOG_FILE'],
backupCount=5, maxBytes=1000000)
if 'LOG_LEVEL' in app.config:
handler.setLevel(app.config['LOG_LEVEL'])
else:
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.jinja_env.filters['time'] = filters.timefilter
app.jinja_env.filters['date'] = filters.datefilter
app.jinja_env.filters['datecode'] = filters.datecode
app.jinja_env.filters['datetime'] = filters.datetimefilter
app.jinja_env.filters['rst'] = filters.rst
app.jinja_env.filters['urlencode'] = filters.urlencode
app.jinja_env.filters['urlize'] = filters.urlize
app.jinja_env.filters['in_future'] = filters.in_future
app.secret_key = app.config['SECRET_KEY']
db.init_app(app)
postmark.init_app(app)
auth.init_app(app)
compass.init_app(app)
from . import models
# Register modules
from .views.account import module as account_module
from .views.admin import module as admin_module
from .views.core import module as core_module
from .views.meetups import module as meetups_module
app.register_blueprint(core_module, url_prefix='')
app.register_blueprint(account_module, url_prefix='/account')
app.register_blueprint(admin_module, url_prefix='/admin')
app.register_blueprint(meetups_module, url_prefix='/meetups')
#Register context and request processors
app.context_processor(context_processors.add_form_generator)
# Register babel's i18n functions globally in order for Flatland to see
# them.
babel.init_app(app)
oid.init_app(app)
__builtin__.ugettext = lambda x: get_translations().ugettext(x)
__builtin__.ungettext = lambda x,s,p: get_translations().ungettext(x,s,p)
app.error_handlers[409] = utils.handle_conflict
redis.init_app(app)
return app
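if __name__ == "__main__":
    # Usage sketch for the factory above: FLASK_SETTINGS must point at a
    # readable config file (the path below is made up for this example).
    import os
    os.environ.setdefault("FLASK_SETTINGS", "/path/to/settings.cfg")
    create_app().run(debug=True)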
|
import numpy as np
import pandas as pd
from math import exp
import copy
from sklearn import preprocessing
# Feed Forward helper methods
def sigmoid(value):
if value < 0:
return 1 - 1 / (1 + exp(value))
else:
return 1.0/(1+exp(value * (-1)))
def sigma(matrix_weight, matrix_input, bias=0):
# Prereq: len(arr_weight) = len(arr_input)
return matrix_weight.dot(matrix_input.transpose()) + bias
# hidden_layer = int (number of hidden layers)
# nb_nodes = arr[int] (number of nodes per hidden layer)
# len_input_matrix = int (number of features)
# Output: List of Matrixes
# Method: Xavier/Glorot initialization (the sqrt(2/(fan_in+fan_out)) scale below)
# Link: https://towardsdatascience.com/weight-initialization-techniques-in-neural-networks-26c649eb3b78
def initialize_weights(hidden_layer, nb_nodes, len_input_matrix):
arr_weight_this_batch = list()
for i in range(hidden_layer):
if i==0:
nb_nodes_prev = len_input_matrix
else:
nb_nodes_prev = nb_nodes[i-1]
weight_matrix = np.random.randn(nb_nodes[i], nb_nodes_prev) * np.sqrt(2/(nb_nodes_prev+nb_nodes[i]))
arr_weight_this_batch.append(weight_matrix)
return arr_weight_this_batch
def error(feed_forward_output, target_output):
return ((target_output-feed_forward_output)**2)
def err_yes(value):
return (value > 0.15) # 15% fault tolerance
# See function: initialize_errors
def initialize_sigma(hidden_layer, nb_nodes):
list_sigma = list()
for i in range(hidden_layer):
arr_sigma = np.zeros(nb_nodes[i])
list_sigma.append(arr_sigma)
return list_sigma
# Backpropagation and Update Weight helper methods
# hidden_layer = int (number of hidden layers)
# nb_nodes = arr[int] (number of nodes per hidden layer)
# Output: List of Matrixes
def initialize_errors(hidden_layer, nb_nodes):
arr_neuron_errors = list()
for i in range(hidden_layer):
arr_error = np.empty(nb_nodes[i])
arr_neuron_errors.append(arr_error)
return arr_neuron_errors
def propagate_error_output_layer(feed_forward_output, target_output):
return feed_forward_output*(1-feed_forward_output)*(target_output-feed_forward_output)
def propagate_error_hidden_layer_neuron(sigma_output, error_contribution):
return sigmoid(sigma_output) * (1 - sigmoid(sigma_output)) * error_contribution
# error = neuron's error
def update_weight_neuron(weight_prev_prev, weight_prev, momentum, learning_rate, error, input_neuron):
    # weight_prev_prev = the weight from the batch before weight_prev; the
    # momentum term reuses the last update direction (weight_prev - weight_prev_prev).
    # Parameter order matches the call sites, which pass (nu, alfa) = (momentum, learning rate).
    return weight_prev + learning_rate * error * input_neuron + momentum * (weight_prev - weight_prev_prev)
# input_matrix = matrix[float] (data) (assumption: the last column is the class label)
# hidden_layers = int (number of hidden layers)
# nb_nodes = arr[int] (number of nodes per hidden layer)
# nu = float (momentum)
# alfa = float (learning rate)
# epoch = int (number of training loops)
# batch_size = int (mini-batch)
# output = FFNN prediction model (list of matrix)
def mini_batch_gradient_descent(input_matrix, hidden_layer, nb_nodes, nu, alfa, epoch, batch_size=1):
    # transpose-slicing to separate inputs from labels
col_width = input_matrix.shape[1]
input_col_width = col_width - 1
input_data = (input_matrix.transpose()[0:input_col_width]).transpose()
label_data = (input_matrix.transpose()[input_col_width:col_width]).transpose()
hidden_layer += 1
nb_nodes = np.append(nb_nodes, [1])
arr_neuron_errors = initialize_errors(hidden_layer, nb_nodes)
all_sigma_values = initialize_sigma(hidden_layer, nb_nodes)
arr_weight_this_batch = initialize_weights(hidden_layer, nb_nodes, input_col_width)
for no_epoch in range(epoch):
arr_weight_prev_batch = copy.deepcopy(arr_weight_this_batch) # tracking previous state of weights
batch_count = 0
error_value = 0
for no_input_data in range(len(input_data)):
# Feed Forward
one_sigma_values = list()
for no_hidden_layer in range(hidden_layer):
if no_hidden_layer == 0:
one_sigma_values.append(sigma(arr_weight_this_batch[no_hidden_layer], input_data[no_input_data]))
else:
one_sigma_values.append(sigma(arr_weight_this_batch[no_hidden_layer], one_sigma_values[no_hidden_layer-1]))
for no_rows in range(len(one_sigma_values[no_hidden_layer])):
output_i = sigmoid(one_sigma_values[no_hidden_layer][no_rows])
one_sigma_values[no_hidden_layer][no_rows] = output_i
all_sigma_values[no_hidden_layer][no_rows] += output_i
#Result of sigma will be array with 1 element only, so it's safe to select like this
error_value += error(one_sigma_values[hidden_layer - 1][0], label_data[no_input_data])[0]
batch_count += 1
if (batch_count == batch_size):
error_value /= batch_size
for no_hidden_layer in range(hidden_layer):
for no_neuron in range(len(all_sigma_values[no_hidden_layer])):
all_sigma_values[no_hidden_layer][no_neuron] /= batch_size
if (err_yes(error_value)):
# Back Propagation
output_error = propagate_error_output_layer(all_sigma_values[hidden_layer-1][0], label_data[no_input_data])
arr_neuron_errors[hidden_layer - 1][0] = output_error
for no_hidden_layer in range(hidden_layer-2, -1, -1):
for neuron in range(nb_nodes[no_hidden_layer]):
                            # accumulate this neuron's error_contribution from the next layer
error_contribution = 0
for output_neuron in range(nb_nodes[no_hidden_layer+1]):
error_contribution += arr_weight_this_batch[no_hidden_layer + 1][output_neuron][neuron] * arr_neuron_errors[no_hidden_layer + 1][output_neuron]
arr_neuron_errors[no_hidden_layer][neuron] = propagate_error_hidden_layer_neuron(all_sigma_values[no_hidden_layer][neuron], error_contribution)
# Update Weights
for no_hidden_layer in range(1, hidden_layer):
for neuron in range(nb_nodes[no_hidden_layer]):
for weight in range(len(arr_weight_this_batch[no_hidden_layer][neuron])):
arr_weight_this_batch[no_hidden_layer][neuron][weight] = update_weight_neuron(arr_weight_prev_batch[no_hidden_layer][neuron][weight], arr_weight_this_batch[no_hidden_layer][neuron][weight], nu, alfa, arr_neuron_errors[no_hidden_layer][neuron], all_sigma_values[no_hidden_layer-1][weight])
                    # the first hidden layer is special: its inputs come from the input data
for neuron in range(nb_nodes[0]):
for weight in range(input_col_width):
arr_weight_this_batch[0][neuron][weight] = update_weight_neuron(arr_weight_prev_batch[0][neuron][weight], arr_weight_this_batch[0][neuron][weight], nu, alfa, arr_neuron_errors[0][neuron], input_data[no_input_data][weight])
all_sigma_values = initialize_sigma(hidden_layer, nb_nodes)
error_value = 0
batch_count = 0
return arr_weight_this_batch
# predictor specifically for dataset that is classified into only 2 classes
def predict_2classes(model, arr_features, label):
all_sigma_values = list()
for no_hidden_layer in range(len(model)):
if (no_hidden_layer == 0):
all_sigma_values.append(sigma(model[no_hidden_layer], arr_features))
else:
all_sigma_values.append(sigma(model[no_hidden_layer], all_sigma_values[no_hidden_layer-1]))
for no_rows in range(len(all_sigma_values[no_hidden_layer])):
all_sigma_values[no_hidden_layer][no_rows] = sigmoid(all_sigma_values[no_hidden_layer][no_rows])
error_value = error(all_sigma_values[len(model) - 1][0], label)[0]
return (error_value < 0.5) #scaling : (0, 1)
def accuracy(model, input_matrix):
    # transpose-slicing to separate inputs from labels
col_width = input_matrix.shape[1]
input_col_width = col_width - 1
input_data = (input_matrix.transpose()[0:input_col_width]).transpose()
label_data = (input_matrix.transpose()[input_col_width:col_width]).transpose()
true_count = 0
false_count = 0
for no_input_data in range(len(input_data)):
if (predict_2classes(model, input_data[no_input_data], label_data[no_input_data])):
true_count += 1
else:
false_count += 1
return true_count / (true_count + false_count) * 100
# dataset load
csv_string = input("Input .csv filename: ")
try:
    df = pd.read_csv(csv_string)
except FileNotFoundError:
    print("File not found.")
    quit()
print("File loaded successfully.")
# dataset preprocess
def preprocess_dataframe(df):
# transform non-numeric data to numeric data
types = df.dtypes
labels = df.columns.values # because pandas select columns using column names
def transform_to_numeric(matrix_data):
for i in range(matrix_data.shape[1]):
type_i = types[i]
if (type_i == object):
values = matrix_data[labels[i]].unique()
dict_i = dict(zip(values, range(len(values)))) # transform every unique object/string into numbers
matrix_data = matrix_data.replace({labels[i]:dict_i})
elif (type_i == bool):
matrix_data[labels[i]] = matrix_data[labels[i]].astype(int)
return matrix_data
newdf = transform_to_numeric(df)
# scaling
def scale_data(matrix_data, min_val, max_val):
def scaling(value):
return (value - minValue)*(max_val - min_val)/(maxValue - minValue) + min_val
for x in range(matrix_data.shape[1]):
minValue = matrix_data[labels[x]].min()
maxValue = matrix_data[labels[x]].max()
matrix_data[labels[x]] = matrix_data[labels[x]].apply(scaling)
return matrix_data
data_matrix = scale_data(newdf, 0, 1)
data_matrix = data_matrix.to_numpy() #convert pandas dataframe to numpy array
return data_matrix
def split_train_test(matrix_data, test_portion):
total_data = len(matrix_data)
total_data_for_test = int(round(test_portion * total_data, 0))
total_data_for_train = total_data - total_data_for_test
return(matrix_data[0:total_data_for_train], matrix_data[total_data_for_train:total_data])
# input and main program
while True:
hidden_layers = int(input("Input number of hidden layers: "))
if (hidden_layers <= 10 and hidden_layers >= 0):
break
else:
print("# of hidden layers must be a positive integer and no more than 10.")
nb_nodes = np.empty(hidden_layers)
for i in range(hidden_layers):
while True:
nb_nodes[i] = int(input("Input number of nodes for hidden layer %d : " % i))
if (nb_nodes[i] > 0):
break
else:
print("# of nodes must be a positive integer.")
while True:
momentum = float(input("Input momentum: "))
if (momentum <= 1 and momentum >= 0):
break
else:
print("Momentum must be between 0 and 1.")
while True:
learning_rate = float(input("Input learning rate: "))
if (learning_rate <= 1 and learning_rate >= 0):
break
else:
print("Learning rate must be between 0 and 1.")
while True:
epoch = int(input("Input epoch: "))
if (epoch > 0):
break
else:
print("Epoch must be a positive integer.")
while True:
batch_size = int(input("Input the batch size: "))
if (batch_size > 0):
break
else:
print("Batch size must be a positive integer.")
while True:
test_size = float(input("Input the test size: "))
if (test_size > 0 and test_size < 1):
break
else:
print("Test size must be between 0 and 1.")
data_matrix = preprocess_dataframe(df)
train_matrix, test_matrix = split_train_test(data_matrix, test_size)
nb_nodes = nb_nodes.astype(int)  # needed because np.random.randn sizes must be ints, not floats
custom_model = mini_batch_gradient_descent(train_matrix, hidden_layers, nb_nodes, momentum, learning_rate, epoch, batch_size)
print("Accuracy: ", accuracy(custom_model, test_matrix))
|
#!/usr/bin/python
# dedup.py - Sat, 23 Apr 2011 00:03:30 -0400
# As command line arguments are files of md5sum's. The md5sums are read one
# by one in their order and all duplicated files are removed, only the
# files of their first appearance are kept in their old path.
import sys
import os.path
f = {} # File hashes
for md5list in sys.argv[1:]: # argv[0] is this script itself, so skip it
    md5file = open(md5list)
    for line in md5file.read().splitlines():
        if line[0:32] in f:
            print "Remove "+line[34:]
            os.remove(line[34:])
        else:
            print "Keep "+line[34:]
            f[line[0:32]] = line[34:]
    md5file.close()
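# A minimal Python 3 sketch of the same dedup idea (md5sum line format assumed:
# 32 hex digest characters, two separator characters, then the path):
import os
import sys

def dedup(md5_listings):
    seen = {}
    for listing in md5_listings:
        with open(listing) as fh:
            for line in fh:
                digest, path = line[:32], line.rstrip("\n")[34:]
                if digest in seen:
                    print("Remove", path)
                    os.remove(path)
                else:
                    print("Keep", path)
                    seen[digest] = path

if __name__ == "__main__":
    dedup(sys.argv[1:])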
|
import sys
import os
project = u'Ceph'
copyright = u'2018, SUSE, and contributors. Licensed under Creative Commons BY-SA'
version = 'dev'
release = 'dev'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['**/.#*', '**/*~', 'start/quick-common.rst']
if tags.has('man'):
exclude_patterns += ['architecture.rst', 'glossary.rst', 'release*.rst',
'api/*',
'cephfs/*',
'dev/*',
'install/*',
'mon/*',
'rados/*',
'radosgw/*',
'rbd/*',
'start/*']
pygments_style = 'sphinx'
html_theme = 'ceph'
html_theme_path = ['_themes']
html_title = "Ceph Documentation"
html_logo = 'logo.png'
html_favicon = 'favicon.ico'
html_use_smartypants = True
html_show_sphinx = False
html_sidebars = {
'**': ['smarttoc.html', 'searchbox.html'],
}
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'sphinx_ditaa',
'breathe',
]
todo_include_todos = True
top_level = os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
)
breathe_default_project = "Ceph"
# see $(top_srcdir)/Doxyfile
breathe_build_directory = os.path.join(top_level, "build-doc")
breathe_projects = {"Ceph": os.path.join(top_level, breathe_build_directory)}
breathe_projects_source = {
"Ceph": (os.path.join(top_level, "src/include/rados"),
["rados_types.h", "librados.h"])
}
breathe_domain_by_extension = {'py': 'py', 'c': 'c', 'h': 'c', 'cc': 'cxx', 'hpp': 'cxx'}
pybind = os.path.join(top_level, 'src/pybind')
if pybind not in sys.path:
sys.path.insert(0, pybind)
|
import sqlite3
import pandas as pd  # assumption: the truncated read_sql_ call below was meant to be pandas
conn = sqlite3.connect("factbook.db")
query = "SELECT * FROM facts;"
facts = pd.read_sql_query(query, conn)  # completed from the unfinished 'read_sql_'
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
# Create your views here.
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, UpdateView, DeleteView
from django.views.generic.list import MultipleObjectMixin
from accountapp.decorators import account_ownership_required
from accountapp.models import HelloWorld
# Instead of four separate @method_decorator lines, collect the decorators in a
# list once and pass the variable for a cleaner result
from articleapp.models import Article
has_ownership = [login_required,account_ownership_required ]
# Django's built-in login check; replaces the manual if/else authentication below
@login_required
def base(request):
    # if authenticated, run the code below
    # if request.user.is_authenticated:
    # branch on the HTTP method
    if request.method == "POST":
        # pull the field named 'hello_world_input' out of the POST data
        temp = request.POST.get('hello_world_input')
        # HelloWorld is the model we defined; put the text submitted through
        # the form (temp) into the model's text field and save it straight to
        # the database
        new_hello_world = HelloWorld()
        new_hello_world.text = temp
        new_hello_world.save()
        # hand the saved objects back to the template
        #return render(request, 'accountapp/middle.html', context={'hello_world_list': hello_world_list})
        # rendering here would re-run the POST (and re-save the text) on every
        # refresh, so the list would keep growing; redirecting back to GET
        # after one POST fixes that.
        # HttpResponseRedirect('account/base/') would also work, but reverse()
        # is more convenient later; 'accountapp:base' resolves because the
        # accountapp urls are namespaced 'accountapp' and name the route 'base'.
        # HttpResponseRedirect moves the client from here to another URL.
        return HttpResponseRedirect(reverse('accountapp:base'))
    else:
        # make GET show the same list POST did: pull every object out of the
        # HelloWorld model (all texts typed into the form so far)
        hello_world_list = HelloWorld.objects.all()
        return render(request, 'accountapp/middle.html', context={'hello_world_list': hello_world_list})
    # if not authenticated, send the user to the login page
    #else:
    #return HttpResponseRedirect(reverse('accountapp:login'))
class AccountCreateView(CreateView):
    # use Django's built-in User as the model
    model = User
    # use Django's built-in UserCreationForm as the form
    form_class = UserCreationForm
    # class-based views use reverse_lazy; function-based views use reverse
    success_url = reverse_lazy('articleapp:list')
    # template to render
    template_name = 'accountapp/create.html'
# add MultipleObjectMixin so the account detail page also lists the projects I made
class AccountDetailView(DetailView, MultipleObjectMixin):
    model = User
    # Instagram analogy: when someone else visits my page they can see my
    # posts (see detail.html)
    context_object_name = 'target_user'
    template_name = 'accountapp/detail.html'
paginate_by = 20
def get_context_data(self, **kwargs):
article_list = Article.objects.filter(writer=self.object)
return super().get_context_data(object_list=article_list,**kwargs)
#@method_decorator(login_required, 'get')
#@method_decorator(login_required, 'post')
# ownership check we wrote ourselves: login_required alone only verifies login;
# applying both means only the logged-in owner of the account can get through
# applied here via the has_ownership list
@method_decorator(has_ownership, 'get')
@method_decorator(has_ownership, 'post')
class AccountUpdateView(UpdateView):
model = User
form_class = UserCreationForm
context_object_name = 'target_user'
success_url = reverse_lazy('accountapp:base')
template_name = 'accountapp/update.html'
# method_decorator is what lets login_required-style decorators be applied to class-based views
@method_decorator(has_ownership, 'get')
@method_decorator(has_ownership, 'post')
class AccountDeleteView(DeleteView):
model = User
context_object_name = 'target_user'
success_url = reverse_lazy('articleapp:list')
template_name = 'accountapp/delete.html'
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from enum import IntEnum
from typing import Any
from pants.bsp.spec.base import BSPData, BuildTarget, BuildTargetIdentifier, Uri
# -----------------------------------------------------------------------------------------------
# Workspace Build Targets Request
# See https://build-server-protocol.github.io/docs/specification.html#workspace-build-targets-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class WorkspaceBuildTargetsParams:
@classmethod
def from_json_dict(cls, _d):
return cls()
def to_json_dict(self):
return {}
@dataclass(frozen=True)
class WorkspaceBuildTargetsResult:
targets: tuple[BuildTarget, ...]
@classmethod
def from_json_dict(cls, d):
return cls(targets=tuple(BuildTarget.from_json_dict(tgt) for tgt in d["targets"]))
def to_json_dict(self):
return {"targets": [tgt.to_json_dict() for tgt in self.targets]}
# -----------------------------------------------------------------------------------------------
# Build Target Sources Request
# See https://build-server-protocol.github.io/docs/specification.html#build-target-sources-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class SourcesParams:
targets: tuple[BuildTargetIdentifier, ...]
@classmethod
def from_json_dict(cls, d):
return cls(
targets=tuple(BuildTargetIdentifier.from_json_dict(x) for x in d["targets"]),
)
def to_json_dict(self):
return {
"targets": [tgt.to_json_dict() for tgt in self.targets],
}
class SourceItemKind(IntEnum):
FILE = 1
DIRECTORY = 2
@dataclass(frozen=True)
class SourceItem:
uri: Uri
kind: SourceItemKind
generated: bool = False
@classmethod
def from_json_dict(cls, d: Any):
return cls(
uri=d["uri"],
kind=SourceItemKind(d["kind"]),
generated=d["generated"],
)
def to_json_dict(self):
return {
"uri": self.uri,
"kind": self.kind.value,
"generated": self.generated,
}
@dataclass(frozen=True)
class SourcesItem:
target: BuildTargetIdentifier
sources: tuple[SourceItem, ...]
roots: tuple[Uri, ...] | None
@classmethod
def from_json_dict(cls, d: Any):
return cls(
target=BuildTargetIdentifier.from_json_dict(d["target"]),
sources=tuple(SourceItem.from_json_dict(i) for i in d["sources"]),
            roots=tuple(d["roots"]) if "roots" in d else None,
)
def to_json_dict(self):
result = {
"target": self.target.to_json_dict(),
"sources": [src.to_json_dict() for src in self.sources],
}
if self.roots is not None:
result["roots"] = list(self.roots)
return result
@dataclass(frozen=True)
class SourcesResult:
items: tuple[SourcesItem, ...]
@classmethod
def from_json_dict(cls, d: Any):
return cls(
items=tuple(SourcesItem.from_json_dict(i) for i in d["items"]),
)
def to_json_dict(self):
return {"items": [item.to_json_dict() for item in self.items]}
# -----------------------------------------------------------------------------------------------
# Dependency Sources Request
# See https://build-server-protocol.github.io/docs/specification.html#dependency-sources-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class DependencySourcesParams:
targets: tuple[BuildTargetIdentifier, ...]
@classmethod
def from_json_dict(cls, d):
return cls(
targets=tuple(BuildTargetIdentifier.from_json_dict(x) for x in d["targets"]),
)
def to_json_dict(self):
return {
"targets": [tgt.to_json_dict() for tgt in self.targets],
}
@dataclass(frozen=True)
class DependencySourcesItem:
target: BuildTargetIdentifier
# List of resources containing source files of the
# target's dependencies.
# Can be source files, jar files, zip files, or directories.
sources: tuple[Uri, ...]
def to_json_dict(self) -> dict[str, Any]:
return {
"target": self.target.to_json_dict(),
"sources": self.sources,
}
@dataclass(frozen=True)
class DependencySourcesResult:
items: tuple[DependencySourcesItem, ...]
def to_json_dict(self):
return {"items": [item.to_json_dict() for item in self.items]}
# -----------------------------------------------------------------------------------------------
# Dependency Modules Request
# See https://build-server-protocol.github.io/docs/specification.html#dependency-modules-request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class DependencyModulesParams:
targets: tuple[BuildTargetIdentifier, ...]
@classmethod
def from_json_dict(cls, d):
return cls(
targets=tuple(BuildTargetIdentifier.from_json_dict(x) for x in d["targets"]),
)
def to_json_dict(self):
return {
"targets": [tgt.to_json_dict() for tgt in self.targets],
}
@dataclass(frozen=True)
class DependencyModule:
# Module name
name: str
# Module version
version: str
# Language-specific metadata about this module.
# See MavenDependencyModule as an example.
data: BSPData | None
def to_json_dict(self) -> dict[str, Any]:
result: dict[str, Any] = {
"name": self.name,
"version": self.version,
}
if self.data is not None:
result["dataKind"] = self.data.DATA_KIND
result["data"] = self.data.to_json_dict()
return result
@dataclass(frozen=True)
class DependencyModulesItem:
target: BuildTargetIdentifier
modules: tuple[DependencyModule, ...]
def to_json_dict(self) -> dict[str, Any]:
return {
"target": self.target.to_json_dict(),
"modules": [m.to_json_dict() for m in self.modules],
}
@dataclass(frozen=True)
class DependencyModulesResult:
items: tuple[DependencyModulesItem, ...]
def to_json_dict(self):
return {"items": [item.to_json_dict() for item in self.items]}
|
import unittest
from katas.beta.print_that_calendar import show_calendar
class ShowCalendarTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(
show_calendar(2001, 10),
' October 2001\n'
'Mo Tu We Th Fr Sa Su\n'
' 1 2 3 4 5 6 7\n'
' 8 9 10 11 12 13 14\n'
'15 16 17 18 19 20 21\n'
'22 23 24 25 26 27 28\n'
'29 30 31\n'
)
def test_equal_2(self):
self.assertEqual(
show_calendar(2016, 5),
' May 2016\n'
'Mo Tu We Th Fr Sa Su\n'
' 1\n'
' 2 3 4 5 6 7 8\n'
' 9 10 11 12 13 14 15\n'
'16 17 18 19 20 21 22\n'
'23 24 25 26 27 28 29\n'
'30 31\n'
)
def test_equal_3(self):
self.assertEqual(
show_calendar(2015, 12),
' December 2015\n'
'Mo Tu We Th Fr Sa Su\n'
' 1 2 3 4 5 6\n'
' 7 8 9 10 11 12 13\n'
'14 15 16 17 18 19 20\n'
'21 22 23 24 25 26 27\n'
'28 29 30 31\n'
)
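# A minimal sketch of a show_calendar that matches the layout above, built on
# the stdlib calendar module; TextCalendar pads every week line to full width,
# so trailing spaces are stripped (exact padding may need tuning to the kata):
import calendar

def show_calendar(year, month):
    text = calendar.TextCalendar(calendar.MONDAY).formatmonth(year, month)
    return ''.join(line.rstrip() + '\n' for line in text.splitlines())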
|
# Generated by Django 1.10.7 on 2017-11-23 15:53
from django.db import migrations
def move_relaydomain_to_transport(apps, schema_editor):
"""Transform relaydomains to transports."""
RelayDomain = apps.get_model("relaydomains", "RelayDomain")
RecipientAccess = apps.get_model("relaydomains", "RecipientAccess")
Transport = apps.get_model("transport", "Transport")
ra_to_create = []
for rd in RelayDomain.objects.select_related("domain", "service"):
next_hop = "[{}]:{}".format(rd.target_host, rd.target_port)
tr = Transport.objects.create(
pattern=rd.domain.name,
service="relay",
next_hop=next_hop,
_settings={
"relay_target_host": rd.target_host,
"relay_target_port": rd.target_port,
"relay_verify_recipients": rd.verify_recipients
}
)
rd.domain.transport = tr
rd.domain.save(update_fields=["transport"])
if not rd.verify_recipients:
continue
ra_to_create.append(
RecipientAccess(
pattern=rd.domain.name, action="reject_unverified_recipient"))
RecipientAccess.objects.bulk_create(ra_to_create)
def backward(apps, schema_editor):
    """Reverse migration: intentionally a no-op."""
    pass
class Migration(migrations.Migration):
dependencies = [
('relaydomains', '0007_recipientaccess'),
('transport', '0001_initial'),
('admin', '0011_domain_transport'),
]
operations = [
        migrations.RunPython(move_relaydomain_to_transport, backward)
]
|
import tkinter as tk
from tkinter import messagebox
from tkinter.constants import LEFT
import tkinter.font as tkFont
import GetImage as GM
from tkinter.messagebox import askokcancel, showinfo, WARNING
def main(root,user,token,id,Room_ID):
root.title("Room Preview")
width=750
height=600
screenwidth = root.winfo_screenwidth()
screenheight = root.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
root.geometry(alignstr)
root.resizable(width=False, height=False)
BgImg = GM.getImage("D:\Programming\Python\Room_Rental\Images\BG.jpg", 744, 592)
BGLabel=tk.Label(root,image=BgImg)
BGLabel.image=BgImg; BGLabel["bg"] = "#393d49"
BGLabel.place(x=3,y=2,width=744,height=594)
Divider1=tk.Label(root)
Divider1["bg"] = "#90ee90"
Divider1["fg"] = "#333333"
Divider1["justify"] = "center"
Divider1.place(x=0,y=170,width=744,height=3)
img = GM.getImage("D:\Programming\Python\Room_Rental\Images\Logo.png",172,100)
Logo=tk.Button(root,image=img)
Logo.image=img
Logo["bg"] = "#f6f5f4"
Logo["justify"] = "center"
Logo.place(x=20,y=60,width=172,height=100)
img = GM.getImage("D:\Programming\Python\Room_Rental\Images\preview1.jpg",150,150)
Title=tk.Button(root,image=img,compound=LEFT)
Title.image=img
Title["bg"] = "#34d8eb"
ft = tkFont.Font(family='Times',size=25)
Title["font"] = ft
Title["fg"] = "#000000"
Title["justify"] = "center"
Title["text"] = "Aspires Room Rental\n\nRoom Preview"
Title.place(x=210,y=10,width=523,height=150)
Back=tk.Button(root)
Back["bg"] = "#eb8c34"
ft = tkFont.Font(family='Times',size=16)
Back["font"] = ft
Back["fg"] = "#000000"
Back["justify"] = "center"
Back["text"] = "Back"
Back.place(x=20,y=10,width=172,height=43)
Back["command"] = lambda : Back_command(root,user,token,id)
Name=tk.Label(root,highlightbackground = "yellow", highlightcolor= "yellow", highlightthickness=2)
Name["bg"] = "#393d49"
Name["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=16)
Name["font"] = ft
Name["fg"] = "#ffffff"
Name["justify"] = "center"
Name["text"] = "Name"
Name.place(x=320,y=180,width=410,height=45)
Location=tk.Text(root)
Location["bg"] = "#ffffff"
Location["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=14)
Location["font"] = ft
Location["fg"] = "#000000"
Location.place(x=320,y=280,width=410,height=90)
City=tk.Label(root,highlightbackground = "yellow", highlightcolor= "yellow", highlightthickness=2)
City["bg"] = "#393d49"
City["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=16)
City["font"] = ft
City["fg"] = "#ffffff"
City["justify"] = "center"
City["text"] = ""
City.place(x=320,y=232,width=410,height=40)
Amenities=tk.Text(root)
Amenities["bg"] = "#ffffff"
Amenities["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=14)
Amenities["font"] = ft
Amenities["fg"] = "#000000";
Amenities.place(x=320,y=490,width=410,height=90)
Type=tk.Label(root)
Type["bg"] = "#ffffff"
Type["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=14)
Type["font"] = ft
Type["fg"] = "#000000"
Type["justify"] = "center"
Type["text"] = "Rent"
Type.place(x=370,y=380,width=190,height=45)
Rent=tk.Label(root)
Rent["bg"] = "#ffffff"
Rent["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=14)
Rent["font"] = ft
Rent["fg"] = "#000000"
Rent["justify"] = "center"
Rent["text"] = "Rent"
Rent.place(x=635,y=380,width=95,height=45)
RentLabel=tk.Label(root)
RentLabel["bg"] = "#393d49"
ft = tkFont.Font(family='Times',size=13)
RentLabel["font"] = ft
RentLabel["fg"] = "#ffffff"
RentLabel["justify"] = "center"
RentLabel["text"] = "Rent"
RentLabel.place(x=575,y=380,width=60,height=45)
TypeLabel=tk.Label(root)
TypeLabel["bg"] = "#393d49"
ft = tkFont.Font(family='Times',size=13)
TypeLabel["font"] = ft
TypeLabel["fg"] = "#ffffff"
TypeLabel["justify"] = "center"
TypeLabel["text"] = "Type"
TypeLabel.place(x=320,y=380,width=50,height=45)
if token == 'S' :
Book=tk.Button(root)
Book["bg"] = "#46eb43"
Book["borderwidth"] = "4px"
ft = tkFont.Font(family='Times',size=16)
Book["font"] = ft
Book["fg"] = "#2e3436"
Book["justify"] = "center"
Book["text"] = "Proceed to Book"
Book.place(x=10,y=500,width=295,height=52)
Book["command"] = lambda : Book_command(root,user,token,id,Room_ID,Rent["text"])
img = GM.getImage("D:\Programming\Python\Room_Rental\Images\search.jpg",297,300)
Photo=tk.Button(root,image=img)
Photo.image = img
Photo["bg"] = "#f6f5f4"
ft = tkFont.Font(family='Times',size=10)
Photo["font"] = ft
Photo["fg"] = "#2e3436"
Photo["justify"] = "center"
Photo["text"] = "Image of room"
Photo.place(x=10,y=180,width=300,height=300)
Owner=tk.Label(root)
Owner["bg"] = "#393d49"
Owner["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=14)
Owner["font"] = ft
Owner["fg"] = "#ffffff"
Owner["justify"] = "center"
Owner["text"] = ""
Owner.place(x=320,y=440,width=240,height=37)
Contact=tk.Label(root)
Contact["bg"] = "#393d49"
Contact["borderwidth"] = "1px"
ft = tkFont.Font(family='Times',size=14)
Contact["font"] = ft
Contact["fg"] = "#ffffff"
Contact["justify"] = "center"
Contact["text"] = ""
Contact.place(x=575,y=440,width=155,height=36)
FillPage(Room_ID,Name,Location,City,Amenities,Type,Rent,Owner,Contact,Photo)
def Back_command(root,user,token,id):
if token == 'S' : import StudentHome as SH; SH.main(root,user,id)
else : import OwnerHome as OH; OH.main(root,user,id)
def Book_command(root,user,token,id,Room_ID,p):
answer = askokcancel( title='Booking Confirmation', message='Are you sure about Booking the Room ? (you cannot cancel once booked)', icon=WARNING)
if answer :
import DatabaseConnection as DB
room_name = DB.runQuery2("select name from Room where r_id = "+str(Room_ID))[0]
from datetime import datetime; now = datetime.now();
formatted_date = now.strftime('%Y-%m-%d %H:%M:%S')
bid = 1
try : bid = int(DB.runQuery("select max(b_id) from Booking")[0][0]) + 1
except : bid = 1
owner_id = DB.runQuery2("select owner_id from Room where r_id = "+str(Room_ID))[0]
DB.insertBooking(int(bid),formatted_date,int(p),int(owner_id),int(id),int(Room_ID))
DB.runQuery2("update Booking set b_date = now() where room_id = "+str(Room_ID))
messagebox.showinfo("Success !", "Room named {} booked for {}".format(room_name,user))
import StudentHome as Home; Home.main(root,user,id)
def FillPage(Room_ID,Name,Location,City,Amenities,Type,Rent,Owner,Contact,Photo) :
import DatabaseConnection as DB
Tup = DB.runQuery2("select name, location, city, Amenities, type, price, owner_name from Room where r_id = %s" % (Room_ID))
#contact = str(DB.runQuery2("select mobile from Owner where o_id = (select owner_id from Room where r_id = "+str(Room_ID)+")"))[1:11]
contact = str(DB.runQuery2("select mobile from Owner where o_id = (select owner_id from Room where r_id = %s)" % (Room_ID)))[1:11] #cloud
Name["text"] = Tup[0]; City["text"] = Tup[2]; Type["text"] = Tup[4]; Rent["text"] = Tup[5]
Owner["text"] = "Owner : "+Tup[6]; Contact["text"] = "(+91) "+contact
Location.insert(tk.END,Tup[1])
Amenities.insert(tk.END,Tup[3])
Location.config(state="disabled")
Amenities.config(state="disabled")
import mysql.connector
from PIL import Image, ImageTk
from io import BytesIO
import PIL.Image
#cnx = mysql.connector.connect(host="localhost", user="root",password="", database="RoomRental")
#cnx = mysql.connector.connect(user="ugqiri0xcve8arnj", password="W05Xj0GMrQfciurwXyku", host="b1d548joznqwkwny7elp-mysql.services.clever-cloud.com",database="b1d548joznqwkwny7elp")
cnx = mysql.connector.connect(host = "remotemysql.com", user="o67DNqMxP5",password="JHs8dXYWg4", database="o67DNqMxP5")
cursor = cnx.cursor(buffered=True)
cursor.execute("select images from Room where r_id = %s" %(Room_ID))
data = cursor.fetchall()
img = data[0][0]
img1 = Image.open(BytesIO(img))
    render = ImageTk.PhotoImage(img1.resize((297,300),Image.LANCZOS))  # ANTIALIAS was removed in Pillow 10; LANCZOS is its replacement
Photo.config(image=render)
Photo.image=render;
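# A minimal sketch of the image lookup above as a parameterized query, which
# keeps Room_ID out of the SQL string instead of %-formatting it in (schema
# and connection assumed to match the code above; the helper name is made up):
def fetch_room_image(cnx, room_id):
    cursor = cnx.cursor(buffered=True)
    cursor.execute("select images from Room where r_id = %s", (room_id,))
    return cursor.fetchall()[0][0]  # raw image bytes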
|
"""index file manager."""
import hashlib
import os
import json
from cryptarchive import constants
from cryptarchive.errors import FileNotFound
# ============ INDEX FORMAT =================
# as json:
# {
# "dirs": {
# name: {
# name: {
# "name": childname,
# "isdir": isdir,
# "id": id,
# }
# }
# ...
# },
# }
class Index(object):
"""An Index file manager"""
def __init__(self, indexdata):
self._index = indexdata
@classmethod
def new(cls):
"""create a new index."""
indexdata = {
"dirs": {
"/": {
},
},
}
return cls(indexdata)
@classmethod
def loads(cls, data):
"""
Load the Index from data.
:param data: data to load from
:type data: str
:return: cls loaded from data
:rtype: instance of cls
"""
content = json.loads(data)
return cls(content)
def dumps(self):
"""
Serializes this index.
:return: serialized data of this Index
:rtype: str
"""
return json.dumps(self._index)
def mkdir(self, path):
"""
Create a directory.
:param path: virtual path to create
:type path: str
"""
path = self.normalize_dir_path(path)
if self.dir_exists(path):
return
parent = self.normalize_dir_path(os.path.dirname(os.path.dirname(path)))
if parent in ("/", "\\", ""):
parent = "/"
if (not self.dir_exists(parent)) and (parent is not None):
self.mkdir(parent)
self._index["dirs"][path] = {}
if parent is not None:
self._index["dirs"][parent][path] = {
"name": os.path.basename(os.path.dirname(path)),
"isdir": True,
"id": path,
}
def dir_exists(self, path):
"""
check if the directory exists.
:param path: path to check
:type path: str
        :return: whether the path exists or not
:rtype: boolean
"""
path = self.normalize_dir_path(path)
if path in ("/", "", "\\"):
return True
return (path in self._index["dirs"])
def file_exists(self, path):
"""
Check if the file exists.
:param path: path to check
:type path: str
        :return: whether the path exists or not
:rtype: boolean
"""
parent = self.normalize_dir_path(os.path.dirname(path))
path = self.normalize_file_path(path)
fn = path # os.path.basename(path)
if parent not in self._index["dirs"]:
return False
if fn in self._index["dirs"][parent]:
return True
else:
return False
def listdir(self, path):
"""
Returns the content of the given path.
:param path: path to list
:type path: str
:return: [(name, isdir) of each file]
:rtype: list of tuples of (str, bool)
"""
path = self.normalize_dir_path(path)
if path not in self._index["dirs"]:
return []
ret = []
for name in self._index["dirs"][path]:
data = self._index["dirs"][path][name]
ret.append((data["name"], data["isdir"]))
return ret
def create_file(self, path):
"""
Create a new file.
Return the file id.
:param path: virtual path of the new file
:type path: str
:return: the new file id
:rtype: str
"""
path = self.normalize_file_path(path)
if self.file_exists(path):
return self.get_file_id(path)
parent = self.normalize_dir_path(os.path.dirname(path))
if not self.dir_exists(parent):
self.mkdir(parent)
fid = self.new_file_id(parent, path)
self._index["dirs"][parent][path] = {
"name": os.path.basename(path),
"isdir": False,
"id": fid,
}
return fid
def remove_from_index(self, path):
"""
Remove path from the index.
:param path: virtual path to remove from the index.
:type path: str
:return: list of removed fileids
:rtype: list of str
"""
removed = []
normalized = self.normalize_dir_path(path)
# if path is a directory, remove all children
if normalized in list(self._index["dirs"]):
for child in list(self._index["dirs"][normalized]):
removed += self.remove_from_index(child)
# remove all references to path
for dn in self._index["dirs"]:
dircontent = self._index["dirs"][dn]
for sp in dircontent.keys():
fid = self._index["dirs"][dn][sp]["id"]
isdir = self._index["dirs"][dn][sp]["isdir"]
if (sp == path) or (fid == path):
if not isdir:
removed.append(fid)
del self._index["dirs"][dn][sp]
return [self._encode(e) for e in removed]
def new_file_id(self, parentdir, name):
"""
Generate a new file id.
:param parentdir: the parent directory
:type parentdir: str
:param name: the filename
:type name: str
:return: the generated file id
:rtype: str
"""
parentdir = self.normalize_dir_path(parentdir)
fid = hashlib.sha256(parentdir + name).hexdigest()
return fid
def get_file_id(self, path):
"""
Return the file id for the file at path.
:param path: path to get fileid for
:type path: str
:return: the fileid of path
:rtype: str
"""
parent = self.normalize_dir_path(os.path.dirname(path))
path = self.normalize_file_path(path)
if parent not in self._index["dirs"]:
raise FileNotFound("No such directory: '{p}'!".format(p=parent))
if path not in self._index["dirs"][parent]:
raise FileNotFound("No such File: '{p}'!".format(p=path))
else:
return self._encode(self._index["dirs"][parent][path]["id"])
def _encode(self, s):
"""
Encode s into the index encoding.
:param s: string to encode
:type s: str or unicode
:return: encoded string
:rtype: str
"""
if isinstance(s, str):
return s
elif isinstance(s, unicode):
return s.encode(constants.INDEX_ENCODING)
else:
raise TypeError("Expected string or unicode, got {t}".format(t=type(s)))
def normalize_dir_path(self, path):
"""
Return a normalized directory path.
Example:
/test/ -> /test/
/test -> /test/
/test// -> /test/
test/ -> /test/
\\test\\ -> /test/
:param path: path to normalize
:type path: str
:return: the normalized path
:rtype: str
"""
# 1. ensure final slash
path = os.path.join(*os.path.split(path))
if not path.endswith(os.sep): # sep will be converted later
path += os.sep
        # 2. always use '/' as separator
path = path.replace(os.path.sep, "/")
# 3. remove multi slashes
while "//" in path:
path = path.replace("//", "/")
# 4.ensure root is "/":
if len(path) == 0:
path = "/"
# 5. ensure start slash
if not path.startswith("/"):
path = "/" + path
return path
def normalize_file_path(self, path):
"""
Return a normalized file path.
Example:
/test/file.txt -> /test/file.txt
/test//file.txt -> /test/file.txt
test/file.txt -> /test/file.txt
\\test\\file.txt -> /test/file.txt
:param path: path to normalize
:type path: str
:return: the normalized path
:rtype: str
"""
        # 1. always use '/' as separator
path = path.replace(os.path.sep, "/")
# 2. remove multi slashes
while "//" in path:
path = path.replace("//", "/")
# 3. ensure start slash
if not path.startswith("/"):
path = "/" + path
return path
def move(self, src, dest):
"""
Move src to dest.
:param src: the source path
:type src: str
:param dest: the destination path
:type dest: str
"""
src = self.normalize_file_path(src)
if not (self.dir_exists(src) or self.file_exists(src)):
raise FileNotFound("No such file or directory: '{p}'!".format(p=src))
dest = self.normalize_file_path(dest)
srcparent = self.normalize_dir_path(os.path.dirname(src))
destparent = self.normalize_dir_path(os.path.dirname(dest))
if not self.dir_exists(destparent):
self.mkdir(destparent)
olddata = self._index["dirs"][srcparent][src]
self._index["dirs"][destparent][dest] = {
"name": os.path.basename(dest),
"isdir": olddata["isdir"],
"id": olddata["id"],
}
self.remove_from_index(src)
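# A minimal usage sketch for the Index class above (paths made up; the module
# targets Python 2, per the unicode branch in _encode):
if __name__ == "__main__":
    idx = Index.new()
    idx.mkdir("/docs/")
    fid = idx.create_file("/docs/readme.txt")
    print idx.listdir("/docs/")  # -> [('readme.txt', False)]
    idx.move("/docs/readme.txt", "/docs/notes.txt")
    restored = Index.loads(idx.dumps())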
|
"""
Placeholder plugin to test the plugin system.
"""
__help__ = 'Hello world. A throwaway module.'
def seed_parser(parser):
"""
Seed the parser.
"""
parser.add_argument('--awesome',dest='awesome',default='completely')
def seed_commands(commands):
"""
Seeds the commands.
"""
def awesome(args):
print 'I am',args.awesome,'awesome.'
commands['yo'] = {'__default__':awesome}
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
# CSS/Tag
html = urlopen("https://morvanzhou.github.io/static/scraping/list.html").read().decode("UTF-8")
print('here is the html structure: \n')
print(html)
soup = BeautifulSoup(html, features='lxml')
#month = soup.find_all('li', {'class': 'month'})
#for m in month:
#    print(m.get_text())  # without get_text(), print(m) would include the tags
jan = soup.find('ul', {"class": 'jan'})  # find() returns one tag; find_all() returns a list, which has no .find_all()
print('--'*20)
print(jan)
d_jan = jan.find_all('li')
for d in d_jan:
    print(d.get_text())
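# The same li extraction as a CSS selector, a common BeautifulSoup alternative
# to chained find()/find_all():
for d in soup.select('ul.jan li'):
    print(d.get_text())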
|
import functools
import numpy as np
import pandas as pd
def handle_na(func):
"""Decorator for scalar function so it returns nan when nan is input"""
@functools.wraps(func)
def func_wrapper(arg, *args, **kwargs):
if pd.isna(arg):
return arg
return func(arg, *args, **kwargs)
func_wrapper.__doc__ = func.__doc__ if func.__doc__ else ""
func_wrapper.__doc__ += "\n@about: return numpy.nan if arg is nan\n"
return func_wrapper
def notna(obj):
"""Detect none missing values for an array-like or scalar object."""
return np.logical_not(pd.isna(obj))
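# A quick usage sketch of handle_na on a scalar function (np.sqrt picked just
# for illustration):
@handle_na
def safe_sqrt(value):
    """Return the square root of value."""
    return np.sqrt(value)

print(safe_sqrt(4.0))     # 2.0
print(safe_sqrt(np.nan))  # nan is passed straight through, np.sqrt never runs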
|
import logging
import networkx as nx
import os
import time
class mp_network:
id = None
logger = None
cfg = {}
node_types = []
edge_types = []
graph = None
def __init__(self, config = None):
# initialize id
self.id = int(time.time() * 100)
# initialize logger
self.logger = logging.getLogger('metapath')
self.node_types = ['e', 'tf', 's']
self.edge_types = [('e', 'tf'), ('s', 'tf')]
# create empty network instance if no configuration is given
if config == None:
return
self.cfg = config
self.update()
#
# create NetworkX graph object
#
def update(self,
nodelist = {'type': None, 'list': []},
edgelist = {'type': (None, None), 'list': []}):
# update node list from function parameter
if nodelist['type'] in self.node_types:
self.cfg['nodes'][nodelist['type']] = nodelist['list']
# update edge list from function parameter
if edgelist['type'] in self.edge_types:
edge_type = edgelist['type'][0] + '-' + edgelist['type'][1]
self.cfg['edges'][edge_type] = edgelist['list']
# filter edges to valid nodes
for src_type, tgt_type in self.edge_types:
edge_type = src_type + '-' + tgt_type
filtered_edges = []
for src, tgt in self.cfg['edges'][edge_type]:
if not src in self.cfg['nodes'][src_type]:
continue
if not tgt in self.cfg['nodes'][tgt_type]:
continue
filtered_edges.append((src, tgt))
self.cfg['edges'][edge_type] = filtered_edges
# reset nodes, edges and name of graph
try:
self.graph.clear()
            self.graph.graph['name'] = self.cfg['name']  # graph-level attributes live in graph.graph
except:
self.graph = nx.Graph(name = self.cfg['name'])
# add nodes
sort_id = 0
for type_id, type in enumerate(self.node_types):
for type_node_id, node in enumerate(self.cfg['nodes'][type]):
id = type + ':' + node
if id in self.graph.nodes():
continue
self.graph.add_node(
id,
label = node,
sort_id = sort_id,
params = {
'type': type,
'type_id': type_id,
'type_node_id': type_node_id})
sort_id += 1
# add edges
sort_id = 0
for type_id, (src_type, tgt_type) in enumerate(self.edge_types):
edge_type = src_type + '-' + tgt_type
if not edge_type in self.cfg['edges']:
self.cfg['edges'][edge_type] = []
continue
for (src, tgt) in self.cfg['edges'][edge_type]:
src_node_id = src_type + ':' + src
tgt_node_id = tgt_type + ':' + tgt
self.graph.add_edge(
src_node_id, tgt_node_id,
weight = 0,
sort_id = sort_id,
params = {'type': edge_type, 'type_id': type_id})
sort_id += 1
#
# accessing nodes
#
def nodes(self, **params):
# filter search criteria and order entries
sorted_list = [None] * self.graph.number_of_nodes()
for node, attr in self.graph.nodes(data = True):
if not params == {}:
passed = True
for key in params:
if not key in attr['params'] \
or not params[key] == attr['params'][key]:
passed = False
break
if not passed:
continue
sorted_list[attr['sort_id']] = node
# filter empty nodes
filtered_list = []
for node in sorted_list:
if node:
filtered_list.append(node)
return filtered_list
def node_labels(self, **params):
list = []
for node in self.nodes(**params):
list.append(self.graph.node[node]['label'])
return list
def node(self, node):
return self.graph.node[node]
#
# accessing edges
#
def edges(self, **params):
# filter search criteria and order entries
sorted_list = [None] * self.graph.number_of_edges()
for src, tgt, attr in self.graph.edges(data = True):
if not params == {}:
passed = True
for key in params:
if not key in attr['params'] \
or not params[key] == attr['params'][key]:
passed = False
break
if not passed:
continue
sorted_list[attr['sort_id']] = (src, tgt)
# filter empty nodes
filtered_list = []
for edge in sorted_list:
if edge:
filtered_list.append(edge)
return filtered_list
def edge_labels(self, **params):
list = []
for src, tgt in self.edges(**params):
src_label = self.graph.node[src]['label']
tgt_label = self.graph.node[tgt]['label']
list.append((src_label, tgt_label))
return list
#
# get / set
#
def get(self):
return {
'id': self.id,
'cfg': self.cfg,
'node_types': self.node_types,
'edge_types': self.edge_types,
'graph': self.graph
}
def set(self, **params):
if 'id' in params:
self.id = params['id']
if 'cfg' in params:
self.cfg = params['cfg']
if 'node_types' in params:
self.node_types = params['node_types']
if 'edge_types' in params:
self.edge_types = params['edge_types']
if 'graph' in params:
self.graph = params['graph']
return True
## def get_node_labels(self, params = {}, type = None, type_id = None):
##
## nodes = self.get_nodes(type = type, type_id = type_id, params}
## self.update(
## e = config['e'], s = config['s'], tf = config['tf'],
## edges = config['s-tf'] + config['e-tf'])
## def update(self, e = [], tf = [], s = [], edges = []):
##
## # update nodes
## if e and tf and s:
## self.node = {}
##
## nodeindex = 0
## if e:
## for node in e:
## id = 'e:' + node
##
## if id in self.node:
## continue
##
## self.node[id] = {
## 'name': node,
## 'class': 'e',
## 'index': nodeindex}
##
## nodeindex += 1
## else:
## pass
## #e = self.node['e']
##
##
## quit()
##
## if tf:
## self.node['tf'] = list(set(tf))
## else:
## tf = self.node['tf']
##
## if s:
## self.node['s'] = list(set(s))
## else:
## s = self.node['s']
##
## self.nodes = list(set(s + e + tf))
##
## # update edges
## if edges:
## self.edges = list(set(edges))
##
## edges = []
## for (val1, val2) in self.edges:
## if (val1 in self.nodes) and (val2 in self.nodes):
## edges.append((val1, val2))
##
## self.edges = edges
##
## ## update matrices
## #self.update_matrices()
##
## ## update graph
## #self.update_graph()
def save_graph(self, file = None, format = 'gml'):
if file == None:
self.logger.error("no save path was given")
quit()
# create path if not available
if not os.path.exists(os.path.dirname(file)):
os.makedirs(os.path.dirname(file))
        # everything seems to be fine
# self.logger.info("saving graph to %s" % (file))
if format == 'gml':
G = self.graph.copy()
nx.write_gml(G, file)
def plot(self, file = None,
edges = 'weights', draw_edge_labels = True, edge_threshold = 0.0,
nodes = 'labels', draw_node_captions = False,
caption = None, title = None, colors = None, dpi = 300):
# create path if not available
if not os.path.exists(os.path.dirname(file)):
os.makedirs(os.path.dirname(file))
# check if python module 'pyplot' is available
try:
import matplotlib.pyplot as plt
except:
self.logger.critical("could not import python module 'pyplot'")
quit()
import numpy as np
# everything seems to be fine
## self.logger.info("saving graph plot to %s" % (file))
# calculate sizes
zoom = 1
        node_groups = {t: self.node_labels(type = t) for t in self.node_types}  # node labels grouped by type, for the stack layout
        scale = min(250.0 / max(
            len(node_groups['e']), len(node_groups['tf']), len(node_groups['s'])), 30.0)
graph_node_size = scale ** 2
graph_font_size = 0.4 * scale
graph_caption_factor = 0.5 + 0.003 * scale
graph_line_width = 0.5
# calculate node positions for 'stack layout'
pos = {}
        for node, attr in self.graph.nodes(data = True):
            node_type = attr['params']['type']
            group = node_groups[node_type]
            x = (group.index(attr['label']) + 0.5) / len(group)
            y = ['e', 'tf', 's'].index(node_type) * 0.5
            pos[node] = (x, y)
# calculate node caption positions for 'stack layout'
pos_caption = {}
        for node, attr in self.graph.nodes(data = True):
            node_type = attr['params']['type']
            group = node_groups[node_type]
            x = (group.index(attr['label']) + 0.5) / len(group)
            y = (['e', 'tf', 's'].index(node_type) - 1) * graph_caption_factor + 0.5
            pos_caption[node] = (x, y)
# create figure object
fig = plt.figure()
# draw labeled nodes
for node, attr in self.graph.nodes(data = True):
# calculate weight sum of node
if edges == 'weights':
weight_sum = 0
                for (n1, n2, edge_attr) in self.graph.edges(nbunch = [node], data = True):
weight_sum += edge_attr['weight']
weight_sum = min(0.01 + 0.3 * weight_sum, 1)
elif edges == 'adjacency':
weight_sum = 1
# calculate rgba-color of node
c = 1 - weight_sum
if colors == None or colors == 'colors':
color = {
's': (1, c, c, 1),
'tf': (c, 1, c, 1),
'e': (1, 1, c, 1)
                }[attr['params']['type']]
elif colors == 'grey':
color = (0.3 + 2 * c / 3, 0.3 + 2 * c / 3, 0.3 + 2 * c / 3, 1)
# draw node
nx.draw_networkx_nodes(
self.graph, pos,
node_size = graph_node_size,
linewidths = graph_line_width,
nodelist = [node],
node_shape = 'o',
#alpha = weight_sum,
node_color = color)
# draw node label
node_font_size = 1.5 * graph_font_size / np.sqrt(max(len(node) - 1, 1))
nx.draw_networkx_labels(
self.graph, pos,
font_size = node_font_size,
labels = {node: attr['label']},
font_weight = 'normal')
# draw node caption
            if draw_node_captions and not attr['params']['type'] == 'tf':
approx = ' $' + '%d' % (100 * attr['approx']) + '\%$'
nx.draw_networkx_labels(
self.graph, pos_caption,
font_size = 0.65 * graph_font_size,
labels = {node: approx},
font_weight = 'normal')
# draw labeled edges
if edges == 'weights':
for (v, h) in self.graph.edges():
if colors == None or colors == 'colors':
if self.graph.edge[v][h]['value'] < 0:
color = 'red'
else:
color = 'green'
elif colors == 'grey':
color = 'black'
if self.graph.edge[v][h]['weight'] > edge_threshold:
nx.draw_networkx_edges(
self.graph, pos,
width = self.graph.edge[v][h]['weight'] * graph_line_width,
edgelist = [(v, h)],
edge_color = color,
alpha = 1)
if draw_edge_labels:
size = graph_font_size / 1.5
label = ' $' + ('%.2g' % (np.abs(self.graph.edge[v][h]['value']) * 100)) + '$'
nx.draw_networkx_edge_labels(
self.graph, pos,
edge_labels = {(v, h): label},
font_color = color,
clip_on = False,
font_size = size, font_weight = 'normal')
elif edges == 'adjacency':
for (v, h) in self.graph.edges():
nx.draw_networkx_edges(
self.graph, pos,
width = 1 * graph_line_width,
edgelist = [(v, h)],
alpha = 1)
# draw title
        if title == None:
            title = self.cfg['name']
plt.figtext(.5, .92, title, fontsize = 10, ha = 'center')
# draw caption
if caption == None:
if edges == 'weights':
label_text = r'$\mathbf{Network:}\,\mathrm{%s}$' % (self.graph)
elif edges == 'adjacency':
label_text = r'$\mathbf{Network:}\,\mathrm{%s}$' % (self.graph)
plt.figtext(.5, .06, label_text, fontsize = 10, ha = 'center')
else:
plt.figtext(.5, .06, caption, fontsize = 10, ha = 'center')
plt.axis('off')
# save plot or show
if file == None:
plt.show()
else:
plt.savefig(file, dpi = dpi)
# clear current figure object and release memory
plt.clf()
plt.close(fig)
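# A minimal usage sketch for mp_network (node and edge names are made up):
if __name__ == '__main__':
    cfg = {
        'name': 'demo',
        'nodes': {'e': ['e1', 'e2'], 'tf': ['tf1'], 's': ['s1']},
        'edges': {'e-tf': [('e1', 'tf1'), ('e2', 'tf1')], 's-tf': [('s1', 'tf1')]},
    }
    net = mp_network(config = cfg)
    print(net.nodes())               # all nodes in sort order
    print(net.edges(type = 'e-tf'))  # only e -> tf edges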
|
# works in Python 2 & 3
class _Singleton(type):
_instances = {}
def __call__(self, *args, **kwargs):
if self not in self._instances:
self._instances[self] = \
super(_Singleton, self).__call__(*args, **kwargs)
return self._instances[self]
class Singleton(_Singleton("SingletonMeta", (object,), {})):
pass
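# Usage sketch: every class deriving from Singleton yields one shared instance.
class Config(Singleton):
    def __init__(self):
        self.values = {}

assert Config() is Config()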
|
def testit(s):
return ' '.join(a.lower()[:-1] + a[-1].upper() for a in s.split())
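# Example: the last letter of each word is uppercased, the rest lowercased.
print(testit("HELLO world"))  # -> 'hellO worlD'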
|
class Solution:
# @param digits, a list of integer digits
# @return a list of integer digits
def plusOne(self, digits):
if len(digits) == 0:
return [1]
digits[len(digits)-1] += 1
carry = 0
for x in xrange(len(digits)-1,-1,-1):
if digits[x] == 10:
digits[x] = 0
if x != 0:
digits[x-1] += 1
else:
carry = 1
if carry == 1:
digits = [1] + digits
return digits
s = Solution()
print s.plusOne([9,9,9,9,9,9,9])
|
import os
import platform
if platform.system() == 'Windows':
texconv_path = f'{os.path.dirname(__file__)}/../resources/texconv.exe'
else:
    raise Exception('Unsupported platform: texconv.exe is Windows-only')
def convert_texture(arg: str):
os.system(f'{texconv_path} {arg}')
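# A minimal sketch using subprocess instead of os.system, which avoids shell
# quoting issues when paths contain spaces (the helper name and pre-split
# argument list are assumptions of this example):
import subprocess

def convert_texture_args(args: list) -> None:
    subprocess.run([texconv_path, *args], check=True)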
|
import csv
import re
import sys
import pandas as pd
import numpy as np
import collections
"""
family id
"""
if __name__ == '__main__':
data = pd.read_csv('data/all_data_1.csv')
data['Family'] = pd.Series(str(np.nan), index=data.index)
    for index, row in data.iterrows():
        surname = row['Name'].split(",")
        family_id = surname[0] + "_" + str(1 + row['Parch'] + row['SibSp'])
        data.loc[index, 'Family'] = str(family_id)
data.to_csv('data/all_data_2.csv',mode = 'w',index = False)
|
"""
2021年5月24日
矫正踢腿磁铁设计
现在的情况是这样的:
前偏转段优化后,不同动量分散下的相椭圆形状、Δx、Δy、Δxp都可以,唯独 Δxp 会变动,大约是 4mr/%
现在打算加入矫正踢腿磁铁
先看看没有动量分散下,全段情况
"""
# append the parent directory to the path so that cctpy can be imported
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from work.A01run import *
from cctpy import *
def beamline_phase_ellipse_multi_delta(bl: Beamline, particle_number: int,
                                       dps: List[float], describles: List[str] = ['r-', 'y-', 'b-', 'k-', 'g-', 'c-', 'm-'],
                                       foot_step: float = 20*MM, report: bool = True):
    if len(dps) > len(describles):
        raise ValueError(
            f'describles (size={len(describles)}) must be at least as long as dps (size={len(dps)})')
xs = []
ys = []
for dp in dps:
x, y = bl.track_phase_ellipse(
x_sigma_mm=3.5, xp_sigma_mrad=7.5,
y_sigma_mm=3.5, yp_sigma_mrad=7.5,
delta=dp, particle_number=particle_number,
kinetic_MeV=215, concurrency_level=16,
footstep=foot_step,
report=report
)
xs.append(x + [x[0]])
ys.append(y + [y[0]])
plt.subplot(121)
for i in range(len(dps)):
plt.plot(*P2.extract(xs[i]), describles[i])
plt.xlabel(xlabel='x/mm')
plt.ylabel(ylabel='xp/mr')
plt.title(label='x-plane')
plt.legend(['dp'+str(int(dp*1000)/10) for dp in dps])
plt.axis("equal")
plt.subplot(122)
for i in range(len(dps)):
plt.plot(*P2.extract(ys[i]), describles[i])
plt.xlabel(xlabel='y/mm')
plt.ylabel(ylabel='yp/mr')
plt.title(label='y-plane')
plt.legend(['dp'+str(int(dp*1000)/10) for dp in dps])
plt.axis("equal")
plt.show()
if __name__ == "__main__":
BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()
param = [5.498, -3.124, 30.539, 0.383,
84.148, 94.725, 82.377,
100.672, 72.283 , 99.973,
-9807.602, 9999.989 , 25.000, 24.000
]
qs1_g = param[0]
qs2_g = param[1]
qs1_s = param[2]
qs2_s = param[3]
dicct_tilt_1 = param[4]
dicct_tilt_2 = param[5]
dicct_tilt_3 = param[6]
agcct_tilt_0 = param[7]
agcct_tilt_2 = param[8]
agcct_tilt_3 = param[9]
dicct_current = param[10]
agcct_current = param[11]
agcct1_wn = int(param[12])
agcct2_wn = int(param[13])
qs1_gradient=qs1_g
qs2_gradient=qs2_g
qs1_second_gradient=qs1_s
qs2_second_gradient=qs2_s
qs1_aperture_radius=60*MM
qs2_aperture_radius=60*MM
dicct12_tilt_angles=[30, dicct_tilt_1, dicct_tilt_2, dicct_tilt_3]
agcct12_tilt_angles=[agcct_tilt_0, 30, agcct_tilt_2, agcct_tilt_3]
dicct12_current=dicct_current
agcct12_current=agcct_current
agcct1_winding_number=agcct1_wn
agcct2_winding_number=agcct2_wn
dicct12_winding_number=42
agcct1_bending_angle=22.5 * (agcct1_wn / (agcct1_wn + agcct2_wn))
agcct2_bending_angle=22.5 * (agcct2_wn / (agcct1_wn + agcct2_wn))
DL1=0.9007765
GAP1=0.4301517
GAP2=0.370816
qs1_length=0.2340128
qs2_length=0.200139
DL2=2.35011
GAP3=0.43188
qs3_length=0.24379
qs3_aperture_radius=60 * MM
qs3_gradient=-7.3733
qs3_second_gradient=-45.31 * 2
agcct12_inner_small_r=92.5 * MM - 20 * MM # 92.5
agcct12_outer_small_r=108.5 * MM - 20 * MM # 83+15
dicct12_inner_small_r=124.5 * MM - 20 * MM # 83+30+1
dicct12_outer_small_r=140.5 * MM - 20 * MM # 83+45 +2
dicct345_tilt_angles=[30, 88.773, 98.139, 91.748]
agcct345_tilt_angles=[101.792, 30, 62.677, 89.705]
dicct345_current=9409.261
agcct345_current=-7107.359
agcct3_winding_number=25
agcct4_winding_number=40
agcct5_winding_number=34
agcct3_bending_angle=-67.5 * (25 / (25 + 40 + 34))
agcct4_bending_angle=-67.5 * (40 / (25 + 40 + 34))
agcct5_bending_angle=-67.5 * (34 / (25 + 40 + 34))
agcct345_inner_small_r=92.5 * MM + 0.1*MM # 92.5
agcct345_outer_small_r=108.5 * MM + 0.1*MM # 83+15
dicct345_inner_small_r=124.5 * MM + 0.1*MM # 83+30+1
dicct345_outer_small_r=140.5 * MM + 0.1*MM # 83+45 +2
dicct345_winding_number=128
part_per_winding=120
deltas = BaseUtils.list_multiply([-4,-2,0,2,4],0.01)
fields = [0,-0.05,-0.05,-0.1,-0.07]
cs = ['r-', 'y-', 'b-', 'k-', 'g-', 'c-', 'm-']
for i in range(len(fields)):
        straight_dipole_magnet_field = fields[i]
        bl = (
            Beamline.set_start_point(P2.origin())  # set the start point of the beamline
            # the first drift segment (a beamline must begin with a drift)
            .first_drift(direct=P2.x_direct(), length=DL1)
            .append_agcct(  # append an AGCCT
                big_r=0.95,  # bending radius
                # apertures of the dipole CCT and the quadrupole CCT
                small_rs=[dicct12_outer_small_r, dicct12_inner_small_r,
                          agcct12_outer_small_r, agcct12_inner_small_r],
                bending_angles=[agcct1_bending_angle,
                                agcct2_bending_angle],  # bending angle of each AGCCT segment
                tilt_angles=[dicct12_tilt_angles,
                             agcct12_tilt_angles],  # tilt angles of the dipole and quadrupole CCTs
                winding_numbers=[[dicct12_winding_number], [
                    agcct1_winding_number, agcct2_winding_number]],  # winding numbers of the dipole and quadrupole CCTs
                # currents of the dipole CCT and the quadrupole CCT
                currents=[dicct12_current, agcct12_current],
                disperse_number_per_winding=part_per_winding  # number of segments per winding
            )
            .append_drift(GAP1)  # append a drift
            .append_qs(  # append a QS magnet
length=qs1_length,
gradient=qs1_gradient,
second_gradient=qs1_second_gradient,
aperture_radius=qs1_aperture_radius
)
.append_drift(GAP2)
.append_qs(
length=qs2_length,
gradient=qs2_gradient,
second_gradient=qs2_second_gradient,
aperture_radius=qs2_aperture_radius
)
.append_drift(GAP2)
.append_qs(
length=qs1_length,
gradient=qs1_gradient,
second_gradient=qs1_second_gradient,
aperture_radius=qs1_aperture_radius
)
.append_drift(GAP1)
.append_agcct(
big_r=0.95,
small_rs=[dicct12_outer_small_r, dicct12_inner_small_r,
agcct12_outer_small_r, agcct12_inner_small_r],
bending_angles=[agcct2_bending_angle,
agcct1_bending_angle],
tilt_angles=[dicct12_tilt_angles,
agcct12_tilt_angles],
winding_numbers=[[dicct12_winding_number], [
agcct2_winding_number, agcct1_winding_number]],
currents=[dicct12_current, agcct12_current],
disperse_number_per_winding=part_per_winding
)
.append_drift(DL1-0.1)
.append_straight_dipole_magnet(
                magnetic_field=straight_dipole_magnet_field,
length=0.2,
aperture_radius=60*MM
)
            # second section
.append_drift(DL2-0.1)
.append_agcct(
big_r=0.95,
small_rs=[dicct345_outer_small_r, dicct345_inner_small_r,
agcct345_outer_small_r, agcct345_inner_small_r],
bending_angles=[agcct3_bending_angle,
agcct4_bending_angle, agcct5_bending_angle],
tilt_angles=[dicct345_tilt_angles,
agcct345_tilt_angles],
winding_numbers=[[dicct345_winding_number], [
agcct3_winding_number, agcct4_winding_number, agcct5_winding_number]],
currents=[dicct345_current, agcct345_current],
disperse_number_per_winding=part_per_winding
)
.append_drift(GAP3)
.append_qs(
length=qs3_length,
gradient=qs3_gradient,
second_gradient=qs3_second_gradient,
aperture_radius=qs3_aperture_radius
)
.append_drift(GAP3)
.append_agcct(
big_r=0.95,
small_rs=[dicct345_outer_small_r, dicct345_inner_small_r,
agcct345_outer_small_r, agcct345_inner_small_r],
bending_angles=[agcct5_bending_angle,
agcct4_bending_angle, agcct3_bending_angle],
tilt_angles=[dicct345_tilt_angles,
agcct345_tilt_angles],
winding_numbers=[[dicct345_winding_number], [
agcct5_winding_number, agcct4_winding_number, agcct3_winding_number]],
currents=[dicct345_current, agcct345_current],
disperse_number_per_winding=part_per_winding
)
.append_drift(DL2)
)
xs,ys = bl.track_phase_ellipse(
x_sigma_mm=3.5,xp_sigma_mrad=7.5,
y_sigma_mm=3.5,yp_sigma_mrad=7.5,
delta=deltas[i],
particle_number=8,
kinetic_MeV=215,
concurrency_level=16,
footstep=20*MM
)
Plot2.plot_p2s(xs,describe=cs[i],circle=True)
Plot2.equal()
Plot2.legend(*[str(int(deltas[i]*100))+"%:"+str(fields[i])+"T" for i in range(len(fields))])
Plot2.info("x/mm","y/mm","")
Plot2.show()
|
from common.run_method import RunMethod
import allure
@allure.step("极运营/前台业务/批量转班/查询转班学生")
def classChange_student_get(params=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) the request is sent to
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "极运营/前台业务/批量转班/查询转班学生"
url = f"/service-profile/classChange/student"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/前台业务/批量转班/查询转班费用课时")
def classChange_detail_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) the request is sent to
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "极运营/前台业务/批量转班/查询转班费用课时"
url = f"/service-profile/classChange/detail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/前台业务/批量转班/查询转班记录")
def classChange_query_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) the request is sent to
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "极运营/前台业务/批量转班/查询转班记录"
url = f"/service-profile/classChange/query"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/前台业务/批量转班/批量转班")
def classChange_change_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) the request is sent to
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "极运营/前台业务/批量转班/批量转班"
url = f"/service-profile/classChange/change"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/前台业务/转班/个人转班")
def classChange_query_class_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) the request is sent to
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "极运营/前台业务/转班/个人转班"
url = f"/service-profile/classChange/query/class"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/前台业务/批量转班/转班校验")
def classChange_validateChangeClassOrder_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    :param: query parameters appended to the URL
    :body: request body
    :return_json: whether to return the response as JSON (default: True)
    :header: request headers
    :host: environment (host) the request is sent to
    :return: JSON response by default; the raw response when return_json=False
    '''
name = "极运营/前台业务/批量转班/转班校验"
url = f"/service-profile/classChange/validateChangeClassOrder"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
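# Minimal usage sketch (hypothetical params, body and header; illustration only):
# header = {"Authorization": "<token>"}
# students = classChange_student_get(params={"classId": 1}, header=header)
# result = classChange_change_post(body={"studentIds": [1, 2]}, header=header)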
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 13:24:09 2018
@author: Markus Meister
@instute: University Oldenburg (Olb)
@devision: Machine Learning
@faculty:FVI Math.&Nat.Sci.
"""
#%% imports
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd, optim
import numpy as np
#from OneHotEncoder import one_hot_encode
#%% prefix
if 'xrange' not in locals():
def xrange(mx,mn=0):
return range(mn,mx)
globals()['xrange'] = xrange
#%% MLP class
class Net(nn.Sequential):
def __init__(self, n_features=2, n_h=[], n_Classes=2,mu=0,std=.25,
early_stopping=True, tol=.5e-08, loss_fun='CrossEntropy',
validation_frac=.1,lafcns=None,ltypes=None,max_iter=250,
opt='Adam',learning_rate=0.001,batch_size='auto',fc_init=0,
layers=None, warm_start=True):
super(Net,self).__init__() #Net, self
#std loss
#self.loss_fun = torch.nn.CrossEntropyLoss()
if type(n_h).__name__ == 'str':
n_h = eval(n_h)
n_h = np.array(n_h)
n_units = np.append(np.append(np.array(n_features), n_h), n_Classes).tolist()
if ltypes == None and not fc_init:
fc_init = 0
self.n_layers = len(n_h) + 1
elif ltypes != None and fc_init == 1:
fc_init = True
            # built-in min over the two lengths (np.min would treat the second value as an axis)
            self.n_layers = min(
                len(ltypes['type']),
                len(ltypes['args'])
            )
if layers != None:
fc_init = 3
self.n_layers = len(layers)
if fc_init != 3:
if lafcns == None and fc_init != 3:
lafcns = np.repeat('relu',len(n_h)+1).tolist()
elif fc_init and layers != None and fc_init != 3:
if len(lafcns) < len(layers):
dl = np.abs(len(lafcns) - len(layers))
lafcns = np.array(lafcns)
lafcns = np.append(lafcns, np.repeat(lafcns[-1],dl))
if ltypes == None and fc_init != 3:
ltypes = {}
ltypes['type'] = np.repeat('Linear',len(n_h)+1).tolist()
ltypes['args'] = {}
if not hasattr(ltypes,'args',) and fc_init != 3:
ltypes['args'] = {}
if ltypes != None and len(ltypes['args']) < len(ltypes['type']) and fc_init != 3:
for j in range(1,len(n_h)+2):
ltypes['args'][j-1] = "%d,%d" \
%(int(n_units[j-1]),int(n_units[j]))
#self.lafcns = lafcns
#self.ltypes = ltypes['name']
#parse loss function
# if loss_fun != 'default' and loss_fun != None:
# if type(loss_fun) == str:
# self.loss_fun = loss_fun.strip('Loss')
# #exec('self.loss_fun = torch.nn.%sLoss()' %(loss_fun))
# elif type(loss_fun).__name__.strip('function') !=\
# type(loss_fun).__name__\
# or type(loss_fun).__name__.strip('method') !=\
# type(loss_fun).__name__:
# self.loss_fun = loss_fun
# else:
# self.loss_fun = torch.nn.MSELoss()
        # keep the requested loss function name so that fit() can resolve it
        self.loss_fun = loss_fun
        if tol is not None:
            self.tol = tol
        else:
            self.tol = 1e-8
n_hl = len(n_h)
if fc_init <= 2:
# for each layer with given data
for l in range(len(ltypes['type'])):
# exec('self.%s = nn.%s(%s)' \
# %(\
# ltypes['type'][l],\
# ltypes['args'][l]\
# )\
# )
                # instantiate the layer type for position l from its argument string
                self.add_module(
                    "(%d)" % (l),
                    eval(
                        'nn.%s(%s)'
                        % (ltypes['type'][l],
                           ltypes['args'][l])
                    )
                )
# exec('torch.nn.init.normal(self.%s.weight, mean=mu, std=std)' \
# %ltypes['name'][l]\
# )
else:
# for each given layer from dict
for l in layers:
self.add_module(l,layers[l])
#exec('torch.nn.init.normal(self.%s.weight, mean=mu, std=std)' \
# %l\
# )
# if fc_init and fc_init < 2:
# #self.ltypes = ltypes
# #self.lafcns = lafcns
# self.ini_fwd_types()
# if fc_init > 1:
# self.ini_fwd_layer()
self.n_hl = n_hl
self.n_h = n_h
self.n_features = n_features
self.n_Classes = n_Classes
self.validation_frac = early_stopping*validation_frac
        self.early_stopping = early_stopping
self.max_iter = max_iter
self.learning_rate = learning_rate
self.batch_size = batch_size
self.warm_start = warm_start
self.c_losses = np.array([
'CrossEntropy',
'NLL',
],dtype=str)
if type(opt).__name__ == 'str':
exec(\
'self.opt=optim.%s(%s,lr=%f)' \
%(\
opt,\
'params=self.parameters()',\
learning_rate\
)\
)
else:
self.opt = opt
def fit(self,x,l,loss_fun=None,opt=None, batch_size=None, epochs=None, verb=False):
l = l.squeeze()
if batch_size == None:
batch_size = self.batch_size
n_total = x.shape[0]
D = x.shape[1]
N = n_total
assert D == self.n_features
if batch_size == 'auto' or type(batch_size).__name__ == 'str':
batch_size = N
if loss_fun == None:
try:
losses = eval('torch.nn.%sLoss()' %(self.loss_fun))
loss_fun = self.loss_fun
except:
losses = torch.nn.MSELoss()
loss_fun = 'MSE'
if np.sum(self.c_losses == loss_fun) == 0 \
and len(l.shape) <= 1:
l = self.binarize(l,self.n_Classes)
if epochs == None:
epochs = self.max_iter
if opt == None:
opt = self.opt #optim.SGD(params=self.parameters(),lr=0.01)
i_shuffle = np.arange(n_total)
num_batches = int(n_total/batch_size)
this_loss = np.ones((epochs,1))
loss_valid = np.ones((epochs,1))
valid_size = int(batch_size*self.validation_frac)
bdata_size = batch_size - valid_size
e_tol = 0
v_tol = 0
for e in range(epochs):
# verbous print
if verb:
print('Epoch %s out of %s'%((e+1),epochs))
# data shuffle
np.random.shuffle(i_shuffle)
x=x[i_shuffle]
l=l[i_shuffle]
# data torchify
torch_data = autograd.Variable(torch.from_numpy(x).float())
if np.sum(self.c_losses == loss_fun) != 0:
torch_labels = autograd.Variable(torch.from_numpy(l)).long()
else:
torch_labels = autograd.Variable(torch.from_numpy(l)).float()
# batch train
for n in range(num_batches):
dat = torch_data[(batch_size*n):(batch_size*(n+1)),:]
lab = torch_labels[(batch_size*n):(batch_size*(n+1))]
if self.validation_frac > 0.0:
val = self(dat[-valid_size:])
out = self(dat[:bdata_size])
loss = losses(out,lab[:bdata_size])
self.zero_grad()
loss.backward()
opt.step()
#print(loss.data.mean())
this_loss[e] += loss.data.mean().numpy()
#print(loss)
if valid_size > 0:
loss_valid[e] = losses(val,lab[-valid_size:]).data.mean().numpy()
this_loss[e] = this_loss[e] /num_batches
if valid_size > 0:
loss_valid[e] = loss_valid[e] /num_batches
# verbos print
if verb:
print('current loss',this_loss[e])
if e > 0:
if np.abs((this_loss[e-1] - this_loss[e]))\
< self.tol:
e_tol += 1
else:
e_tol = 0
if valid_size > 0:
if loss_valid[e] - loss_valid[e-1] \
> self.tol:
v_tol += 1
else:
v_tol = 0
elif hasattr(self,'loss_valid_'):
if valid_size > 0:
if loss_valid[e] - self.loss_valid_ \
> self.tol:
v_tol += 1
# tolerance for 'convergence' reached
if e_tol >= 2 or v_tol >= 2:
break
if hasattr(self,'loss_valid_'):
self.loss_valid_ = min(np.mean(loss_valid[e]), self.loss_valid_)
else:
self.loss_valid_ = np.mean(loss_valid[e])
if hasattr(self,'loss_curve_'):
self.loss_curve_ = np.append(self.loss_curve_,this_loss)
else:
self.loss_curve_ = this_loss
if hasattr(self,'Iters_'):
self.Iters_ += e+1
else:
self.Iters_ = e+1
# def forward(self, V):
# return super().forward(V).squeeze()
def predict_proba(self,x):
x = self.tensor_check(x)
return self.forward(x)
def predict(self,x):
x = self.tensor_check(x)
return F.softmax(self.predict_proba(x),dim=-1)
def binarize(self,target,n_Classes=None):
if n_Classes == None:
n_Classes = np.unique(target).shape[0]
labels = np.zeros([target.shape[0],n_Classes],dtype=int)
for j in range(n_Classes):
labels[:,j] = target == j
return labels.astype(int)
def tensor_check(self,x):
if type(x).__name__ == 'ndarray':
x = autograd.Variable(torch.from_numpy(x).float())
return x
#%% example code
if __name__ == "__main__":
#%% load data
import matplotlib.pyplot as plt
# import os
# import tables
# import sys
#
# sys.path.append("../../Data/simple_clusters/")
#params=np.loadtxt('../../Data/simple_clusters/param_save.txt')
#storage=np.loadtxt('../../Data/simple_clusters/data_half_moon.txt')
params=np.loadtxt('param_save.txt')
storage=np.loadtxt('data_half_moon.txt')
data=storage[:,:2]
labels=storage[:,2]
n_features = data.shape[1]
n_Classes = np.unique(labels).shape[0]
#n_Classes=int(params[0])
#n_Cluster=int(params[1])
#n_per_Cluster=int(params[2])
#n_total=n_per_Cluster*n_Cluster
n_total = data.shape[0]
#n_h1=2*n_Cluster
n_h1 = 32
#%% define parameters
learning_rate = .001
opt_fun = 'Adam'#'SGD'
#%% instanciate MLP and optimizer
epochs = 50
batch_size = 10
# device = torch.device('cpu')
# if torch.cuda.is_available():
# device = torch.device('cuda')
nt_mods = {
'0':nn.Linear(n_features,n_h1),
'1':nn.Softsign(),
'2':nn.Linear(n_h1,n_h1),
'3':nn.Tanh(),
'4':nn.Linear(n_h1,n_h1*2),
'5':nn.LeakyReLU(),
'6':nn.Linear(n_h1*2,n_Classes),
'7':nn.ReLU(),
}
net = Net(n_features,[n_h1*2,n_h1*(1+n_Classes),n_h1*2],n_Classes,\
early_stopping=True,validation_frac=.1,
batch_size=batch_size,max_iter=epochs,\
layers=nt_mods,fc_init=0,tol=1e-10,opt='Adam'\
, loss_fun='CrossEntropy')
print(net)
# net = nn.Sequential()
# for ty in nt_mods:
# net.add_module(ty,nt_mods[ty])
# print(net)
#
# loss_fun='CrossEntropy'
# if type(loss_fun) == str:
# loss_fun = loss_fun.strip('Loss')
# exec('net.loss_fun = torch.nn.%sLoss()' %(loss_fun))
# opt = 'Adam'
# if type(opt).__name__ == 'str':
# exec(\
# 'opt=optim.%s(%s,lr=%f)' \
# %(\
# opt,\
# 'params=net.parameters()',\
# learning_rate\
# )\
# )
# net.opt = opt
#
# opt=None
# if opt == None:
# exec('opt=optim.%s(%s,lr=%f)' %(opt_fun,'params=net.parameters()',\
# learning_rate)\
# )
#
# net.batch_size=batch_size
# net.max_iter=epochs
# net.validation_frac=.25
# net.early_stopping=True
# net.tol=.5e-08
# net.n_features=n_features
# net.n_hidden = (n_h1*2,)
# net.n_Clases = n_Classes
#
# net.fit = Net.fit
#net.forward = Net.forward
#net.opt = opt
#%% convert data
point=autograd.Variable(\
torch.from_numpy(\
np.matrix(\
[[1.0,2.0],\
[2.0,3.0],\
[3.0,4.0]])\
).float()\
)
lab = labels
labels = np.repeat(np.zeros_like(lab)[:,np.newaxis],3,axis=1)
for j in range(n_Classes):
labels[:,j] = lab == j
labels = lab.astype(int)
torch_data = autograd.Variable( torch.from_numpy( data ) ).float()
torch_labels = autograd.Variable( torch.from_numpy( labels) ).double()
#%% start training
net.fit(data,labels,verb=True)
#%% accuracy test
# accuracy on test set
pred_labels = net(torch_data)
pred_labels = np.argmax(pred_labels.data.numpy(),axis=1)
print(pred_labels)
print(lab)
count_right = np.sum((pred_labels==lab.astype(int))).astype(float)
# verbose output
print('accuracy on train-set: %.4f' %(count_right/n_total))
#%% plots: pre-allocation
x_grid_min=np.amin(data[:,0])
x_grid_max=np.amax(data[:,0])
y_grid_min=np.amin(data[:,1])
y_grid_max=np.amax(data[:,1])
x_grid=200
y_grid=200
xx_em=np.linspace(x_grid_min,x_grid_max,x_grid)
yy_em=np.linspace(y_grid_min,y_grid_max,y_grid)
m=np.zeros((x_grid,y_grid))
col_count=1
for i in range(0,x_grid):
for j in range(0,y_grid):
point=autograd.Variable(\
torch.from_numpy(np.array([[xx_em[i]],\
[yy_em[j]]]).T).float()\
)
f=(net(point)).data.numpy()
for c in range(0,n_Classes):
if(np.amax(f)==f[0,c]):
m[y_grid-(j+1),i]=c
if(col_count*0.2<=i/x_grid):
print('Column %s of %s'%(i,x_grid))
col_count=col_count+1
#%% plots: images
plt.figure()
plt.imshow(m,interpolation='none',extent=[x_grid_min,x_grid_max,y_grid_min,y_grid_max])
plt.scatter(data[:,0],data[:,1],s=1,c=labels,cmap='Spectral')
#x_loss=np.arange(net.Iters_+1)
plt.figure()
plt.plot(np.arange(net.Iters_)+1,net.loss_curve_[:net.Iters_])
|
def binarySearch(arr, l, r, x):
#base case
if r >= l:
        mid = l + (r - l) // 2
if arr[mid] == x:
return mid
elif arr[mid] > x:
'''
if element is smaller than element at mid
it can only be present in the left subarray
'''
return binarySearch(arr,l, mid - 1, x)
else:
#if element is bigger look in the right subarray
return binarySearch(arr, mid + 1, r, x)
else:
#element not present in the array
return -1
if __name__ == "__main__":
arr = [ 2, 3, 4, 10, 40 ]
x = 2
print(binarySearch(arr, 0, len(arr) -1, x))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtGui, QtCore
from main_widget import *
class MyMainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MyMainWindow, self).__init__(parent)
# self.form_widget = FormWidget(self)
# self.setCentralWidget(self.form_widget)
self.init()
def init(self):
textEdit = QtGui.QTextEdit()
self.setCentralWidget(textEdit)
exitAction = QtGui.QAction(QtGui.QIcon('img/menus/exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QtGui.qApp.quit)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
self.toolbar = self.addToolBar('exit1')
self.toolbar.addAction(exitAction)
self.toolbar = self.addToolBar('exit2')
self.toolbar.addAction(exitAction)
self.statusbar = self.statusBar()
self.statusbar.showMessage('Ready')
self.setGeometry(300, 300, 500, 300)
# self.setg
self.setWindowTitle('Mushroom')
self.form_widget = FormWidget(self)
self.setCentralWidget(self.form_widget)
self.show()
if __name__ == '__main__':
app = QtGui.QApplication([])
foo = MyMainWindow()
foo.show()
sys.exit(app.exec_())
|
def read_access_point(interface):
'''Read the MAC address of the access point associated
with the network that @interface is on.
ARGS:
@interface -- The interface inquired about.
RETURNS:
@ap -- The MAC address of the interface's
network access point.
'''
from parse_iwconfig import parse_iwconfig
interfaces = parse_iwconfig()
for intrfc in interfaces:
if intrfc['interface'] == interface:
            return intrfc['ap']
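# Illustrative call (hypothetical interface name):
#   read_access_point('wlan0')  ->  e.g. 'AA:BB:CC:DD:EE:FF'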
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
import sys
import os
import argparse
import pandas as pd
from pprint import pprint
from collections import OrderedDict
from parquet_writer import write_parquet
def main():
# Parse args
args = parse_args()
# Load
df = pd.read_csv(args.inf, sep='\t', header=0)
# Decompose variant IDs
df[['A_chrom', 'A_pos', 'A_ref', 'A_alt']] = \
df.index_variantid_b37_A.str.split('_', expand=True)
df[['B_chrom', 'B_pos', 'B_ref', 'B_alt']] = \
df.index_variantid_b37_B.str.split('_', expand=True)
df['A_pos'] = df['A_pos'].astype(int)
df['B_pos'] = df['B_pos'].astype(int)
# Rename and select columns
cols = OrderedDict([
('study_id_A', 'A_study_id'),
('A_chrom', 'A_chrom'),
('A_pos', 'A_pos'),
('A_ref', 'A_ref'),
('A_alt', 'A_alt'),
('study_id_B', 'B_study_id'),
('B_chrom', 'B_chrom'),
('B_pos', 'B_pos'),
('B_ref', 'B_ref'),
('B_alt', 'B_alt'),
('set_type', 'set_type'),
('distinct_A', 'A_distinct'),
('overlap_AB', 'AB_overlap'),
('distinct_B', 'B_distinct')
])
df = ( df.loc[:, list(cols.keys())]
.rename(columns=cols) )
# Coerce data types
dtypes = OrderedDict([
('A_study_id', 'object'),
('A_chrom', 'object'),
('A_pos', 'Int64'),
('A_ref', 'object'),
('A_alt', 'object'),
('B_study_id', 'object'),
('B_chrom', 'object'),
('B_pos', 'Int64'),
('B_ref', 'object'),
('B_alt', 'object'),
('set_type', 'object'),
('A_distinct', 'Int64'),
('AB_overlap', 'Int64'),
('B_distinct', 'Int64')
])
assert(set(dtypes.keys()) == set(df.columns))
df = (
df.loc[:, dtypes.keys()]
.astype(dtype=dtypes)
)
# Sort
df = df.sort_values(
['A_study_id', 'A_chrom', 'A_pos', 'A_ref', 'A_alt', 'B_study_id',
'B_chrom', 'B_pos', 'B_ref', 'B_alt']
)
# Save as parquet
write_parquet(df,
args.outf,
compression='snappy',
flavor='spark')
def parse_args():
""" Load command line args """
parser = argparse.ArgumentParser()
parser.add_argument('--inf', metavar="<str>", help=('input'), type=str, required=True)
parser.add_argument('--outf', metavar="<str>", help=("Output"), type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
|
import os
from flask import (Flask, flash, render_template, redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key=os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route('/')
@app.route('/index')
def index():
return render_template("home.html")
@app.route('/sendmail')
def sendmail():
return render_template("sendMail.html")
@app.route('/get_reviews')
def get_reviews():
return render_template("drones_review.html", reviews=mongo.db.reviews.find())
@app.route('/add_review')
def add_review():
return render_template('add_drones_review.html' ,
categories=mongo.db.categories.find())
@app.route('/insert_review', methods=['POST'])
def insert_review():
reviews = mongo.db.reviews
reviews.insert_one(request.form.to_dict())
return redirect(url_for('get_reviews'))
@app.route('/edit_review/<review_id>')
def edit_review(review_id):
the_review = mongo.db.reviews.find_one({"_id": ObjectId(review_id)})
all_categories = mongo.db.categories.find()
return render_template('edit_review.html', review=the_review,
categories=all_categories)
@app.route('/update_review/<review_id>', methods=["POST"])
def update_review(review_id):
    reviews = mongo.db.reviews
    # replace_one supersedes the deprecated Collection.update for whole-document updates
    reviews.replace_one({'_id': ObjectId(review_id)},
        {
            'category_name': request.form.get('category_name'),
            'name': request.form.get('name'),
            'review': request.form.get('review')
        })
return redirect(url_for('get_reviews'))
@app.route('/delete_review/<review_id>')
def delete_review(review_id):
    mongo.db.reviews.delete_one({'_id': ObjectId(review_id)})
return redirect(url_for('get_reviews'))
if __name__ == '__main__':
app.run(host=os.environ.get('IP'),
port=int(os.environ.get('PORT')),
debug=True)
|
from typing import Optional, Sequence
from waitlist.storage.database import Waitlist, Character, Shipfit, WaitlistGroup, SolarSystem, Constellation, Station,\
Account
Optionalcharids = Optional[Sequence[int]]
def make_json_wl_entry(entry, exclude_fits: bool = False, include_fits_from: Optionalcharids = None,
scramble_names: bool = False, include_names_from: Optionalcharids = None):
response = {
'id': entry.id,
'character': make_json_character(entry.user_data, scramble_names=scramble_names,
include_names_from=include_names_from),
'time': entry.creation,
'missedInvites': entry.inviteCount
}
if not (exclude_fits and ((include_fits_from is None or entry.user is None) or entry.user not in include_fits_from)):
response['fittings'] = list(map(make_json_fitting, entry.fittings))
return response
def make_json_wl(dbwl: Waitlist, exclude_fits: bool = False, include_fits_from: Optionalcharids = None,
scramble_names: bool = False, include_names_from: Optionalcharids = None):
return {
'id': dbwl.id,
'name': dbwl.name,
'entries': make_entries(dbwl.entries, exclude_fits, include_fits_from, scramble_names=scramble_names,
include_names_from=include_names_from)
}
def make_json_character(dbcharacter: Character, scramble_names: bool = False,
include_names_from: Optionalcharids = None):
return {
'id': dbcharacter.get_eve_id() if not scramble_names or (
include_names_from is not None and dbcharacter.get_eve_id() in include_names_from) else None,
'name': dbcharacter.get_eve_name() if not scramble_names or (
include_names_from is not None and dbcharacter.get_eve_id() in include_names_from) else 'Name Hidden',
'newbro': dbcharacter.is_new
}
def make_json_fitting(dbfitting: Shipfit):
return {
'id': dbfitting.id,
'shipType': dbfitting.ship_type,
'shipName': dbfitting.ship.typeName,
'modules': dbfitting.modules,
'comment': dbfitting.comment,
# 'dna': dbfitting.get_dna(),
'wl_type': dbfitting.wl_type
}
def make_entries(dbentries, exclude_fits: bool = False, include_fits_from: Optionalcharids = None,
scramble_names: bool = False, include_names_from: Optionalcharids = None):
entries = []
for entry in dbentries:
entries.append(make_json_wl_entry(entry, exclude_fits, include_fits_from, scramble_names,
include_names_from=include_names_from))
return entries
def make_json_groups(groups: Sequence[WaitlistGroup]):
return [make_json_group(grp) for grp in groups]
def make_json_group(group: WaitlistGroup):
return {
'groupID': group.groupID,
'groupName': group.groupName,
        'groupDisplayName': group.displayName,
'influence': group.influence,
'status': group.status,
'enabled': group.enabled,
'fcs': make_json_fcs(group.fcs),
'managers': make_json_managers(group),
'station': make_json_station(group.dockup),
'solarSystem': make_json_solar_system(group.system),
'constellation': make_json_constellation(group.constellation),
'logiwlID': None if group.logilist is None else group.logilist.id,
'dpswlID': None if group.dpslist is None else group.dpslist.id,
'sniperwlID': None if group.sniperlist is None else group.sniperlist.id,
'otherwlID': None if group.otherlist is None else group.otherlist.id,
'xupwlID': None if group.xuplist is None else group.xuplist.id
}
def make_json_fcs(fcs: Sequence[Account]):
return [make_json_fc(fc) for fc in fcs if fc.current_char_obj is not None]
def make_json_fc(fc: Account):
return make_json_character(fc.current_char_obj)
def make_json_managers(group: WaitlistGroup):
if len(group.fleets) > 0:
return [make_json_character(fleet.comp.current_char_obj) for fleet in group.fleets if fleet.comp is not None and fleet.comp.current_char_obj is not None]
else:
return [make_json_character(manager.current_char_obj) for manager in group.manager if manager.current_char_obj is not None]
def make_json_solar_system(system: SolarSystem):
if system is None:
return None
return {
'solarSystemID': system.solarSystemID,
'solarSystemName': system.solarSystemName
}
def make_json_constellation(constellation: Constellation):
if constellation is None:
return None
return {
'constellationID': constellation.constellationID,
'constellationName': constellation.constellationName
}
def make_json_station(station: Station):
if station is None:
return None
return {
'stationID': station.stationID,
'stationName': station.stationName
}
def make_json_waitlists_base_data(waitlists: Sequence[Waitlist]):
return [make_json_waitlist_base_data(l) for l in waitlists]
def make_json_waitlist_base_data(waitlist: Waitlist):
return {
'id': waitlist.id,
'name': waitlist.name,
'groupID': waitlist.group.groupID,
'entryCount': len(waitlist.entries)
}
|
import pyaudio
import threading
import wave
from interface.lights import Lights
Thread = threading.Thread
FORMAT = pyaudio.paInt16
# CHANNELS = 2
CHANNELS = 1
# RATE = 44100
RATE = 16000
CHUNK = 1024
class Record(Thread):
def __init__(self, audio_interface):
Thread.__init__(self)
self.audio_interface = audio_interface
self.filedir = "soundfiles/"
self.filename = "new.wav"
self.stream = None
self.frames = []
self.tooShort = False
self.stopped = False
def run(self):
print('thread started')
self.stream = self.audio_interface.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
lights = Lights("change", {"r": 255, "g": 255, "b": 255, "intensity": 100, "duration": 0})
lights.start()
lights.join()
print("recording...")
self.frames = []
while not self.stopped:
data = self.stream.read(CHUNK)
self.frames.append(data)
# stop Recording
self.stream.stop_stream()
self.stream.close()
self.audio_interface.terminate()
wave_file = wave.open(self.filedir + self.filename, 'wb')
wave_file.setnchannels(CHANNELS)
wave_file.setsampwidth(self.audio_interface.get_sample_size(FORMAT))
wave_file.setframerate(RATE)
wave_file.writeframes(b''.join(self.frames))
# check duration
frames = wave_file.getnframes()
rate = wave_file.getframerate()
duration = frames / float(rate)
wave_file.close()
print("finished recording")
if duration < 1:
self.tooShort = True
print("wave too short")
def stop(self):
self.stopped = True
def join(self):
Thread.join(self)
if not self.tooShort:
return self.filename
else:
return False
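# Minimal usage sketch (illustration only): record until stopped, then save.
#   import pyaudio
#   rec = Record(pyaudio.PyAudio())
#   rec.start()
#   ...                       # record for a while
#   rec.stop()
#   name = rec.join()         # filename, or False if the clip was under 1 s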
|
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foodev'.upper(), 'FOODEV')
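# standard entry point so this file can be run directly
if __name__ == '__main__':
    unittest.main()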
|
# -*- coding: utf-8 -*-
from collections import defaultdict
from typing import List
class Solution:
def countPairs(self, nums: List[int], k: int) -> int:
occurrences, result = defaultdict(list), 0
for i, num in enumerate(nums):
if num in occurrences:
result += sum(i * j % k == 0 for j in occurrences[num])
occurrences[num].append(i)
return result
if __name__ == "__main__":
solution = Solution()
assert 4 == solution.countPairs([3, 1, 2, 2, 2, 1, 3], 2)
assert 0 == solution.countPairs([1, 2, 3, 4], 1)
|
'''
41. First Missing Positive
Given an unsorted integer array nums, find the smallest missing positive integer.
You must implement an algorithm that runs in O(n) time and uses constant extra space.
Example 1:
Input: nums = [1,2,0]
Output: 3
Example 2:
Input: nums = [3,4,-1,1]
Output: 2
Example 3:
Input: nums = [7,8,9,11,12]
Output: 1
Constraints:
0 <= nums.length <= 300
-231 <= nums[i] <= 231 - 1
'''
class Solution:
def firstMissingPositive(self, nums):
"""
Basic idea:
1. for any array whose length is l, the first missing positive must be in range [1,...,l+1],
so we only have to care about those elements in this range and remove the rest.
2. we can use the array index as the hash to restore the frequency of each number within
the range [1,...,l+1]
3. after removing all the numbers greater than or equal to n, all the numbers remaining are smaller than n.
If any number i appears, we add n to nums[i] which makes nums[i]>=n.
Therefore, if nums[i]<n, it means i never appears in the array and we should return i.
"""
nums.append(0)
n = len(nums)
for i in range(len(nums)):
# delete those useless elements
if nums[i] < 0 or nums[i] >= n:
nums[i] = 0
for i in range(len(nums)):
# use the index as the hash to record the frequency of each number
nums[nums[i] % n] += n
        for i in range(1, len(nums)):
            # integer division: nums[i] // n == 0 means the number i never appeared
            if nums[i] // n == 0:
                return i
return n
def firstMissingPositiveV2(self, nums):
"""
instead of += n to index i, we can * -1 to indicates that number i appears in the list
"""
for i in range(len(nums)):
# remove all negative elements
if nums[i] < 0:
nums[i] = 0
for i in range(len(nums)):
val = abs(nums[i])
if 1 <= val <= len(nums):
if nums[val - 1] > 0:
nums[val - 1] *= -1 # nums[i] < 0 means number i+1 appears in the list
elif nums[val - 1] == 0:
nums[val - 1] = -1 * (len(nums) + 1) # any number > len(nums) works
# ignore those numbers that are larger than len(nums)
for i in range(1, len(nums) + 1):
if nums[i - 1] >= 0: # nums[i] >= 0 means number i+1 is missing
return i
# default case
return len(nums) + 1
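# Minimal usage sketch (illustration only): both variants checked against the
# examples from the problem statement above.
if __name__ == "__main__":
    s = Solution()
    assert s.firstMissingPositive([1, 2, 0]) == 3
    assert s.firstMissingPositiveV2([3, 4, -1, 1]) == 2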
|
import os
file_path = os.path.dirname(__file__)
# model_dir = os.path.join(file_path, 'chinese_L-12_H-768_A-12/')
model_dir = os.path.join('../chinese_L-12_H-768_A-12/')
config_name = os.path.join(model_dir, 'bert_config.json')
ckpt_name = os.path.join(model_dir, 'bert_model.ckpt')
output_dir = os.path.join(file_path, 'tmp/result/')
vocab_file = os.path.join(model_dir, 'vocab.txt')
data_dir = os.path.join(model_dir, '../data/')
DATA_COLUMN = "review"
LABEL_COLUMN = "label"
MAX_SEQ_LENGTH = 128
# label list
label_list = ["happy", "angry", "disgust", "sad"]
# Compute train and warmup steps from batch size
# These hyperparameters are copied from this colab notebook (https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)
BATCH_SIZE = 32
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 500
SAVE_SUMMARY_STEPS = 100
|
import z
import datetime
import collections
tmonth = datetime.date.today().month
tday = datetime.date.today().day
dates = z.getp("dates")
spy = z.getCsv("SPY")
dlist = (spy["Date"].tolist())
avgdict = collections.defaultdict(list)
for ayear in range(2, 18):
year = "200{}".format(ayear)
if ayear >= 10:
year = "20{}".format(ayear)
date = "{}-0{}-{}".format(year,tmonth, tday)
# print("date : {}".format( date ))
tomorrow = "{}-0{}-{}".format(year,tmonth, tday+1)
# print("tomorrow : {}".format( tomorrow ))
    try:
        idx = dlist.index(date)
    except ValueError:
        # the date is not in the data for this year; skip it
        continue
opend = spy.at[idx,"High"]
close = spy.at[idx,"Low"]
avgdict["today"].append(close/opend)
opend = spy.at[idx+1,"High"]
close = spy.at[idx+1,"Low"]
avgdict["tomorrow"].append(close/opend)
opend = spy.at[idx+2,"High"]
close = spy.at[idx+2,"Low"]
avgdict["nextday"].append(close/opend)
for key,something in avgdict.items():
maxv = round(max(something),4)
avg = round(sum(something)/len(something) ,4)
print("maxv : {}".format( maxv ))
print("avg : {}".format( avg ))
print("score : {}".format( round(avg + maxv,4)))
|
# Generated by Django 3.0.8 on 2020-08-03 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prezola', '0006_orderlineitem_status'),
]
operations = [
migrations.AddField(
model_name='orderlineitem',
name='quantity_purchased',
field=models.IntegerField(blank=True, default=0, null=True),
),
]
|
# functions which simplify the choosing of stimulation frequencies
# this is based on a set of valid frequencies determined externally to this
# code and is based on the experimental data
# format [[UpTime, DownTime, SendFreq, closest = 128, closest = 256, closest = 384, closest = 512, closest = 640, closest = 768],..]
FreqLookUp = [[ 18, 25, 23.26, 23, 23.5, 23.33, 23.25, 23.2, 23.33],
[ 17, 25, 23.81, 24, 24 , 23.66, 23.75, 23.8, 23.83],
[ 17, 24, 24.39, 24, 24.5, 24.33, 24.5, 24.4, 24.33],
[ 16, 24, 25.00, 25, 25 , 25 , 25 , 25 , 25],
[ 16, 23, 25.64, 26, 25.5, 25.66, 25.75, 25.6, 25.66],
[ 16, 22, 26.36, 26, 26.5, 26.33, 26.25, 26.4, 26.33],
[ 15, 22, 27.03, 27, 27 , 27 , 27 , 27 , 27],
[ 15, 21, 27.78, 28, 28 , 27.66, 27.75, 27.8, 27.83],
[ 14, 21, 28.57, 29, 28.5, 28.66, 28.5, 28.6, 28.5],
[ 14, 20, 29.41, 29, 29.5, 29.33, 29.5, 29.4, 29.33],
[ 14, 19, 30.30, 30, 30.5, 30.33, 30.25, 30.4, 30.33],
[ 13, 19, 31.25, 31, 31.5, 31.33, 31.25, 31.2, 31.33],
[ 13, 18, 32.26, 32, 32.5, 32.33, 32.25, 32.2, 32.33],
[ 13, 17, 33.33, 33, 33.5, 33.33, 33.25, 33.4, 33.33],
[ 13, 16, 34.48, 34, 34.5, 34.33, 34.5, 34.4, 34.5],
[ 13, 15, 35.71, 36, 35.5, 35.66, 35.75, 35.8, 35.66],
[ 13, 14, 37.04, 37, 37 , 37 , 37 , 37 , 37],
[ 13, 13, 38.46, 38, 38.5, 38.33, 38.5, 38.4, 38.5 ]]
def SenderGetUpAndDown(frequency):
for i in range(len(FreqLookUp)):
if FreqLookUp[i][2] == frequency:
return ([FreqLookUp[i][0], FreqLookUp[i][1]])
print(frequency)
raise NameError ('Frequency Not Valid')
def getIndex(SS):
if SS == 128:
return(3)
elif SS == 256:
return(4)
elif SS == 384:
return(5)
elif SS == 512:
return(6)
elif SS == 640:
return(7)
elif SS == 768:
return(8)
else:
raise NameError('Block Size Not Implemented')
def mapToClosestFrequencies(frequency_set, sample_size):
    # converts a set of send frequencies to receive frequencies based on the resolution of the FFT for a given window size
index = getIndex(sample_size)
newSet = []
badFrequencyFlag = True
for i in range(len(frequency_set)):
for j in range(len(FreqLookUp)):
if FreqLookUp[j][2] == frequency_set[i]:
newSet = newSet + [FreqLookUp[j][index]]
badFrequencyFlag = False
if badFrequencyFlag == True:
raise NameError('invalid frequency input')
return(newSet)
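# Minimal usage sketch (illustration only), values read from FreqLookUp above:
#   SenderGetUpAndDown(25.00)                     -> [16, 24]
#   mapToClosestFrequencies([25.00, 38.46], 256)  -> [25, 38.5]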
|
import mysql.connector
import random
import datetime
from flask import Flask, render_template, request, flash, redirect, url_for, make_response
def ValidateNewUserLogIn(PossibleUser,PasswordProvided):
if PossibleUser == "admin" and PasswordProvided == "password":
return PossibleUser
Results = SQLExecute("""Select Password FROM NEADatabase2.tblusers Where UserName = %s""", (PossibleUser, ),'r')
print('Results from SQL', end = " ")
for item in Results:
print(item)
if item == (PasswordProvided, ):
return PossibleUser
return "# " + PossibleUser + " unknown."
def InvalidCredential(PossibleUser,Code):
Results = SQLExecute("""Select SessionID FROM NEADatabase2.tblusers Where UserName = %s""", (PossibleUser, ),'r')
print('Results from SQL [SessionID]', end = " ")
for x in Results:
print(x)
if x == (Code, ):
return False
return True
def Authenticated(PossibleUser,PasswordProvided):
Results = SQLExecute("""Select Password FROM NEADatabase2.tblusers Where UserName = %s""", (PossibleUser, ),'r')
print('Results from SQL')
for x in Results:
print(x)
if x == (PasswordProvided, ):
return PossibleUser
return "# " + PossibleUser + " unknown."
def AuthenticatedRenderTemplate(Webpage, Username):
# Generate SessionTokenCode
SessionTokenCode = SecureString(8, 65, 91)
# Modify SQL Database with Session and Expiry Date/Time
SQLExecute("UPDATE NEADatabase2.tblusers SET SessionID = %s, SessionExpire = %s Where UserName = %s",
(SessionTokenCode, datetime.datetime.now() + datetime.timedelta(0, 60 * 10), Username),'u')
# Set up Webpage to reply to User with including Cookies for future authenticated use
res = make_response(render_template(Webpage, title=Username))
res.set_cookie('ID', SessionTokenCode, max_age=60 * 10)
res.set_cookie('User', Username, max_age=60 * 10)
return res
def SecureString(Number, Low, High, Omit=[]):
#Omit is a list of Ascii codes to be omitted from the String like ' or "
if Number > 1 :
#Use Recursion to generate the previous character list then concatenate a new one
return SecureString(Number - 1, Low, High, Omit) + chr(RandomNumber(Low,High,Omit))
else :
# At the bottom of the Recursion so return a single character
return chr(RandomNumber(Low,High,Omit))
def RandomNumber(Low, High, Omit=[]) :
#Set PossibleNumberValid Flag to False to force loop to execute
PossibleNumberValid = False
while not PossibleNumberValid :
#Select an integer number that is >= Low, but < High
SelectNumber = random.randrange(Low, High)
#Assume the number is valid and adjust flag
PossibleNumberValid = True
#Loop through the Omit Array, should the SelectNumber be in the list set the flag to False
for EachNumber in Omit:
if EachNumber == SelectNumber:
PossibleNumberValid = False
return SelectNumber
print("Test")
for x in range(1, 6):
print(SecureString(50,33,127,(34,67)))
def SQLExecute(SQLCommand, SQLAddress, type):
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="PenygelliSQL2!",
)
print('SQL Execute start', end = " ")
print(SQLCommand, end = " ")
print(SQLAddress)
# TheCursor is linked to the Database
TheCursor = db.cursor()
#TheDatabase.commit()
    if type == 'r':
        # run the query first, then fetch all rows
        TheCursor.execute(SQLCommand, SQLAddress)
        Results = TheCursor.fetchall()
        print('Done')
        return Results
else:
if SQLAddress == "":
TheCursor.execute(SQLCommand)
else:
TheCursor.execute(SQLCommand, SQLAddress)
print(TheCursor.rowcount, "record(s) affected")
db.commit()
db.close()
'''
def SQLUpdate(SQLCommand, SQLAddress):
TheDatabase = mysql.connector.connect(
host="localhost",
user="root",
passwd="Apple",
database="testdatabase",
)
TheCursor = TheDatabase.cursor()
print(SQLAddress, end = " ")
print(SQLCommand, end = " ")
if SQLAddress == "" :
TheCursor.execute(SQLCommand)
else:
TheCursor.execute(SQLCommand, SQLAddress)
TheDatabase.commit()
print(TheCursor.rowcount, "record(s) affected")
return TheCursor.rowcount'''
|
dict1= {
"<action>":[],
"<attr_name>":[],
"<table_name>":[],
"<condition_name>":[],
"<condition>":[],
"<value>":[],
"<logic>":[]
}
dictionary11= {
"ssc": ["10th"],
"hsc": ["12th"],
"OR": ["or","OR"],
"DESC": ["descending","decreasing"],
"ASC": ["ascending","increasing"],
"SELECT":["Fetch",'fetch',"find","show","give","tell","can","get",'select',"pick","extract","discover","uncover","gave","display","SELECT","print"],
"aggregate":['average',"average","mean"],
"name":['names',"name's"],
"*":["all","detail","every","completely","fully","thoroughly","entir","altogether","whole"],
"WHERE":["whose",'with','who','where','having','have','haved'],
"AND":["and","well","also","but","AND"],
"FROM student":["student","students","student's"],
"ORDER BY":"order",
"word":['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten',
'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen',
'eighteen', 'nineteen', 'twenty', 'thirty','fourty', 'fifty', 'sixty','seventy','eighty','ninety','hundred'],
"<number>":['1', '2', '3', '4', '5', '6', '7', '8','9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28',
'29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54',
'55', '56', '57', '58', '59', '60', '61', '62', '63','64', '65', '66', '67',' 68', '69', '70', '71', '72', '73', '74', '75', '76', '77','78', '79', '80',
'81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98','99', '100'],
}
dictionary3={
"<":['less than','not more than','lower than','less','lower','le','below'],
">":['greater','more','above'],
"=":['equal'],
"* FROM":['*<keyword>']
}
|
# import the sqlite3 driver
import sqlite3
def createConn(dbName):
    # connect to the sqlite database
    # the database file is test.db
    # if the file does not exist, it is created automatically in the current directory
    conn = sqlite3.connect(dbName)
    return conn
def createCursor(conn):
    # create a cursor
    cursor = conn.cursor()
    return cursor
def close(cursor, conn):
    # close the cursor
    cursor.close()
    # commit the transaction
    conn.commit()
    # close the connection
    conn.close()
def selectEle(cursor, tableName):
    # query
    cursor = cursor.execute('select * from user where id=?', ('1',))
    values = cursor.fetchall()
    print(values)
conn = createConn('test.db')
cursor = createCursor(conn)
# cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
cursor.execute('insert into user (id, name) values (\'1\',\'Micheal\')')
selectEle(cursor, 'user')  # pass the table name, not the connection object
close(cursor, conn)
|
--- setup.py.orig 2021-01-14 10:34:05 UTC
+++ setup.py
@@ -39,7 +39,7 @@ setup(
'isodate>=0.5.0',
'lxml>=3.3.5',
'xmlsec>=1.0.5',
- 'defusedxml==0.6.0'
+ 'defusedxml>=0.6.0'
],
dependency_links=['http://github.com/mehcode/python-xmlsec/tarball/master'],
extras_require={
|
#!/usr/bin/env python
"""Load COMTRADE data, aggregate and calculate steel contents.
Usage:
aggregate_trade_flows.py --allocation ALLOCATION-FILE --steel-contents CONTENTS-FILE COMTRADE-FILE...
Options:
--allocation ALLOCATION-FILE CSV file with mappings from SITC v2 codes to product categories
--steel-contents CONTENTS-FILE CSV file with steel contents for each category
"""
import os
import os.path
import json
import pandas as pd
from logzero import logger
from docopt import docopt
logger.info('Starting %s', os.path.basename(__file__))
def load_file(filename):
"""Load one JSON file from UN COMTRADE."""
with open(filename, 'rt') as f:
d = json.load(f)
return pd.DataFrame.from_records(d['dataset'])
def load_all_data(files):
"""Load all the given JSON files."""
return pd.concat([load_file(file) for file in files], ignore_index=True)
def overwrite_data(trade, code, flow, years):
ii = ((trade['cmdCode'] == code) &
(trade['period'].isin(years)) &
(trade['rgDesc'] == flow))
assert sum(ii) > 0, 'No matches!'
new_value = trade['NetWeight'][ii & (trade['period'] == years[0])].iloc[0]
logger.debug('overwriting %s %s for %s to %s', code, flow, years, new_value)
trade.loc[ii, 'NetWeight'] = new_value
def check_values(df, column, value):
msg = 'Expected "%s" column to be %r' % (column, value)
assert all(df[column] == value), msg
def main(files, alloc_filename, contents_filename):
# Load the trade data
trade = load_all_data(files)
# Apply "corrections"
overwrite_data(trade, '69402', 'Import', [2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007])
overwrite_data(trade, '874', 'Import', [1999, 2000])
# Check that it has the expected data
check_values(trade, 'rt3ISO', 'GBR')
check_values(trade, 'pt3ISO', 'WLD')
# Load the allocation table and category iron contents
alloc = pd.read_csv(alloc_filename, index_col='SITC_code')
cats = pd.read_csv(contents_filename, index_col=0)
# Validate allocations
mult_sums = alloc['multiplier'].groupby('SITC_code').sum()
assert all((mult_sums == 0) | (mult_sums == 1)), 'Multipliers must sum to 0 or 1'
split_allocs = alloc[(alloc['multiplier'] != 0) &
(alloc['multiplier'] != 1) &
~pd.isnull(alloc['multiplier'])]
logger.debug('Split allocations:\n' +
str(split_allocs[['sector_code', 'stage', 'multiplier']]))
# Join the table and aggregate
table = trade \
.join(alloc, on='cmdCode', how='outer') \
.join(cats, on='sector_code') \
.dropna(subset=['sector_code']) \
.rename(columns={'period': 'year',
'rgDesc': 'direction'})
# Convert kg to kt and add in the category iron contents. `multiplier` is for
# sharing an HS4-flow between multiple sector-flows
table['mass'] = table['NetWeight'] * table['multiplier'] / 1e6
table['mass_iron'] = table['mass'] * table['iron_content']
os.makedirs('build', exist_ok=True)
table.to_csv('build/checking_table.csv', index=False)
agg = table \
.groupby(['direction', 'sector_code', 'stage', 'year'], as_index=False) \
.agg({
'mass': 'sum',
'mass_iron': 'sum',
'sector_group': 'first', # same in each group of sector_codes
'sector_name': 'first',
'iron_content': 'first',
})
# Save
    # .copy() avoids SettingWithCopyWarning on the assignments below
    df = agg[['sector_code', 'sector_group', 'sector_name', 'direction',
              'stage', 'year', 'iron_content', 'mass', 'mass_iron']].copy()
df['year'] = df['year'].astype(int)
df['iron_content'] = df['iron_content'].round(2)
df['mass'] = df['mass'].round(1)
df['mass_iron'] = df['mass_iron'].round(1)
df['direction'] = df['direction'].str.lower()
df.to_csv('data/trade.csv', index=False)
if __name__ == '__main__':
args = docopt(__doc__)
print(args)
main(files=args['COMTRADE-FILE'],
alloc_filename=args['--allocation'],
contents_filename=args['--steel-contents'])
|
import click
import random
def beta_convert(b, n):
    """Return n represented in base-b notation"""
    # compare with ==; 'is' on int literals only works by CPython interning
    if b == 2:
        return bin(n)
    elif b == 8:
        return oct(n)
    elif b == 16:
        return hex(n)
    return n
@click.command()
def from_to():
"""Generates a random number in two different
number systems and quizzes the user about the
conversion between those two.
"""
betas = [2, 8, 10, 16]
beta = random.choice(betas)
beta_2 = random.choice(betas)
rand = random.randint(1, 500)
while beta == beta_2:
beta_2 = random.choice(betas)
a = beta_convert(beta, rand)
b = beta_convert(beta_2, rand)
print (f"Convert {a} base {beta} to base {beta_2}")
answer = click.prompt("Answer")
if answer == b:
print("Correct!")
else:
print(f"You answered {answer}, the correct answer is {b}")
if __name__ == '__main__':
from_to()
|
import maya.cmds as cmds
import os
calabash_menu = None
def calabash_menu():
global calabash_menu
if cmds.menu('calabash_m', exists=True):
cmds.deleteUI('calabash_m')
###############################################################################
calabash_menu = cmds.menu('calabash_m', p='MayaWindow', label='Calabash', tearOff=True)
###############################################################################
this_path = os.path.normpath(os.path.dirname(__file__))
up_path = (os.path.dirname(this_path))
version_file = open(os.path.join(up_path, "version.md"), "r")
ver = version_file.read()
general_submenu = cmds.menuItem('general_sub', p=calabash_menu, subMenu=True, label='General', tearOff=True)
ani_submenu = cmds.menuItem('ani_sub', p=calabash_menu, subMenu=True, label='Animation', tearOff=True)
model_submenu = cmds.menuItem('model_sub', p=calabash_menu, subMenu=True, label='Modeling', tearOff=True)
rendering_submenu = cmds.menuItem('rendering_sub', p=calabash_menu, subMenu=True, label='Render', tearOff=True)
rigging_submenu = cmds.menuItem('rigging_sub', p=calabash_menu, subMenu=True, label='Rigging', tearOff=True)
shading_submenu = cmds.menuItem('shading_sub', p=calabash_menu, subMenu=True, label='Shading', tearOff=True, version="2017")
xgen_submenu = cmds.menuItem('xgen_sub', p=calabash_menu, subMenu=True, label='XGen', tearOff=True)
fx_submenu = cmds.menuItem('fx_sub', p=calabash_menu, subMenu=True, label='FX', tearOff=True)
publish_submenu = cmds.menuItem('pub_sub', p=calabash_menu, subMenu=True, label='Publish', tearOff=True)
pipeman_submenu = cmds.menuItem(p=calabash_menu, label='Pipeline Manager', c='from pipeman import pipeman;reload(pipeman),pipeman.run()')
#cmds.menuItem(p=rigging_submenu, divider=True, itl=True)
#hatch_submenu = cmds.menuItem('hatch_sub', p=calabash_menu, subMenu=True, label='Hatchimals', tearOff=True)
###############################################################################
# General Submenu
cmds.menuItem(p=general_submenu, label='Increase File Version', c='from calabash import increaseVersion;reload(increaseVersion);increaseVersion.versionUp()')
cmds.menuItem(p=general_submenu, label='Check For Updates...', c='from calabash import update;reload(update);update.check()')
cmds.menuItem(p=general_submenu, divider=True, dividerLabel='v%s' % ver, itl=True)
###############################################################################
# Animation Submenu
cmds.menuItem(p=ani_submenu, label='Good Playblast', c='from goodPlayblast_c import playblast_utils as gu; reload(gu); pb = gu.Playblaster(); pb.playblast()', image="goodplayblast.png")
cmds.menuItem(p=ani_submenu, optionBox=True, c='import goodPlayblast_c.playblast_view as gp; reload(gp); gp.launch()')
###############################################################################
# Modeling Submenu
cmds.menuItem(p=model_submenu, label='Delete Intermediate Shapes', c='from calabash import model_utils;reload(model_utils);model_utils.del_int_shapes()')
cmds.menuItem(p=model_submenu, label='Basic Mesh Cleanup', c='from calabash import model_utils;reload(model_utils);model_utils.cleanup_mesh()', version="2017")
cmds.menuItem(p=model_submenu, label='abSymMesh', c='from maya import mel; mel.eval("abSymMesh")', version="2017")
###############################################################################
# Rendering Submenu
cmds.menuItem(p=rendering_submenu, label='Submit to Smedge', c='from maya import mel; mel.eval("smedgeRender");')
cmds.menuItem(p=rendering_submenu, label='Set Overscan', c='from calabash import overscan; reload(overscan); overscan.run()')
cmds.menuItem(p=rendering_submenu, divider=True, dividerLabel='Vray Attributes', itl=True)
cmds.menuItem(p=rendering_submenu, label='Vray Toolbox', c='from calabash import vray_toolbox;reload(vray_toolbox);vray_toolbox.launch()')
cmds.menuItem(p=rendering_submenu, label='Add Material ID Attributes (Beta)', c='from calabash import vrayUtils;reload(vrayUtils);vrayUtils.makeVrayMatId()')
cmds.menuItem(p=rendering_submenu, divider=True, dividerLabel='Vray Object Properties', itl=True)
cmds.menuItem(p=rendering_submenu, label='Apply single object properties node to selection', c='from calabash import vrayUtils;reload(vrayUtils);vrayUtils.single_vop()')
cmds.menuItem(p=rendering_submenu, label='Primary Vis Off', c='from calabash import vrayUtils;reload(vrayUtils);vrayUtils.primVis()')
cmds.menuItem(p=rendering_submenu, label='Matte Surface', c='from calabash import vrayUtils;reload(vrayUtils);vrayUtils.matteSurface()')
cmds.menuItem(p=rendering_submenu, divider=True, dividerLabel='Render Settings', itl=True)
cmds.menuItem(p=rendering_submenu, label='Apply Final Render Settings', c='from calabash import vrayUtils;reload(vrayUtils);vrayUtils.renderSettings()')
cmds.menuItem(p=rendering_submenu, label='Apply Final Render GI Settings', c='from calabash import vrayUtils;reload(vrayUtils);vrayUtils.giSettings()')
cmds.menuItem(p=rendering_submenu, label='Render Elements for Selected Lights', c='from maya import mel;mel.eval("vrLightPass;")')
###############################################################################
# Publish Submenu
cmds.menuItem(p=publish_submenu, label='Publish Selected Asset', c='from calabash import fileUtils;reload(fileUtils);fileUtils.publishCurrentFile()')
cmds.menuItem(p=publish_submenu, label='Publish Animation', c='from calabash import animUtils;reload(animUtils);animUtils.publishAnim()')
cmds.menuItem(p=publish_submenu, label='Create AutoCache', c='from calabash import animUtils; reload(animUtils); animUtils.autocache_gui.run()')
cmds.menuItem(p=publish_submenu, label='Publish Camera from animation', c='from calabash import animUtils; reload(animUtils); animUtils.ouroboros()')
#cmds.menuItem(p=rigging_submenu, label='Publish Vray Shading', c='from calabash import fileUtils;reload(fileUtils);fileUtils.publish_vray_rig()')
cmds.menuItem(p=publish_submenu, label='Publish Groom', c='from calabash import fileUtils;reload(fileUtils);fileUtils.publish_groom_rig()')
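###############################################################################
# Rigging Submenu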
cmds.menuItem(p=rigging_submenu, divider=True, itl=True)
cmds.menuItem(p=rigging_submenu, label='Hide Joints', c='from calabash import rigUtils;reload(rigUtils);rigUtils.jointDisplay()')
cmds.menuItem(p=rigging_submenu, label='Show Joints', c='from calabash import rigUtils ;reload(rigUtils);rigUtils.jointDisplay(show=True)')
cmds.menuItem(p=rigging_submenu, divider=True, itl=True)
cmds.menuItem(p=rigging_submenu, label='MoveCtrl', c='from calabash import moveControl;reload(moveControl);moveControl.moveCtrlUI()')
cmds.menuItem(p=rigging_submenu, label='Jamm Joint Orient', c='import jammOrientJoint as oj;oj.orientJointsWindow()',version="2017")
###############################################################################
# Shading Submenu
cmds.menuItem(p=shading_submenu, label='Rename Shading Groups', c='from calabash import shading_utils;reload(shading_utils);shading_utils.rename_shading_groups()', version="2017")
cmds.menuItem(p=shading_submenu, label='Apply Materials to Asset', c='from calabash import shading_utils;reload(shading_utils);shading_utils.apply_look()')
###############################################################################
# XGen Submenu
cmds.menuItem(p=xgen_submenu, label='Cache Descriptions', c='from calabash import xgen_utils;reload(xgen_utils);xgen_utils.cache_groomableSplines()')
cmds.menuItem(p=xgen_submenu, label='Repath Caches', c='from calabash import xgen_utils;reload(xgen_utils);xgen_utils.repath_caches()')
###############################################################################
# FX Submenu
cmds.menuItem(p=fx_submenu, label='RGB TBS', c='from calabash import rgb_tbs;reload(rgb_tbs);rgb_tbs.tbs()')
cmds.menuItem(p=fx_submenu, label='Classic TBS', c='from maya import mel;mel.eval("TBS")')
###############################################################################
# Hatchimal Submenu
# cmds.menuItem(p=hatch_submenu, label='Publish Season2 Rig', c='from calabash import oldHatchUtils;reload(oldHatchUtils);oldHatchUtils.ohPublishCurrentFile()')
# cmds.menuItem(p=hatch_submenu, label='Publish Season2 No Vray Rig', c='from calabash import oldHatchUtils;reload(oldHatchUtils);oldHatchUtils.ohPublish_mayaMat_rig()')
# cmds.menuItem(p=hatch_submenu, label='Rename New Hatch Rigs', c='from calabash import fileUtils;reload(fileUtils);fileUtils.rename_hatch_rigs()')
calabash_menu()
|
# use a list to simulate a stack
class Solution:
def maxDepth(self, s: str) -> int:
maxn = 0
l = []
for t in s:
if t == '(':
l.append(1)
maxn = max(maxn, len(l))
elif t == ')':
l.pop()
return maxn
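# A minimal sanity check (assumed input, not part of the original snippet):
# the deepest nesting in "(1+(2*3)+((8)/4))+1" is ((8)/4), i.e. depth 3.
if __name__ == "__main__":
    assert Solution().maxDepth("(1+(2*3)+((8)/4))+1") == 3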
|
import tensorflow as tf
import numpy as np
from dps.register import RegisterBank
from dps.env import TensorFlowEnv
from dps.utils import Param, Config
def build_env():
return GridBandit()
config = Config(
build_env=build_env,
curriculum=[dict()],
env_name='grid_bandit',
threshold=-5,
T=5,
shape=(2, 2),
n_arms=10,
)
class GridBandit(TensorFlowEnv):
"""
    Agent starts off in a random location. Agent can move around the grid, and can perform a `look`
action to reveal an integer stored at its current location in the grid. It can also pull a number
of arms determined by `n_arms` (these arms can be pulled anywhere, they have no spatial location).
Also, one arm is pulled at all times; taking an action to pull one of the arms persistently
changes the arm the agent is taken to be pulling. The integer stored in the top left location gives
the identity of the correct arm. The optimal strategy for an episode is to move to the top-left corner,
perform the `look` action, and then pull the correct arm thereafter.
The agent receives a reward of 1 for every step that it pulls the correct arm, and 0 otherwise.
"""
T = Param()
shape = Param()
n_val = Param()
n_arms = Param(2)
def __init__(self, **kwargs):
self.action_names = '^ > v < look'.split() + ["arm_{}".format(i) for i in range(self.n_arms)]
self.action_shape = (len(self.action_names),)
self.rb = RegisterBank(
'GridBanditRB', 'x y vision action arm', None, [0.0, 0.0, -1.0, 0.0, 0.0], 'x y',
)
self.val_input = self._make_input(self.n_val)
self.test_input = self._make_input(self.n_val)
super(GridBandit, self).__init__()
def _make_input(self, batch_size):
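        # Each sampled row is laid out as [start_x, start_y, flattened grid values];
        # build_init and build_step below index into this layout.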
start_x = np.random.randint(self.shape[0], size=(batch_size, 1))
start_y = np.random.randint(self.shape[1], size=(batch_size, 1))
grid = np.random.randint(self.n_arms, size=(batch_size, np.product(self.shape)))
return np.concatenate([start_x, start_y, grid], axis=1).astype('f')
def _build_placeholders(self):
self.input = tf.placeholder(tf.float32, (None, 2+np.product(self.shape)), name="input")
def _make_feed_dict(self, n_rollouts, T, mode):
if mode == 'train':
inp = self._make_input(n_rollouts)
elif mode == 'val':
inp = self.val_input
elif mode == 'test':
inp = self.test_input
else:
raise Exception("Unknown mode: {}.".format(mode))
if n_rollouts is not None:
inp = inp[:n_rollouts, :]
return {self.input: inp}
def build_init(self, r):
return self.rb.wrap(
x=self.input[:, 0:1], y=self.input[:, 1:2],
vision=r[:, 2:3], action=r[:, 3:4], arm=r[:, 4:5])
def build_step(self, t, r, actions):
x, y, vision, action, current_arm = self.rb.as_tuple(r)
up, right, down, left, look, *arms = tf.split(actions, 5+self.n_arms, axis=1)
new_y = (1 - down - up) * y + down * (y+1) + up * (y-1)
new_x = (1 - right - left) * x + right * (x+1) + left * (x-1)
new_y = tf.clip_by_value(new_y, 0.0, self.shape[0]-1)
new_x = tf.clip_by_value(new_x, 0.0, self.shape[1]-1)
idx = tf.cast(y * self.shape[1] + x, tf.int32)
new_vision = tf.reduce_sum(
tf.one_hot(tf.reshape(idx, (-1,)), np.product(self.shape)) * self.input[:, 2:],
axis=1, keepdims=True)
vision = (1 - look) * vision + look * new_vision
action = tf.cast(tf.reshape(tf.argmax(actions, axis=1), (-1, 1)), tf.float32)
arm_chosen = tf.reduce_sum(tf.concat(arms, axis=1), axis=1, keepdims=True) > 0.5
chosen_arm = tf.reshape(tf.argmax(arms), (-1, 1))
current_arm = tf.cast(current_arm, tf.int64)
new_current_arm = tf.where(arm_chosen, chosen_arm, current_arm)
new_registers = self.rb.wrap(
x=new_x, y=new_y, vision=vision, action=action, arm=tf.cast(new_current_arm, tf.float32))
correct_arm = tf.equal(new_current_arm, tf.cast(self.input[:, 2:3], tf.int64))
reward = tf.cast(correct_arm, tf.float32)
return tf.fill((tf.shape(r)[0], 1), 0.0), reward, new_registers
|
import pandas as pd  # import the pandas library
data = pd.read_csv("F:\\数据采集\\数据采集课设\\淘宝空调数据.csv",encoding='utf-8-sig')
print(data.describe())
# Null out price outliers below 900 or above 30000
data[u'views_price'][(data[u'views_price'] < 900) | (data[u'views_price']> 30000)] = None
data = data.dropna()  # drop rows with missing values (dropna returns a new frame)
print(data.describe())
print(data.shape)
data.to_csv('F:\\数据采集\\数据采集课设\\淘宝空调数据.csv',index = False,encoding='utf-8-sig')
|
import time
import threading
inicio = time.perf_counter()
def aDormir():
    print("Starting the function, going to sleep for 1 s")
    time.sleep(1)
    print("One second has passed, I am awake")
# Now let's compare by creating several threads.
# To iterate over all of the threads later, we create an empty list called hilos.
hilos=[]
# In a for loop we create 10 threads.
for _ in range(10):
    hilo = threading.Thread(target= aDormir)
    hilo.start()
    # And we store every created thread in our hilos list.
    hilos.append(hilo)
# Now, iterating over hilos, we join them so that we wait for all of them to finish
# before continuing with the following code, in this case printing the elapsed time.
for h in hilos:
    h.join()
aDormir()
final = time.perf_counter()
print(f"Code executed in {round(final - inicio, 2)} seconds")
|
import numpy as np
import os, sys
import argparse
import csv
parser = argparse.ArgumentParser('Character-based Model for LAS')
parser.add_argument('--data-path', default='all', type=str, help='Path to data')
parser.add_argument('--write-file', default='', type=str, help='csv file to write vocabulary')
parser.add_argument('--trans-file', default='', type=str, help='new transcript (npy) file to write to')
parser.add_argument('--first', default=1, type=int, help='if vocab.csv needs to be generated')
args = parser.parse_args()
orig_transcripts = np.load(args.data_path)
vocab = {'SOS': 0, 'EOS':1, ' ':2}
count = 3
new_word = []
new_transcript = []
index = 0
for example in orig_transcripts:
new_transcript.append([])
new_transcript[index].append(int(vocab['SOS']))
for word in example:
decoded_word = word.decode('utf-8')
for l in range(len(decoded_word)):
if decoded_word[l] not in vocab.keys():
vocab[decoded_word[l]] = count
count += 1
new_transcript[index].append(int(vocab[decoded_word[l]]))
new_transcript[index].append(int(vocab[' ']))
new_transcript[index].append(int(vocab['EOS']))
new_transcript[index] = np.array(new_transcript[index])
index += 1
np.save(os.path.join('', args.trans_file), np.array(new_transcript))
if args.first:
with open(os.path.join('', args.write_file), 'w') as csvfile:
writer = csv.writer(csvfile)
for key, value in vocab.items():
writer.writerow([key, value])
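# Sketch of the expected output (inferred from the code above, shown as an
# assumption): vocab.csv gets one "character,index" row per entry, e.g.
# SOS,0 / EOS,1 / " ",2 / t,3 ... in first-seen order, and the transcript .npy
# holds integer index sequences wrapped in SOS/EOS markers.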
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a demo for V3 features.
The script will launch all the processes necessary to bring up
the demo. It will bring up an HTTP server on port 8000 by default,
which you can override. Once done, hitting <Enter> will terminate
all processes. Vitess will always be started on port 12345.
"""
import json
import optparse
import os
import subprocess
import thread
from CGIHTTPServer import CGIHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from google.protobuf import text_format
from vtproto import vttest_pb2
def start_http_server(port):
httpd = HTTPServer(('', port), CGIHTTPRequestHandler)
thread.start_new_thread(httpd.serve_forever, ())
def start_vitess():
"""This is the main start function."""
topology = vttest_pb2.VTTestTopology()
keyspace = topology.keyspaces.add(name='user')
keyspace.shards.add(name='-80')
keyspace.shards.add(name='80-')
keyspace = topology.keyspaces.add(name='lookup')
keyspace.shards.add(name='0')
vttop = os.environ['VTTOP']
args = [os.path.join(vttop, 'py/vttest/run_local_database.py'),
'--port', '12345',
'--proto_topo', text_format.MessageToString(topology,
as_one_line=True),
'--web_dir', os.path.join(vttop, 'web/vtctld'),
'--schema_dir', os.path.join(vttop, 'examples/demo/schema')]
sp = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# This load will make us wait for vitess to come up.
print json.loads(sp.stdout.readline())
return sp
def stop_vitess(sp):
sp.stdin.write('\n')
sp.wait()
def main():
parser = optparse.OptionParser()
parser.add_option('-p', '--port', default=8000, help='http server port')
(options, unused_args) = parser.parse_args()
sp = start_vitess()
try:
start_http_server(options.port)
raw_input('\n'
'Demo is running at: http://localhost:%d/\n'
'\n'
'Press enter to exit.\n' % options.port)
finally:
stop_vitess(sp)
if __name__ == '__main__':
main()
|
from typing import List
class Solution:
def findKthLargest(self, nums: List[int], k: int) -> int:
temp = [0]*20001
for num in nums:
temp[num+10000] += 1
        for i in range(len(temp)-1, -1, -1):
k -= temp[i]
if k <= 0:
return i-10000
return 0
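# A minimal sanity check (assumed example, not part of the original snippet):
# the 2nd largest element of [3, 2, 1, 5, 6, 4] is 5.
if __name__ == "__main__":
    assert Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2) == 5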
|
import json
data = {'a': 'A', 'c': 3.0, 'b': (2, 4), 'd': [1, 2.0, "3", True]}
with open("data.json", mode="w") as f:
json.dump(data, f)
with open("data.json") as f:
data = json.load(f)
print data['b']
print data['d']
"""
[2, 4]
[1, 2.0, u'3', True]
"""
|
from nab import config
from nab import register
from nab import log
_log = log.log.getChild("database")
class Database(register.Entry):
_register = register.Register()
_type = "database"
def get_show_titles(self, show):
return []
def get_show_absolute_numbering(self, show):
return False
def get_show_ids(self, show):
return {}
def get_banner(self, show):
return None
def get_seasons(self, show):
return []
def get_episodes(self, season):
return []
def databases():
return Database.get_all(config.config["databases"])
def get_data(show):
_log.debug("Searching for %s" % show)
# get all titles for show
_log.debug("Getting titles")
for db in databases():
show.titles.update(db.get_show_titles(show))
# get if should use absolute numbering
_log.debug("Getting absolute numbering")
for db in databases():
if db.get_show_absolute_numbering(show):
show.absolute = True
break
# get ids of show
_log.debug("Getting ids")
for db in databases():
show.ids = dict(show.ids.items() + db.get_show_ids(show).items())
# get banner for show
_log.debug("Getting banner")
for db in databases():
show.banner = db.get_banner(show)
if show.banner:
break
# get seasons for show
_log.debug("Getting seasons and episodes")
for db in databases():
for season in db.get_seasons(show):
# get episodes for season
for episode in db.get_episodes(season):
if episode.num in season:
season[episode.num].merge(episode)
else:
season[episode.num] = episode
if season.num in show:
show[season.num].merge(season)
else:
show[season.num] = season
show.format()
|
str1='Hello'
str2='there'
bob=str1+str2
print(bob)
str3='123'
x=int(str3)+1 # convert into integer number
print(x)
|
from __future__ import division
import argparse
import torch
import os
import cv2
import numpy as np
from models import *
parser = argparse.ArgumentParser(description='PyTorch face landmark')
# Datasets
parser.add_argument('-img', '--image', default='spine', type=str)
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--gpu_id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('-c', '--checkpoint', default='checkpoint/00/model_best.pth.tar', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
args = parser.parse_args()
def load_model():
model = resnet(136)
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == '__main__':
out_size = 256
model = load_model()
model = model.eval()
import glob,random
path1 = '/home/felix/data/AASCE/boostnet_labeldata/data/training/*.jpg'
path2 = '/home/felix/data/AASCE/boostnet_labeldata/data/test/*.jpg'
path3 = '/home/felix/data/AASCE/boostnet_labeldata/data/test2/*.jpg'
sam = random.sample(glob.glob(path3),5)
for filename in sam:
name = os.path.basename(filename)
img = cv2.imread(filename)
img = cv2.resize(img,(128,256))
raw_img = img
img = img/255.0
img = (img-np.mean(img))/np.std(img)
img = img.transpose((2, 0, 1))
img = img.reshape((1,) + img.shape)
input = torch.from_numpy(img).float()
input= torch.autograd.Variable(input)
out = model(input).cpu().data.numpy()
out = out.reshape(-1,2)
raw_img = cv2.resize(raw_img,(128,256))
for i in range(68):
cv2.circle(raw_img,(int(out[i][1]*128),int(out[i][0]*256)),2,(255,0,0),-1)
cv2.imwrite('result_{a}.png'.format(a=name),raw_img)
print(name)
print('done!')
|
import sys
import os
import xml.dom.ext
from xml.dom import XML_NAMESPACE, XMLNS_NAMESPACE, DOMException
from xml.dom.ext import Printer
from xml.dom.minidom import parseString, parse, Node
uiSetStub="""
<set
description='Automatically generated structure. Please do not change it !! All changes will be overwritten !!!'
name=''/>
"""
voSetStub='<ui.VirtualOrganisation name=""/>'
class GliteUIsetParams:
def __init__(self, domVO):
# if domVO==string create dom from a stub
self.doc = parseString(uiSetStub)
self.root = self.doc.documentElement
self.root.setAttribute("name", domVO)
self.vo = parseString(voSetStub).documentElement
self.vo.setAttribute("name", domVO)
self.nsNode = parseString("<ui.NSAddresses/>").documentElement
self.vo.appendChild(self.nsNode)
self.root.appendChild(self.vo)
# if domVO==XMLnode import dom structure
def addNS(self, nsName, lbNames):
lbNode = parseString("<ui.LBAddresses/>").documentElement
for lb in lbNames:
lbNode.appendChild(self.__addLB(lb))
itemNode = parseString("<item/>").documentElement
itemNode.setAttribute("value", nsName)
itemNode.appendChild(lbNode)
self.nsNode.appendChild(itemNode)
def __addLB(self, lbName):
valueDoc = parseString("<value/>")
valueNode = valueDoc.documentElement
valueNode.appendChild(valueDoc.createTextNode( lbName ))
return valueNode
def addParameter(self,name,value):
param = self.doc.createElement(name)
param.setAttribute("value",value)
self.vo.appendChild(param)
def addArrayParameter(self, name, values):
param = self.doc.createElement(name)
if values == []:
values = ['']
for value in values:
valueNode = self.doc.createElement("value")
if value.strip() != '':
valueNode.appendChild(self.doc.createTextNode( value ))
param.appendChild(valueNode)
self.vo.appendChild(param)
def getNode(self):
return self.root
##
# Following class/function extends the functionality of PrettyPrint
#
# As an output it produces text representation of DOM tree with similar
# formatting as in the gLite configuration files
#
# Usage
# from UltraPrint import UltraPrettyPrint
# from xml.dom.minidom import parse
# dom = parse('test.xml')
# UltraPrettyPrint(dom)
class UltraPrintVisitor(Printer.PrintVisitor):
import xml.dom.ext
length = 80
def visitAttr(self, node):
if node.namespaceURI == XMLNS_NAMESPACE:
# Skip namespace declarations
return
self._write('\n' + self._indent * (self._depth + 1) + node.name)
value = node.value
if value or not self._html:
text = Printer.TranslateCdata(value, self.encoding)
text, delimiter = Printer.TranslateCdataAttr(text)
            while text.find('  ') > -1:
                text = text.replace('  ', ' ')
n_text = ''
while len(text) > self.length:
s_pos = text.rfind(' ', 0, self.length + 1)
if s_pos == -1:
s_pos = text.find(' ')
if s_pos == -1:
s_pos = len(text)
n_text = n_text + text[0:s_pos] + '\n' + self._indent * (self._depth + 2)
text = text[s_pos + 1:len(text)]
text = n_text + text
self.stream.write("=%s%s%s" % (delimiter, text, delimiter))
return
##
# Class implementing the XML output in the form used in the gLite
# configuration files
class gliteXMLWriter:
def __init__(self):
self.stream = sys.stdout
def setOutputStream(self, stream):
self.stream = stream
def writeFile(self, name, root, encoding='UTF-8', indent=' ',
preserveElements=None):
file = open(name,'w')
self.write(root, file, encoding, indent, preserveElements)
file.close()
def write(self, root, stream=None, encoding='UTF-8', indent=' ',
preserveElements=None):
stream = stream or self.stream
if not hasattr(root, "nodeType"):
return
nss_hints = '' #SeekNss(root)
preserveElements = preserveElements or []
owner_doc = root.ownerDocument or root
if hasattr(owner_doc, 'getElementsByName'):
#We don't want to insert any whitespace into HTML inline elements
preserveElements = preserveElements + HTML_4_TRANSITIONAL_INLINE
visitor = UltraPrintVisitor(stream, encoding, indent,
preserveElements, nss_hints)
Printer.PrintWalker(visitor, root).run()
stream.write('\n')
return
##
# Improved PrettyPrint functionality
#
# @param root root element of the XML tree to print
# @param stream stream to output to (should implement the Writer interface
# @param encoding XML encoding
# @param indent PrettyPrint indent
# @param preserveElements PrettyPrint preserveElements
def UltraPrettyPrint(root, stream=sys.stdout, encoding='UTF-8', indent=' ',
preserveElements=None):
if not hasattr(root, "nodeType"):
return
from xml.dom.ext import Printer
nss_hints = '' #SeekNss(root)
preserveElements = preserveElements or []
owner_doc = root.ownerDocument or root
if hasattr(owner_doc, 'getElementsByName'):
#We don't want to insert any whitespace into HTML inline elements
preserveElements = preserveElements + HTML_4_TRANSITIONAL_INLINE
visitor = UltraPrintVisitor(stream, encoding, indent,
preserveElements, nss_hints)
Printer.PrintWalker(visitor, root).run()
stream.write('\n')
return
#########################################################################
# End Ultra Pretty Print
#########################################################################
##
# method to list the tag names in the NodeList
# @param nodeList NodeList to work on
#
# @return list of the tag names
def getNodeNameList(nodeList):
list = []
for node in nodeList:
list.append(node.nodeName)
return list
def getContainerTemplate(containerName):
containerXml = parse(os.environ['FUNCTIONS_DIR'] + '/../libexec/gLiteContainers.xml')
containerParentNode = containerXml.getElementsByTagName(containerName)[0]
for node in containerParentNode.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
containerNode = node
break
return containerNode
|
from itertools import izip_longest
class Vector(object):
def __init__(self, nums):
self.nums = nums
def __str__(self):
return '({})'.format(','.join(str(a) for a in self.nums))
def add(self, v2):
        return Vector([b + c for b, c in izip_longest(self.nums, v2.nums, fillvalue=0)])
def dot(self, v2):
        return sum(d * e for d, e in izip_longest(self.nums, v2.nums, fillvalue=0))
def equals(self, v2):
return self.nums == v2.nums
def norm(self):
return sum(f ** 2 for f in self.nums) ** 0.5
def subtract(self, v2):
        return Vector([g - h for g, h in izip_longest(self.nums, v2.nums, fillvalue=0)])
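# A small usage sketch (assumed values, not part of the original module):
if __name__ == '__main__':
    v1, v2 = Vector([1, 2, 3]), Vector([4, 5, 6])
    assert v1.add(v2).equals(Vector([5, 7, 9]))
    assert v1.dot(v2) == 32
    assert v1.subtract(v2).equals(Vector([-3, -3, -3]))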
|
Nentr = int(input())
for i in range(0, Nentr):
N = int(input())
if 2015 - N < 0:
print('%d A.C.'%(N-2014))
elif 2015 - N == 0:
print('1 A.C.')
else:
print('%d D.C.'%(2015-N))
|
import hashlib
from urllib.parse import urlencode
from django import template
from django.conf import settings
register = template.Library()
@register.filter
def gravatar(user):
email = user.email.lower().encode('utf-8')
default = 'mm'
size = 256
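    # NOTE: email, default and size are computed above but unused below;
    # the filter returns a fixed avatar URL rather than a gravatar.com address.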
url = 'https://lh3.googleusercontent.com/RtzFFVB7fqOvXW9w7OHziR_xq4muZW_UGhHJoDAgx1ghcgw9KBXbDycZrSeQilU8b51lxmH7LVT7psvHSqEAKddQkBz_DGgItxLZKiU5AP4gf7uGPb7cmDerq9-soqdASpLBzE5x8A5Z5gJC3CtypZiyBoQg7gNvc-YTsnrDOyWereQ50cEdxh-n7Hq_yPe3awOblYdYX-m-J7ZmZ12aND8gDnT16jwsHZZhRZ8Oz5_Hntf16JGwA3qgjQKZGxBK4-szMTERv-ynO3qcqmaVUD2YW5zxj279aLoLxNkF8y_VnNfuXoIEORLs-zuN6GC1RFVIZgfUvXXhAWblxBrUqhWdZT3DXSvJHpzs1rSjTv-2WFD8TdEzdzMdVt7NG-4j05YvrLBsjVYn05uNK3FihNQi638rIk670zNfH5VYz5tvTce8g7tBeBqI-Y85qv0f1irJj5vCCkMgQ-Yp7aXRsjx8da5tw9vVEzi15-ZsSKu7yuxx0TzH9Jl57y_92SKTd2lxEAq1WPVUMzdP9LYaG4J57boA5juaJVd1Gujh0oyHIZ_Am6WO3nkyKx8SadTOzzfgmpoGZf3bKneWOSJeOloDIIRYKo1tyE8TqmIVjTyVqOp9cwSDxFpQDBBcsQbhoJHKHrPvXXTUoZOzMtHjAmVJBKcL86meHNkYuI7m61AodjlPlELx3hs9DxwJhw=w655-h873-no?authuser=0'
return url
|
from typing import List
from Position import Position
def read_city_positions(file_path: str, skip_lines: int) -> List[Position]:
city_positions: List[Position] = []
with open(file_path, 'r') as reader:
lines_list: List[str] = reader.readlines()
for idx in range(skip_lines, len(lines_list)):
coords: List[str] = lines_list[idx].split()
city_positions.append(Position(int(coords[0]), int(coords[1])))
return city_positions
def read_best_roundtrip(file_path: str, skip_lines: int) -> List[int]:
city_idxs: List[int] = []
with open(file_path, 'r') as reader:
lines_list: List[str] = reader.readlines()
for idx in range(skip_lines, len(lines_list)):
city_idxs.append(int(lines_list[idx].split()[0])-1)
return city_idxs
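# Assumed input format (not stated in the original): after `skip_lines` header
# lines, each position line holds whitespace-separated "x y" integers, and each
# roundtrip line starts with a 1-based city index.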
|
from leetcode import test, TreeNode, new_tree
def rob(root: TreeNode) -> int:
def helper(node: TreeNode) -> (int, int):
if not node:
return 0, 0
left_max, left_no_rob = helper(node.left)
right_max, right_no_rob = helper(node.right)
return (
max(left_no_rob + right_no_rob + node.val, left_max + right_max),
left_max + right_max,
)
return helper(root)[0]
test(rob, [(new_tree(3, 4, 5, 1, 3, None, 1), 9)])
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 16:19:14 2020
@author: Anuj
"""
# making the imports
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Input,Conv2D,MaxPooling2D,Dropout,Flatten,Dense,Activation,BatchNormalization,add
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import plot_model
from tensorflow.keras.applications.vgg16 import VGG16,preprocess_input
import os
#Code for loading training and validation data at the time of training
base_dir = os.getcwd() #getting current directory
target_shape = (224,224) #defining the input shape
train_dir = base_dir+"\\chest_xray\\train" #
val_dir = base_dir+"\\chest_xray\\val" # -- Directories for data
test_dir = base_dir+"\\chest_xray\\test" #
# loading the VGG16 model with imagenet weights without the FC layers
vgg = VGG16(weights='imagenet',include_top=False,input_shape=(224,224,3))
for layer in vgg.layers:
layer.trainable = False #making all the layers non-trainable
x = Flatten()(vgg.output) #flattening out the last layer
predictions = Dense(2,activation='softmax')(x) #dense layer to predict whether there is pneumonia or not
model = Model(inputs=vgg.input, outputs=predictions)
model.summary()
train_gen = ImageDataGenerator(rescale=1/255.0,
horizontal_flip=True,
zoom_range=0.2,
shear_range=0.2) # making the data loader for training data
test_gen = ImageDataGenerator(rescale=1/255.0) # making the data loader for validation data
train_data_gen = train_gen.flow_from_directory(train_dir,
target_shape,
batch_size=16,
class_mode='categorical') # function to make iterable object for training
test_data_gen = test_gen.flow_from_directory(test_dir,
                                             target_shape,
                                             batch_size=16,
                                             class_mode='categorical') # function to make iterable object for testing (no augmentation)
plot_model(model, to_file='model.png')
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
hist = model.fit_generator(train_data_gen,
steps_per_epoch=20,
epochs=20,
validation_data=test_data_gen,
validation_steps=10)
plt.style.use("ggplot")
plt.figure()
plt.plot(hist.history["loss"], label="train_loss")
plt.plot(hist.history["val_loss"], label="val_loss")
plt.plot(hist.history["accuracy"], label="train_acc")
plt.plot(hist.history["val_accuracy"], label="val_acc")
plt.title("Model Training")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig("epochs.png")
model.save('model.h5')
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import pandas as pd
from pandas import DataFrame
import dataflow_pipeline.sensus.sensus_seguimiento_beam as sensus_seguimiento_beam
import dataflow_pipeline.sensus.sensus_metas_beam as sensus_metas_beam
import dataflow_pipeline.sensus.sensus_adh_beam as sensus_adh_beam
import dataflow_pipeline.sensus.sensus_dmobility_beam as sensus_dmobility_beam
import dataflow_pipeline.sensus.sensus_dchat_beam as sensus_dchat_beam
import dataflow_pipeline.sensus.sensus_dcorreo_beam as sensus_dcorreo_beam
import dataflow_pipeline.sensus.sensus_banco_beam as sensus_banco_beam
import dataflow_pipeline.sensus.sensus_metalal_beam as sensus_metalal_beam
import dataflow_pipeline.sensus.sensus_poc_beam as sensus_poc_beam
import dataflow_pipeline.sensus.sensus_tecnicos_beam as sensus_tecnicos_beam
import dataflow_pipeline.sensus.sensus_agricolaadmin_beam as sensus_agricolaadmin_beam
import dataflow_pipeline.sensus.sensus_agricolacast_beam as sensus_agricolacast_beam
import dataflow_pipeline.sensus.sensus_estrategy_beam as sensus_estrategy_beam
import dataflow_pipeline.sensus.sensus_alter_beam as sensus_alter_beam
import procesos.descargas as descargas
import os
import socket
import requests
from flask import request
import csv
sensus_api = Blueprint('sensus_api', __name__)
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
######################################################################################################
@sensus_api.route("/archivos_seguimiento")
def archivos_Seguimiento():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[18:26]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('info-aseguramiento/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.seguimiento` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_seguimiento_beam.run('gs://ct-sensus/info-aseguramiento/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
#####################################################################################################
@sensus_api.route("/metas")
def metas():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Metas/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[34:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('metas/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.metas` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_metas_beam.run('gs://ct-sensus/metas/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Metas/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
@sensus_api.route("/adh")
def adh():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/ADH/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[9:17]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('adh/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.adh` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_adh_beam.run('gs://ct-sensus/adh/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/ADH/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/dmobility")
def dmobility():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Detallado/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('adh/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.dmobility` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_dmobility_beam.run('gs://ct-sensus/adh/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Detallado/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/dchat")
def dchat():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Chat/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('adh/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.dchat` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_dchat_beam.run('gs://ct-sensus/adh/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Chat/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/dcorreo")
def dcorreo():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Correo/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('adh/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.dcorreo` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_dcorreo_beam.run('gs://ct-sensus/adh/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Correo/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/banco")
def banco():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Banco/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('adh/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.banco` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_banco_beam.run('gs://ct-sensus/adh/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Banco/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/metalal")
def metalal():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Metalal/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('adh/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.metalal` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_metalal_beam.run('gs://ct-sensus/adh/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Metalal/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/poc")
def poc():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Poc/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('poc/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.poc` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_poc_beam.run('gs://ct-sensus/poc/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Poc/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se cargaron los ficheros exitosamente"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/tecnicos")
def tecnicos():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/tecnicos/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('tecnicos/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.tecnicos` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_tecnicos_beam.run('gs://ct-sensus/tecnicos/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/tecnicos/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se cargaron los ficheros exitosamente"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/agricolaa")
def agricolaa():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Dagricola/Administrativa/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('agricolaadmin/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.agricolaadm` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_agricolaadmin_beam.run('gs://ct-sensus/agricolaadmin/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Dagricola/Administrativa/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se cargaron los ficheros exitosamente"
response["status"] = True
return jsonify(response), response["code"]
#####################################################################################################
@sensus_api.route("/agricolac")
def agricolac():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Dagricola/Castigada/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:8]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('agricolacast/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.agricolacast` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_agricolacast_beam.run('gs://ct-sensus/agricolacast/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Dagricola/Castigada/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se cargaron los ficheros exitosamente"
response["status"] = True
return jsonify(response), response["code"]
######################################################################################################
@sensus_api.route("/descargar", methods=['POST','GET'])
def Descarga_Encuesta():
dateini= request.args.get('desde')
dateend= request.args.get('hasta')
myRoute = '/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Dagricola/Descargas/'+dateini+'_'+dateend+'.csv'
    myQuery = 'SELECT * FROM `contento-bi.sensus.Dagrciola_admin` where fecha between "' + dateini + '" AND "' + dateend + '"'
myHeader = ["Unico","Nombre","Fecha","Nombre_gestor","Cartera","Nombre_evaluador","PEC","Telefono_cliente","Cierre_de_la_llamada","Evaluacion_del_Saludo","Evaluacion_negociacion","Aspectos_mejora","Aspectos_positivos","Observaciones"
]
return descargas.descargar_csv(myRoute, myQuery, myHeader)
######################################################################################################
@sensus_api.route("/estrategy")
def estrategy():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Estrategia/Estrategy/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('agricolacast/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.estrategy` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_estrategy_beam.run('gs://ct-sensus/agricolacast/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Estrategia/Estrategy/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se cargaron los ficheros exitosamente"
response["status"] = True
return jsonify(response), response["code"]
######################################################################################################
@sensus_api.route("/alter")
def alter():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Estrategia/Sinalternativas/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-sensus')
            # Upload the file to Cloud Storage before sending it to Dataflow for processing
            blob = bucket.blob('agricolacast/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Once the file is in Cloud Storage, delete the matching records from BigQuery
            deleteQuery = "DELETE FROM `contento-bi.sensus.alter` WHERE fecha = '" + mifecha + "'"
            # First remove every record carrying that date
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result() # Run the BigQuery deletion job
            # With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
            mensaje = sensus_alter_beam.run('gs://ct-sensus/agricolacast/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Sensus/Aseguramiento/Estrategia/Sinalternativas/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se cargaron los ficheros exitosamente"
response["status"] = True
return jsonify(response), response["code"]
######################################################################################################
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" KafkaKeySerializer
Serialize string key to bytes & deserialize key in string
"""
from tonga.services.serializer.base import BaseSerializer
from tonga.services.serializer.errors import KeySerializerDecodeError, KeySerializerEncodeError
class KafkaKeySerializer(BaseSerializer):
""" Serialize kafka key to bytes
"""
@classmethod
def encode(cls, obj: str) -> bytes:
""" Encode key to bytes for kafka
Args:
obj (str): Key in string or bytes format
Raises:
KeySerializerEncodeError: this error was raised when KafkaKeySerializer can't serialize key
Returns:
bytes: Kafka key as bytes
"""
if isinstance(obj, str) and obj is not None:
return obj.encode('utf-8')
if isinstance(obj, bytes):
return obj
raise KeySerializerEncodeError
@classmethod
def decode(cls, encoded_obj: bytes) -> str:
""" Decode kafka key to str
Args:
encoded_obj (bytes): Kafka key in bytes
Raises:
KeySerializerDecodeError: this error was raised when KafkaKeySerializer can't deserialize key
Returns:
str: Key as string
"""
if isinstance(encoded_obj, bytes) and encoded_obj is not None:
return encoded_obj.decode('utf-8')
if isinstance(encoded_obj, str):
return encoded_obj
raise KeySerializerDecodeError
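# A quick round-trip sanity check (assumed usage, not part of the original module):
if __name__ == '__main__':
    raw = KafkaKeySerializer.encode('order-42')
    assert raw == b'order-42'
    assert KafkaKeySerializer.decode(raw) == 'order-42'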
|
from leetcode.tree import printtree
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Queue(object):
def __init__(self):
self.stacka = []
self.stackb = []
self.queue = []
def inqueue(self, node):
if node:
self.queue.append(node.val)
        if not self.stacka:
self.stacka.append(node)
else:
while self.stacka:
x = self.stacka.pop()
self.stackb.append(x)
self.stacka.append(node)
while self.stackb:
y = self.stackb.pop()
self.stacka.append(y)
def outqueue(self):
if self.stacka:
x = self.stacka.pop()
self.queue.pop()
return x
else:
return None
class Solution(object):
    # recursive version
def scanRight(self, root):
ans = Queue()
if root is None:
return ans
if root.left is None and root.right is None:
ans.inqueue(root)
return ans
leftans = self.scanRight(self, root.left)
rightans = self.scanRight(self, root.right)
print("---")
print("当前节点:", root.val)
print("左:", leftans.queue)
print("右:", rightans.queue)
if root.left is None:
nqueue = Queue()
nqueue.inqueue(root)
x = rightans.outqueue()
while x:
nqueue.inqueue(x)
x = rightans.outqueue()
return nqueue
if root.right is None:
nqueue = Queue()
nqueue.inqueue(root)
x = leftans.outqueue()
while x:
nqueue.inqueue(x)
x = leftans.outqueue()
return nqueue
ans.inqueue(root)
while rightans.queue:
x = rightans.outqueue()
ans.inqueue(x)
leftans.outqueue()
while leftans.queue:
y = leftans.outqueue()
ans.inqueue(y)
print("合并:", ans.queue)
return ans
    # non-recursive (BFS) version
    def rightSideView(self, root):
        # breadth-first traversal
queue = Queue()
ret = []
count = 1
queue.inqueue(root)
while queue.stacka:
            # visit the current level while enqueuing the next level from left to right; the last node of each level is the output
while count > 0:
node = queue.outqueue()
count -= 1
if node.left:
queue.inqueue(node.left)
if node.right:
queue.inqueue(node.right)
ret.append(node.val)
count = queue.queue.__len__()
return ret
#
# 5
# / \
# / \
# / \
# / \
# / \
# / \
# / \
# / \
# 4 8
# / \ / \
# / \ / \
# / \ / \
# / \ / \
# 11 N 13 4
# / \ / \ / \ / \
# / \ / \ / \ / \
# 7 2 N N N 9 15 N
if __name__ == "__main__":
solu = Solution
root = TreeNode(5)
n1 = TreeNode(4)
n2 = TreeNode(8)
root.left = n1
root.right = n2
n3 = TreeNode(11)
n4 = TreeNode(7)
n5 = TreeNode(2)
n1.left = n3
n3.left = n4
n3.right = n5
n6 = TreeNode(13)
n6.right = TreeNode(9)
n7 = TreeNode(4)
n7.left = TreeNode(15)
n2.left = n6
n2.right = n7
# n7.right = TreeNode(1)
printtree.Pretty_print(root)
# ans = solu.scanRight(solu, root)
ans = solu.rightSideView(solu, root)
print("_______")
print(ans)
|
from django.test import TestCase
from portfolio.models import Project
class ProjectModelTest(TestCase):
def test_creating_and_retrieving_projects(self):
first_project = Project()
first_project.title = '1st project'
first_project.description = '1st desc'
first_project.save()
second_project = Project()
second_project.title = '2nd project'
second_project.description = '2nd desc'
second_project.save()
projects = Project.objects.all()
self.assertEqual(projects.count(), 2)
first_saved_project = projects[0]
self.assertEqual(first_saved_project.title, '1st project')
self.assertEqual(first_saved_project.description, '1st desc')
second_saved_project = projects[1]
self.assertEqual(second_saved_project.title, '2nd project')
self.assertEqual(second_saved_project.description, '2nd desc')
class ImageModelTest(TestCase):
def test_adding_image(self):
pass
|
from logging import info, warning
from api import gitlab
from utilities import validate, types
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all_project_members(project):
members = {}
info("[*] Fetching all members for project %s", project)
details = gitlab.get_project_members(project)
if validate.api_result(details):
warning("[*] Found %s members for project %s", len(details), project)
for item in details:
members.update({item['username']: item['web_url']})
return members
def get_all_group_members(group):
members = {}
info("[*] Fetching all members for group %s", group)
details = gitlab.get_group_members(group)
if validate.api_result(details):
warning("[*] Found %s members for group %s", len(details), group)
for item in details:
members.update({item['username']: item['web_url']})
return members
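# Usage sketch (illustrative only; "group/project" and "group" are placeholder
# paths, and the api/utilities modules above must be importable):
# members = get_all_project_members("group/project")
# members.update(get_all_group_members("group"))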
|
import re
email_file = "C:\\Users\\gk\\Documents\\myPython\\Day4\\contacts_regex.txt"
pattern = re.compile(r'@\w[\w\.\-]+\w')
with open(email_file,"r",encoding='utf8') as file :
dict_domain = dict()
for line in file.readlines() :
for domain in re.findall(pattern,line) :
            if domain in dict_domain:
dict_domain[domain] += 1
else:
dict_domain[domain] = 1
print("len:", len(dict_domain))
print(dict_domain)
    nameList = [len(d) for d in dict_domain]  # the domains contain no ':', so these are just their lengths
    maxString = max(nameList)
    shiftMaxStringFormat = '{:' + str(maxString) + '}'  # left-align to the longest domain's width
print("\nshiftMaxStringFormat:")
print(shiftMaxStringFormat)
for d in sorted(dict_domain.keys()) :
print(shiftMaxStringFormat.format(d), "\t:\t", dict_domain[d])
|
from random import randint
from card import Card
from deck import Deck
from noble import Noble
from token import Token
from player import Player
from prettytable import PrettyTable
class Environment:
def __init__(self, playerCount):
self.__players = Player.initialize(playerCount)
self.__nobles = Noble.initalize(playerCount)
self.__decks = Deck.initialize()
self.__gemTokens = Token.initalize(playerCount)
self.__table = []
self.initializeTable()
self.initializeGame()
def getPlayers(self):
return self.__players
def getNobles(self):
return self.__nobles
def getDecks(self):
return self.__decks
def getGemTokens(self):
return self.__gemTokens
def getTable(self):
return self.__table
def initializeTable(self):
cardsToBeDrawn = 4
for deck in self.__decks:
drawnCards = []
for _ in range(cardsToBeDrawn):
drawnCards.append(deck.draw())
self.__table.append(drawnCards)
def displayNobles(self):
print("─────────────────────────────────────────────────────────────────────")
print("Nobles:")
header = ["Cost"]
nobleCosts = [["Diamond"], ["Sapphire"], ["Emerald"], ["Ruby"], ["Onyx"]]
for i, noble in enumerate(self.__nobles):
header.append("Noble #" + str(i + 1))
cardCost = noble.getCardCost()
for i, cost in enumerate(cardCost):
nobleCosts[i].append(str(cardCost[cost]))
t = PrettyTable(header)
for cost in nobleCosts:
t.add_row(cost)
print(t.get_string(title="Nobles"))
def displayTable(self):
print("─────────────────────────────────────────────────────────────────────")
decks = [self.__table[2], self.__table[1], self.__table[0]]
for i, deck in enumerate(decks):
header = ["Fields"]
cardDetails = [["Token"], ["Prestige"], ["Diamond"], ["Sapphire"], ["Emerald"], ["Ruby"], ["Onyx"]]
for cardNum, card in enumerate(deck):
header.append("Card #" + str(cardNum + 1))
cardCost = card.getCost()
cardDetails[0].append(card.getTokenType().title())
cardDetails[1].append(card.getPrestige())
cardDetails[2].append(cardCost["diamond"])
cardDetails[3].append(cardCost["sapphire"])
cardDetails[4].append(cardCost["emerald"])
cardDetails[5].append(cardCost["ruby"])
cardDetails[6].append(cardCost["onyx"])
t = PrettyTable(header)
for detail in cardDetails:
t.add_row(detail)
print("Tier " + str(len(decks) - i) + " Cards:")
print(t)
def displayGemTokens(self):
print("─────────────────────────────────────────────────────────────────────")
print("Available Gem Tokens:")
t = PrettyTable(["Gems", "Quantity"])
for gem in self.__gemTokens:
t.add_row([gem.title(), self.__gemTokens[gem]])
print(t)
def checkVisitingNobles(self, player):
visitingNobles = []
for noble in self.__nobles:
cardCost = noble.getCardCost()
playerCardTokens = player.getCardTokens()
            isVisiting = True
            for cost in cardCost:
                if playerCardTokens[cost] < cardCost[cost]:
                    isVisiting = False
            if isVisiting:
visitingNobles.append(noble)
# if len(visitingNobles) == 1:
# print("Noble has visited!")
# elif len(visitingNobles) > 1:
# print("Please select visiting noble")
def takeGemToken(self, amount, gem):
self.__gemTokens[gem] -= amount
def takeTableCard(self, deckTier, cardNum):
devCard = self.__table[deckTier - 1][cardNum - 1]
self.__table[deckTier - 1][cardNum - 1] = ""
return devCard
def drawTableCard(self):
# Replace Table Card if enough cards in same tier deck
for tier, deck in enumerate(self.__table):
for cardNum, card in enumerate(deck):
if card == "":
                    newCard = self.__decks[tier].draw()  # table and decks share the same tier index
self.__table[tier][cardNum] = newCard
def initializeGame(self):
prestigeWinCondition = 15
run = True
while run:
for player in self.__players:
self.displayNobles()
self.displayTable()
self.displayGemTokens()
player.displayGemTokens()
player.actions(self)
self.drawTableCard()
self.checkVisitingNobles(player)
player.incrementTurns()
if player.getPrestige() >= prestigeWinCondition:
print("Player #" + player.getNumber() + " has won!")
run = False
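# Entry-point sketch (illustrative; note that constructing Environment starts
# the interactive game loop in __init__):
# if __name__ == '__main__':
#     Environment(playerCount=2)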
|
#! /usr/bin/env python
import sys
import os
from numpy import diff, convolve, ones, where
import scikits.audiolab as audiolab
import matplotlib.pylab as plt
project_path = sys.argv[1] # where the audio files are
recording_number = int(sys.argv[2]) # e.g. 9, for #09
pixclock_prefix = "PixClock "
velocity_threshold = 0.025
positive_transitions = []
negative_transitions = []
def reduce_runs(l):
    """Collapse each run of consecutive indices to its first element."""
    last = -2
out = []
for i in l:
if i != last + 1:
out.append(i)
last = i
return out
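# e.g. reduce_runs([3, 4, 5, 9, 10, 42]) -> [3, 9, 42]: each run of consecutive
# sample indices collapses to its first index, i.e. one transition per run.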
for c in range(0,4):
filepath = os.path.join(project_path, "%s%d#%.2d.wav" % (pixclock_prefix, c+1, recording_number))
print(filepath)
(wav_data, rate, format) = audiolab.wavread(filepath)
wav_velocity = diff(wav_data)
smooth_wav_velocity = convolve(wav_velocity, ones((100)))
plt.plot(smooth_wav_velocity[0:-1:100])
plt.show()
    print(max(smooth_wav_velocity))
    print(min(smooth_wav_velocity))
positive_transitions.append(reduce_runs(where(smooth_wav_velocity > velocity_threshold)[0]))
negative_transitions.append(reduce_runs(where(smooth_wav_velocity < -velocity_threshold)[0]))
print("n items = %d" % (len(positive_transitions[-1])))
plt.figure()
# plt.hold(True)  # hold-on is the default in modern matplotlib; the call no longer exists
print(positive_transitions)
for c in range(0,len(positive_transitions)):
for t in positive_transitions[c]:
print("%d,%d +" % (c,t))
plt.plot(c, t, '+')
for t in negative_transitions[c]:
plt.plot(c, t, 'o')
print(positive_transitions)
plt.show()
|
import numpy as np


def calcWei(Ns, Nf, Dur):
    # calculate the weight matrix of the directed graph using the
    # information extracted from the file
    n = 28  # number of nodes in the graph
    wei = np.zeros((n, n), dtype=float)  # initialise the weight matrix
    # allocate each duration to its position in the weight matrix
    for i in range(len(Dur)):
        wei[int(Ns[i]), int(Nf[i])] = Dur[i]
    return wei
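# Illustrative call with made-up data: a single edge 0 -> 1 weighted 5.0
# wei = calcWei([0], [1], [5.0])
# wei[0, 1]  # -> 5.0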
|
import torch
import csv
from Data_Loader import *
from model import ModelManager
if __name__ == '__main__':
model = ModelManager()
model.load_state_dict(torch.load("model_param/parameter_1.pkl",map_location='cpu'))
test_loader = DataProcess()
    model.eval()
    if torch.cuda.is_available():
        model = model.cuda()  # inputs are moved to the GPU below, so the model must live there too
f = open('predict_1.csv', 'w', encoding='utf-8')
csv_writer = csv.writer(f)
num = 1
for i, data in enumerate(test_loader):
x, _, mask = data
x = x.long()
if torch.cuda.is_available():
x = x.cuda()
mask = mask.cuda()
output = model(x, mask)
# output [bs, 3]
values, index = output.max(dim=1)
predict = index.view(-1, 1)
for t in predict:
csv_writer.writerow([num, t.item()])
num += 1
print("读取第"+str(num)+"条数据")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
STEL Sistemas de Telecomunicações 2016/17 2S
Simulation of a Poisson call arrival process
Grupo:
'''
import csv
import numpy as np
import matplotlib.pyplot as plt
# Data
time_scale = []
value = []
poissonX = []
poissonY = []
# Time Scale - 1/(10*LAMBDA) = 0.0125
LAMBDA = 8
calls = None
# Open file and append values
with open('simulation_out.csv', 'r') as csvfile:
data = csv.reader(csvfile, delimiter = ',')
    data_filter = filter(lambda row: not row[0].startswith('#'), data)
for row in data_filter:
time_scale.append(float(row[0]))
value.append(int(row[1]))
with open('simulation_out.csv', 'r') as csvfile:
data = csv.reader(csvfile, delimiter = ',')
    data_filter = filter(lambda row: row[0].startswith('# Totalcalls'), data)
for row in data_filter:
calls = (''.join(filter(str.isdigit, row[0])))
# Printing values to be plotted for reference
print("Total number of calls:", calls)
print("Time scale array:", time_scale)
print("Total calls per interval:", value)
poissonX = ([k*0.0125+0.0125/2 for k in range(0,80)])
poissonY = (0.0125*LAMBDA*np.exp(-LAMBDA*np.asarray(poissonX)))
#print len(poissonX)
#print len(poissonY)
value_norm = (np.array(value))/float(calls)
#print (value_norm)
print "Normalized values: ", value_norm
# Plotting histogram
fig = plt.figure(num='Simulation of a Poisson call arrival process', figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')
# Plotting 1 subplot
ax1 = plt.subplot(2,1,1)
ax1.title.set_text('Histogram of the interval between the arrival of consecutive calls')
ax1.set_xlabel('Interval between the arrival of consecutive calls')
ax1.set_ylabel('Total number of calls')
plt.xticks(time_scale)
plt.setp(plt.xticks()[1], rotation=30, ha='right')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.bar(time_scale, value, width=0.0125, color='red', alpha=0.80, align='edge', edgecolor='k', label='Histogram of the interval between the arrival of consecutive calls')
plt.plot(np.array(time_scale)+(0.0125/2), value, 'b--', linewidth = 2, alpha=0.50, label='Histogram exponential plot values')
plt.xlim([0,0.5])
plt.legend()
plt.tight_layout()
# Plotting 2 subplot
ax2 = plt.subplot(2,1,2)
ax2.title.set_text('Histogram of the interval between the arrival of consecutive calls normalized')
ax2.set_xlabel('Interval between the arrival of consecutive calls')
ax2.set_ylabel('Number of calls normalized')
plt.xticks(time_scale)
plt.setp(plt.xticks()[1], rotation=30, ha='right')
plt.bar(time_scale, value_norm, width=0.0125, color='red', alpha=0.80, align='edge', edgecolor='k' , label='Histogram of the interval between the arrival of consecutive calls normalized')
plt.plot(np.array(time_scale)+(0.0125/2), value_norm, 'b--', linewidth = 2, alpha=0.50, label='Histogram exponential plot values')
plt.plot(poissonX, poissonY, 'g-' , linewidth = 2, alpha=0.50, label='Poisson exponential theoretically predicted values')
plt.xlim([0,0.5])
plt.legend()
plt.tight_layout()
plt.show()
|
import datetime
from region import Region
from track import Track
from log import log
class ObjectTracker:
ttl = datetime.timedelta(seconds=2)
def __init__(self):
self.tracks = []
def process(self, region_proposals):
birthed, promoted, reaped = [], [], []
region_proposals = Region.merge_regions(region_proposals)
for region in region_proposals:
if not region.is_car():
continue
            # associate with an existing track; a region that matches nothing
            # births a new track (for/else: else runs only if no break occurred)
            for track in self.tracks:
                if track.matches(region):
                    track.update(region)
                    break
            else:
                birthed.append(Track(region))
# existing tracks are either promoted or reaped
for track in self.tracks:
if track.age() > ObjectTracker.ttl:
reaped.append(track)
else:
promoted.append(track.promote())
log.debug(f'birthed={len(birthed)} promoted={len(promoted)} reaped={len(reaped)}')
self.tracks = birthed + promoted
# return the reaped tracks so they can be saved
return reaped
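# Footnote on the for/else in process() above (illustrative): the else clause
# runs only when the loop finishes without break, i.e. when no track matched:
# for track in []:
#     break
# else:
#     pass  # reached, because the empty loop never breaks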
|
from flask import request
from flask_restplus import Namespace, Resource, fields
api_operator = Namespace('operator', description='Requests to operator model.')
operator_fields = api_operator.model('Create operator payload.', {
"username": fields.String,
"email": fields.String,
"password": fields.String,
})
@api_operator.route("/")
class Operator(Resource):
@api_operator.doc(body=operator_fields)
def post(self):
"""Create operator user.
"""
from app.controllers.operator_controller import OperatorController
post_data = request.get_json()
return OperatorController.create_operator_user(post_data)
@api_operator.route("/<operator_id>")
class OperatorId(Resource):
@api_operator.doc(params={
'operator_id': 'An operator id.',})
def get(self, operator_id):
"""Get operator by id.
"""
from app.controllers.operator_controller import OperatorController
return OperatorController.get_operator_by_id(operator_id)
|
import glob
sensor_list = []
excluded_files = [
"__init__",
"example",
"cansat_sensor"
]
# Get all sensors in the Sensors directory and import them
for sensorFile in glob.glob("*.py"):
# Remove the .py from the sensorFile
sensorFile = sensorFile[:sensorFile.find(".py")]
# Check if it is in the list of excluded files
if sensorFile in excluded_files:
continue
# If not, import it
sensor_list.append(__import__(sensorFile))
|
import chainer
import onnx
import pytest
import onnx_chainer
def pytest_addoption(parser):
parser.addoption(
'--value-check-runtime',
dest='value-check-runtime', default='onnxruntime',
choices=['skip', 'onnxruntime', 'mxnet'], help='select test runtime')
parser.addoption(
'--opset-versions', dest='opset-versions', default=None,
help='select opset versions, select from "min", "latest", '
'or a list of numbers like "9,10"')
@pytest.fixture(scope='function')
def disable_experimental_warning():
org_config = chainer.disable_experimental_feature_warning
chainer.disable_experimental_feature_warning = True
try:
yield
finally:
chainer.disable_experimental_feature_warning = org_config
@pytest.fixture(scope='function')
def check_model_expect(request):
selected_runtime = request.config.getoption('value-check-runtime')
if selected_runtime == 'onnxruntime':
from onnx_chainer.testing.test_onnxruntime import check_model_expect # NOQA
_checker = check_model_expect
elif selected_runtime == 'mxnet':
from onnx_chainer.testing.test_mxnet import check_model_expect
_checker = check_model_expect
else:
def empty_func(*args, **kwargs):
pass
_checker = empty_func
return _checker
@pytest.fixture(scope='function')
def target_opsets(request):
opsets = request.config.getoption('opset-versions')
min_version = onnx_chainer.MINIMUM_OPSET_VERSION
max_version = min(
onnx.defs.onnx_opset_version(), onnx_chainer.MAXIMUM_OPSET_VERSION)
if opsets is None:
return list(range(min_version, max_version + 1))
elif opsets == 'min':
return [min_version]
elif opsets == 'latest':
return [max_version]
else:
try:
versions = [int(i) for i in opsets.split(',')]
except ValueError:
raise ValueError('cannot convert {} to versions list'.format(
opsets))
return versions
|
month = int(input())
if month == 12 or month < 3:
print('winter')
elif month < 6:
print('spring')
elif month < 9:
print('summer')
else:
print('fall')
|
import torch
import os
import json
import random
import numpy as np
import argparse
from datetime import datetime
from tqdm import tqdm
from torch.nn import DataParallel
from itertools import islice, takewhile, repeat
from transformers import EncoderDecoderModel, BertTokenizerFast, BertModel
from dataset import TextDataset
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='which GPUs to use')
    parser.add_argument('--model_path', default='model/epoch_0/model.pth', type=str, required=False, help='path to the model checkpoint')
args = parser.parse_args()
print('args:\n' + args.__repr__())
device = args.device
model_path = args.model_path
# device
os.environ["CUDA_VISIBLE_DEVICES"] = args.device # 此处设置程序使用哪些显卡
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('using device:', device)
# model
model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-multilingual-cased", "bert-base-multilingual-cased")
model.load_state_dict(torch.load(model_path))
model.eval()
# dataset
tokenizer = BertTokenizerFast.from_pretrained("bert-base-multilingual-cased")
    # print the parameter count
num_parameters = 0
parameters = model.parameters()
for parameter in parameters:
num_parameters += parameter.numel()
print('number of parameters: {}'.format(num_parameters))
while True:
        question = input('Enter a question: ')
ids = tokenizer.encode(question)
input_ids = torch.tensor([ids], dtype=torch.long)
generated = model.generate(input_ids, decoder_start_token_id=model.config.decoder.pad_token_id)
answer = tokenizer.decode(generated[0,:])
print(answer)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
'''
Get the justification stock for a user
@author Ivan
@example python3 getJustificationsStockByUser.py userId justificationId date
@example python3 getJustificationsStockByUser.py e43e5ded-e271-4422-8e85-9f1bc0a61235 fa64fdbd-31b0-42ab-af83-818b3cbecf46 01/05/2015
'''
import sys
sys.path.insert(0, '../../../python')
import inject
import logging
import asyncio
import datetime
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.config import Config
###### configuration #####
logging.getLogger().setLevel(logging.DEBUG)
def config_injector(binder):
binder.bind(Config, Config('server-config.cfg'))
inject.configure(config_injector)
config = inject.instance(Config)
###### parameters #####
userId = sys.argv[1]
justificationId = sys.argv[2]
dateParam = sys.argv[3]
class WampMain(ApplicationSession):
def __init__(self, config=None):
        logging.debug('instantiating')
ApplicationSession.__init__(self, config)
self.serverConfig = inject.instance(Config)
@coroutine
def onJoin(self, details):
logging.debug('********** getJustificationsStockByUser **********')
        date = datetime.datetime.strptime(dateParam, "%d/%m/%Y")  # the examples pass a date with no time component
justificationsStock = yield from self.call('assistance.justifications.getJustificationsStockByUser', 1, userId, justificationId, date, None)
print(justificationsStock)
sys.exit()
if __name__ == '__main__':
from autobahn.asyncio.wamp import ApplicationRunner
from autobahn.wamp.serializer import JsonSerializer
url = config.configs['server_url']
realm = config.configs['server_realm']
debug = config.configs['server_debug']
json = JsonSerializer()
runner = ApplicationRunner(url=url, realm=realm, debug=debug, debug_wamp=debug, debug_app=debug, serializers=[json])
runner.run(WampMain)
|
# Author: ambiguoustexture
# Date: 2020-03-11
import pickle
import numpy as np
from scipy import io
from similarity_cosine import sim_cos
file_context_matrix_X_PC = './context_matrix_X_PC'
file_t_index_dict = './t_index_dict'
with open(file_t_index_dict, 'rb') as t_index_dict:
t_index_dict = pickle.load(t_index_dict)
context_matrix_X_PC = io.loadmat(file_context_matrix_X_PC)['context_matrix_X_PC']
word_England = context_matrix_X_PC[t_index_dict['England']]
words_similarities = [sim_cos(word_England, context_matrix_X_PC[i])
for i in range(len(t_index_dict))]
words_similarities_sorted = np.argsort(words_similarities)
words = list(t_index_dict.keys())
# the ten most similar words, skipping the last index (the query word itself)
for index in words_similarities_sorted[-2:-12:-1]:
print(words[index].ljust(12, ' '), words_similarities[index])
|
import requests
import logging
import binascii
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.81 Safari/537.36',
}
def download(url, binary=False):
logging.debug('download: %s', url)
response = requests.get(url, headers=headers)
if response.status_code == requests.codes.ok:
if binary:
# data = binascii.unhexlify(data)
data = response.content
else:
response.encoding = 'utf8'
data = response.text
logging.debug('content: %s', data)
logging.debug('length: %.2f KB', len(data) / 1000.0)
else:
        raise Exception('download failed with status %s' % response.status_code)
return data
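# Usage sketch (illustrative URLs):
# page = download('https://example.com')                           # decoded text
# icon = download('https://example.com/favicon.ico', binary=True)  # raw bytes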
|
num_of_list = int(input("Enter the length of list-:"))
number_perm = []
for num in range(num_of_list):
list_num = int(input("Enter the number in the list-:"))
number_perm.append(list_num)
def permutation(number_perm):
if len(number_perm) == 0:
return []
if len(number_perm) == 1:
return [number_perm]
next_list = []
for i in range(len(number_perm)):
m = number_perm[i]
remlist = number_perm[:i] + number_perm[i+1:]
for p in permutation(remlist):
next_list.append([m] + p)
return next_list
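# Note: the standard library enumerates these too (illustrative):
# from itertools import permutations
# list(permutations([1, 2]))  # -> [(1, 2), (2, 1)]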
data = number_perm
for p in permutation(data):
print(p)
|
import os
import sys
import zipfile
def unzip(filename: str):
try:
file = zipfile.ZipFile(filename)
dirname = filename.replace('.zip', '')
        # if a directory with the archive's name already exists, report it and skip
if os.path.exists(dirname):
print(f'{filename} dir has already existed')
return
else:
            # create the directory and extract into it
os.mkdir(dirname)
file.extractall(dirname)
file.close()
            # recursively repair the file-name encoding
rename(dirname)
    except Exception:
print(f'{filename} unzip fail')
def rename(pwd: str, filename=''):
"""压缩包内部文件有中文名, 解压后出现乱码,进行恢复"""
path = f'{pwd}/{filename}'
if os.path.isdir(path):
for i in os.scandir(path):
rename(path, i.name)
newname = filename.encode('cp437').decode('gbk')
os.rename(path, f'{pwd}/{newname}')
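# Why cp437/gbk above: zipfile decodes member names as cp437 whenever the
# archive does not set the UTF-8 flag, so re-encoding to cp437 recovers the
# original bytes, which the real encoding (GBK here) can then decode.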
def main():
"""如果指定文件,则解压目标文件,否则解压当前目录下所有文件"""
if len(sys.argv) != 1:
i: str
for i in sys.argv:
if i.endswith('.zip') and os.path.isfile(i):
unzip(i)
else:
for file in os.scandir(os.getcwd()):
if file.name.endswith('.zip') and file.is_file():
unzip(file.name)
def get_filelist(dir):
Filelist = []
for home, dirs, files in os.walk(dir):
for filename in files:
            # list of file names with full paths
Filelist.append(os.path.join(home, filename))
            # # list of file names only:
            # Filelist.append(filename)
return Filelist
if __name__ == '__main__':
Filelist = get_filelist('D:\\chengxu\\SoftwareEngineering\\probabilityTheory2\\simpleFirstCode')
print(len(Filelist))
for file in Filelist:
print(file)
unzip(file)
|
# coding:utf-8
# From 180103
# add function check_lighten()
import sys
from datetime import datetime
import socket
import json
sys.path.append('Users/better/PycharmProjects/GUI_Qt5/Intersection_Ex_2')
import rec_funcs
import copy
import new_Rect
class IM():
def __init__(self):
# preparation as a server
self.server_address = ('localhost', 6792)
self.max_size = 4096
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.server.bind(self.server_address)
self.STOP_CHAT = True
# load vehicle info
self.f = open('IM_00.json', 'r')
self.sendData = json.load(self.f)
self.f.close()
# Initiate intersection grid
self.grid = {}
self.intersec_grid = []
self.t_ahead = 35
for i in range(600, 660, 10):
for j in range(600, 660, 10):
self.grid[(i, j)] = True
# whole time step that IM will predict in current time_step
for i in range(self.t_ahead + 1):
self.intersec_grid.append(copy.deepcopy(self.grid))
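        # Reading aid: each key is the corner of a 10x10 cell inside the
        # 600-660 intersection square; True marks the cell free, and
        # check_lighten() below flips cells to False when a vehicle reserves them.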
# Initiate veh rotating angle
self.veh_num = 76
self.r = []
for i in range(self.veh_num):
self.r.append(0)
# Initiate bezier curve parameter
self.beze_t = []
self.up_left_x = []
self.up_left_y = []
self.down_left_x = []
self.down_left_y = []
self.up_right_x = []
self.up_right_y = []
self.down_right_x = []
self.down_right_y = []
for i in range(self.veh_num):
self.beze_t.append(2)
self.up_left_x.append(0)
self.up_left_y.append(0)
self.down_left_x.append(0)
self.down_left_y.append(0)
self.up_right_x.append(0)
self.up_right_y.append(0)
self.down_right_x.append(0)
self.down_right_y.append(0)
# Initiate time step
self.time_step = 0
def sendResult(self):
while self.STOP_CHAT:
self.check = 0
print('starting the server at', datetime.now())
print('waiting for a client to call.')
data, client = self.server.recvfrom(self.max_size)
data = data.decode('utf-8')
recData = json.loads(data)
# print(recData)
veh_id = recData["Veh_id"]
current = tuple(recData["current_position"])
origin = tuple(recData["origin_4"])
destination = tuple(recData["destination_4"])
speed = recData["speed"]
current_time = recData["time_step"]
pattern = recData["pattern"]
            # dispatch on the pattern code; unknown patterns fall back to
            # light_veh_pattern34, matching the catch-all else branch
            handler = getattr(self, 'light_veh_pattern%d' % pattern,
                              self.light_veh_pattern34)
            if handler(veh_id, current, origin, destination, speed, current_time):
                self.sendData[recData["Veh_id"]]["result"] = 1
            else:
                self.sendData[recData["Veh_id"]]["result"] = 0
# Send Json
mes = bytes(json.dumps(self.sendData[recData["Veh_id"]]), encoding='utf-8')
self.server.sendto(mes, client)
self.server.close()
# function to Initiate intersection grid
def init_intersec_grid(self, t_ahead):
for k in range(t_ahead):
for i in range(600, 660, 10):
for j in range(600, 660, 10):
self.intersec_grid[k][(i, j)] = True
# function to update intersection grid
def update_intersec_grid(self, current_time, current_time_step, veh_num):
sa = current_time - current_time_step
self.time_step = current_time
for i in range(sa):
self.intersec_grid.append(copy.deepcopy(self.grid))
del self.intersec_grid[0]
self.r[veh_num] = 0
# check whether grid has already been lighten up
def collision(self, veh_num, x, y, time):
if (x // 10 * 10, y // 10 * 10) in self.intersec_grid[time]:
            if not self.intersec_grid[time][(x // 10 * 10, y // 10 * 10)]:
self.beze_t[veh_num] = 2
return False
else:
return True
else:
return True
def check_lighten(self, veh_num, up_left_x, up_left_y, up_right_x, up_right_y, down_left_x, down_left_y, down_right_x, down_right_y, time):
# Up left
if not self.collision(veh_num, up_left_x, up_left_y, time):
print("upleft, pattern2", veh_num)
return False
# Up right
if not self.collision(veh_num, up_right_x, up_right_y, time):
print("upright, pattern2", veh_num)
return False
# Down left
if not self.collision(veh_num, down_left_x, down_left_y, time):
print("downleft, pattern2", veh_num)
return False
# Down right
        if (down_right_x // 10 * 10, down_right_y // 10 * 10) in \
                self.intersec_grid[time]:
            if not self.intersec_grid[time][(down_right_x // 10 * 10,
                                             down_right_y // 10 * 10)]:
print("downright, pattern2", veh_num)
self.beze_t[veh_num] = 2
return False
# lighten up all four points together
else:
if (up_left_x // 10 * 10, up_left_y // 10 * 10) in self.intersec_grid[time]:
self.intersec_grid[time][
(up_left_x // 10 * 10, up_left_y // 10 * 10)] = False
if (up_right_x // 10 * 10, up_right_y // 10 * 10) in self.intersec_grid[
time]:
self.intersec_grid[time][
(up_right_x // 10 * 10, up_right_y // 10 * 10)] = False
if (down_left_x // 10 * 10, down_left_y // 10 * 10) in self.intersec_grid[
time]:
self.intersec_grid[time][
(down_left_x // 10 * 10, down_left_y // 10 * 10)] = False
if (down_right_x // 10 * 10, down_right_y // 10 * 10) in self.intersec_grid[
time]:
self.intersec_grid[time][(
down_right_x // 10 * 10, down_right_y // 10 * 10)] = False
# lighten up all three points together
else:
if (up_left_x // 10 * 10, up_left_y // 10 * 10) in self.intersec_grid[
time]:
self.intersec_grid[time][
(up_left_x // 10 * 10, up_left_y // 10 * 10)] = False
if (up_right_x // 10 * 10, up_right_y // 10 * 10) in \
self.intersec_grid[time]:
self.intersec_grid[time][
(up_right_x // 10 * 10, up_right_y // 10 * 10)] = False
if (down_left_x // 10 * 10, down_left_y // 10 * 10) in \
self.intersec_grid[time]:
self.intersec_grid[time][
(down_left_x // 10 * 10, down_left_y // 10 * 10)] = False
# situation that middle grid exists
# x coordinate is the reason
if abs(self.up_left_x[veh_num] - self.up_right_x[veh_num]) > 10:
if ((self.up_left_x[veh_num] + 10) // 10 * 10, self.up_left_y[veh_num] // 10 * 10) in \
self.intersec_grid[time]:
self.intersec_grid[time][
((self.up_left_x[veh_num] + 10) // 10 * 10, self.up_left_y[veh_num] // 10 * 10)] = False
if ((self.up_left_x[veh_num] + 10) // 10 * 10, self.down_left_y[veh_num] // 10 * 10) in \
self.intersec_grid[time]:
self.intersec_grid[time][
((self.up_left_x[veh_num] + 10) // 10 * 10,self.down_left_y[veh_num] // 10 * 10)] = False
# y coordinate is the reason
if abs(self.up_left_y[veh_num] - self.down_left_y[veh_num]) > 10:
if (self.up_left_x[veh_num] // 10 * 10, (self.up_left_y[veh_num] + 10) // 10 * 10) in \
self.intersec_grid[time]:
self.intersec_grid[time][
(self.up_left_x[veh_num] // 10 * 10, (self.up_left_y[veh_num] + 10) // 10 * 10)] = False
if (self.up_right_x[veh_num] // 10 * 10, (self.up_left_y[veh_num] + 10) // 10 * 10) in \
self.intersec_grid[time]:
self.intersec_grid[time][
(self.up_right_x[veh_num] // 10 * 10, (self.up_left_y[veh_num] + 10) // 10 * 10)] = False
return True
# vehicles travel from N_1 to W_6
# origin and destination is a pattern of (x,y)
def light_veh_pattern11(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern11')
# Before veh get out of the intersection
while new_position[1] < destination[1]:
# Check if all parts of veh have been in intersection
if new_position[1] == 594:
if not self.intersec_grid[time][(640, 600)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p11 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * origin[0] + pow(
self.beze_t[veh_num] / 50, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * destination[1] + pow(
self.beze_t[veh_num] / 50, 2) * destination[1]
new_position = (x, y)
self.beze_t[veh_num] += 2
                # print(new_position[1])
                # print(origin[1])
                # print(-(new_position[1] - (origin[1] + speed)) / 50)
# Calculate rotation angle
if 15.0 < (-(origin[1] - (new_position[1] + speed)) / 50) * 90 <= 90.0:
self.r[veh_num] = (-(origin[1] - (new_position[1] + speed)) / 50) * 90
elif (-(origin[1] - (new_position[1] + speed)) / 50) * 90 > 90:
self.r[veh_num] = 90
else:
self.r[veh_num] = 0
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(640, 600)] = False
check_first = False
# print("check p11 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
def light_veh_pattern12(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern12')
# Before veh get out of the intersection
while new_position[1] > destination[1]:
# Check if all parts of veh have been in intersection
if new_position[1] == 666:
if not self.intersec_grid[time][(610, 650)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p12 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * origin[0] + pow(
self.beze_t[veh_num] / 50, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * destination[1] + pow(
self.beze_t[veh_num] / 50, 2) * destination[1]
new_position = (x, y)
self.beze_t[veh_num] += 2
# Calculate rotation angle
if 15.0 < ((origin[1] - (new_position[1] + speed)) / 50) * 90 <= 90.0:
self.r[veh_num] = ((origin[1] - (new_position[1] + speed)) / 50) * 90
elif ((origin[1] - (new_position[1] + speed)) / 50) * 90 > 90:
self.r[veh_num] = 90
else:
self.r[veh_num] = 0
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(610, 650)] = False
check_first = False
# print("check p12 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
# vehicles travel from W_1 to S_6
# origin and destination is a pattern of (x,y)
def light_veh_pattern13(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern13')
# Before veh get out of the intersection
while new_position[0] < destination[0]:
# Check if all parts of veh have been in intersection
if new_position[0] == 594:
if not self.intersec_grid[time][(600, 610)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p13 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * destination[0] + pow(
self.beze_t[veh_num] / 50, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * origin[1] + pow(
self.beze_t[veh_num] / 50, 2) * destination[1]
new_position = (x, y)
self.beze_t[veh_num] += 2
# Calculate rotation angle
if 15.0 < (-(origin[0] - (new_position[0] + speed)) / 50) * 90 <= 90.0:
self.r[veh_num] = (-(origin[0] - (new_position[0] + speed)) / 50) * 90
elif (-(origin[0] - (new_position[0] + speed)) / 50) * 90 > 90:
self.r[veh_num] = 90
else:
self.r[veh_num] = 0
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(600, 610)] = False
check_first = False
# print("check p13 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
# vehicles travel from E_5 to N_2
# origin and destination is a pattern of (x,y)
def light_veh_pattern14(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern14')
# Before veh get out of the intersection
while new_position[0] > destination[0]:
# Check if all parts of veh have been in intersection
if new_position[0] == 666:
if not self.intersec_grid[time][(650, 640)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p14 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * destination[0] + pow(
self.beze_t[veh_num] / 50, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 50), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 50) * (
1 - self.beze_t[veh_num] / 50) * origin[1] + pow(
self.beze_t[veh_num] / 50, 2) * destination[1]
new_position = (x, y)
self.beze_t[veh_num] += 2
# Calculate rotation angle
if 15.0 < ((origin[0] - (new_position[0] + speed)) / 50) * 90 <= 90.0:
self.r[veh_num] = ((origin[0] - (new_position[0] + speed)) / 50) * 90
elif ((origin[0] - (new_position[0] + speed)) / 50) * 90 > 90:
self.r[veh_num] = 90
else:
self.r[veh_num] = 0
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_t_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(650, 640)] = False
check_first = False
# print("check p14 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
# vehicles travel from N_5 to S_5
# origin and destination is a pattern of (x,y)
def light_veh_pattern21(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# to light up grid(320, 300)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern21')
# Before veh get out of the intersection
while new_position[1] < destination[1]:
            # Straight-line motion across the intersection
x = new_position[0]
y = new_position[1] + speed
new_position = (x, y)
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num], self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num], self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
print("check p21 current_time", current_time)
# print(self.intersec_grid[time])
# print('time', time)
# print('veh_num', veh_num)
# print(self.beze_t)
# print(self.beze_t[veh_num])
            # print('new_position', new_position)
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
def light_veh_pattern22(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# to light up grid(320, 300)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern22')
# Before veh get out of the intersection
while new_position[1] > destination[1]:
            # Straight-line motion (speed treated as a magnitude, as in pattern 21);
            # here y must decrease toward the destination for the loop to terminate
            x = new_position[0]
            y = new_position[1] - speed
new_position = (x, y)
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num], self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num], self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
# print("check p22 current_time", current_time)
# print(self.intersec_grid[time])
# print('time', time)
# print('veh_num', veh_num)
# print(self.beze_t)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
def light_veh_pattern23(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# to light up grid(320, 300)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
        print('Pattern23')
# Before veh get out of the intersection
while new_position[0] < destination[0]:
            # Straight-line motion across the intersection
x = new_position[0] + speed
y = new_position[1]
new_position = (x, y)
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num], self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num], self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
# print("check p23 current_time", current_time)
# print(self.intersec_grid[time])
# print('time', time)
# print('veh_num', veh_num)
# print(self.beze_t)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
def light_veh_pattern24(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# to light up grid(320, 300)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
        print('Pattern24')
# Before veh get out of the intersection
while new_position[0] > destination[0]:
            # Straight-line motion (speed treated as a magnitude); here x must
            # decrease toward the destination for the loop to terminate
            x = new_position[0] - speed
            y = new_position[1]
new_position = (x, y)
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num], self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num], self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
# print("check p24 current_time", current_time)
# print(self.intersec_grid[time])
# print('time', time)
# print('veh_num', veh_num)
# print(self.beze_t)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
# vehicles travel from N_5 to E_2
# origin and destination is a pattern of (x,y)
def light_veh_pattern31(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern31')
# Before veh get out of the intersection
while new_position[1] < destination[1]:
if new_position[1] == 594:
if not self.intersec_grid[time][(640, 600)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p31 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# All parts of veh have been in intersection
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * origin[0] + pow(
self.beze_t[veh_num] / 20, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * destination[1] + pow(
self.beze_t[veh_num] / 20, 2) * destination[1]
new_position = (x, y)
# Calculate rotation angle
if 15.0 < (-(origin[1] - (new_position[1] + speed)) / 20) * 90 <= 90.0:
self.r[veh_num] = -(-(origin[1] - (new_position[1] + speed)) / 20) * 90
elif (-(origin[1] - (new_position[1] + speed)) / 20) * 90 > 90:
self.r[veh_num] = -90
else:
self.r[veh_num] = 0
self.beze_t[veh_num] += 2
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(640, 600)] = False
check_first = False
# print("check p31 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r:', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
# vehicles travel from S_3 to W_4
# origin and destination is a pattern of (x,y)
def light_veh_pattern32(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern32')
# Before veh get out of the intersection
while new_position[1] > destination[1]:
if new_position[1] == 666:
if not self.intersec_grid[time][(610, 650)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p32 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# All parts of veh have been in intersection
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * origin[0] + pow(
self.beze_t[veh_num] / 20, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * destination[1] + pow(
self.beze_t[veh_num] / 20, 2) * destination[1]
new_position = (x, y)
# Calculate rotation angle
if 15.0 < ((origin[1] - (new_position[1] + speed)) / 20) * 90 <= 90.0:
self.r[veh_num] = ((origin[1] - (new_position[1] + speed)) / 20) * 90
elif ((origin[1] - (new_position[1] + speed)) / 20) * 90 > 90:
self.r[veh_num] = 90
else:
self.r[veh_num] = 0
self.beze_t[veh_num] += 2
# Calculate the big Square's coordinate
(self.up_left_x[veh_num], self.up_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[0]
(self.down_left_x[veh_num], self.down_left_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[1]
(self.up_right_x[veh_num], self.up_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[2]
(self.down_right_x[veh_num], self.down_right_y[veh_num]) = new_Rect.new_v_rec(x, y, 0)[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(610, 650)] = False
check_first = False
# print("check p32 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r:', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
# Initiate beze_t
self.beze_t[veh_num] = 2
return True
# vehicles travel from W_2 to N_2
# origin and destination is a pattern of (x,y)
def light_veh_pattern33(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern33')
# Before veh get out of the intersection
while new_position[0] < destination[0]:
if new_position[0] == 594:
if not self.intersec_grid[time][(600, 610)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p33 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# All parts of veh have been in intersection
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * destination[0] + pow(
self.beze_t[veh_num] / 20, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * origin[1] + pow(
self.beze_t[veh_num] / 20, 2) * destination[1]
new_position = (x, y)
# Calculate rotation angle
if 15.0 < -((origin[0] - (new_position[0] + speed)) / 20) * 90 <= 90.0:
                self.r[veh_num] = ((origin[0] - (new_position[0] + speed)) / 20) * 90
elif -((origin[0] - (new_position[0] + speed)) / 20) * 90 > 90:
self.r[veh_num] = -90
else:
self.r[veh_num] = 0
self.beze_t[veh_num] += 2
                # Calculate the big square's corner coordinates
                corners = new_Rect.new_v_rec(x, y, 0)
                (self.up_left_x[veh_num], self.up_left_y[veh_num]) = corners[0]
                (self.down_left_x[veh_num], self.down_left_y[veh_num]) = corners[1]
                (self.up_right_x[veh_num], self.up_right_y[veh_num]) = corners[2]
                (self.down_right_x[veh_num], self.down_right_y[veh_num]) = corners[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(600, 610)] = False
check_first = False
# print("check p33 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r:', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
        # Reset beze_t for the next run
self.beze_t[veh_num] = 2
return True
# vehicles travel from E_5 to S_5
    # origin and destination are (x, y) tuples
def light_veh_pattern34(self, veh_num, current, origin, destination, speed, current_time):
new_position = current
time = 0
# To light up grid(270, 270)
check_first = False
# Initiate intersection grid
if self.time_step == 0:
self.init_intersec_grid(self.t_ahead)
if current_time > self.time_step:
self.update_intersec_grid(current_time, self.time_step, veh_num)
print('Pattern34')
        # Before the vehicle gets out of the intersection
while new_position[0] > destination[0]:
if new_position[0] == 666:
if not self.intersec_grid[time][(650, 640)]:
print('firstgrid')
return False
else:
new_position = (origin[0], origin[1])
check_first = True
# print("check p34 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position)
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
else:
# All parts of veh have been in intersection
# Calculate trajectory by using Bezier Curve
x = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[0] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * destination[0] + pow(
self.beze_t[veh_num] / 20, 2) * destination[0]
y = pow(1 - (self.beze_t[veh_num] / 20), 2) * origin[1] + 2 * (self.beze_t[veh_num] / 20) * (
1 - self.beze_t[veh_num] / 20) * origin[1] + pow(
self.beze_t[veh_num] / 20, 2) * destination[1]
# Calculate rotation angle
                if 15.0 < ((origin[0] - (new_position[0] + speed)) / 20) * 90 <= 90.0:
                    self.r[veh_num] = -((origin[0] - (new_position[0] + speed)) / 20) * 90
                elif ((origin[0] - (new_position[0] + speed)) / 20) * 90 > 90:
                    self.r[veh_num] = -90
                else:
                    self.r[veh_num] = 0
self.beze_t[veh_num] += 2
new_position = (x, y)
                # Calculate the big square's corner coordinates
                corners = new_Rect.new_v_rec(x, y, 0)
                (self.up_left_x[veh_num], self.up_left_y[veh_num]) = corners[0]
                (self.down_left_x[veh_num], self.down_left_y[veh_num]) = corners[1]
                (self.up_right_x[veh_num], self.up_right_y[veh_num]) = corners[2]
                (self.down_right_x[veh_num], self.down_right_y[veh_num]) = corners[3]
if not self.check_lighten(veh_num, self.up_left_x[veh_num], self.up_left_y[veh_num],
self.up_right_x[veh_num], self.up_right_y[veh_num],
self.down_left_x[veh_num], self.down_left_y[veh_num],
self.down_right_x[veh_num], self.down_right_y[veh_num], time):
return False
if check_first:
self.intersec_grid[time][(650, 640)] = False
check_first = False
# print("check p34 current_time", current_time)
# print('time', time)
# print(self.beze_t[veh_num])
# print('new_position', new_position, 'r', self.r[veh_num])
# print(self.up_left_x[veh_num], self.up_left_y[veh_num])
# print(self.up_right_x[veh_num], self.up_right_y[veh_num])
# print(self.down_left_x[veh_num], self.down_left_y[veh_num])
# print(self.down_right_x[veh_num], self.down_right_y[veh_num])
# print(self.intersec_grid[time])
time += 1
        # Reset beze_t for the next run
self.beze_t[veh_num] = 2
return True
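    # For reference: each pattern above evaluates the same quadratic Bezier
    # curve B(t) = (1-t)^2*P0 + 2t(1-t)*P1 + t^2*P2 with t = beze_t/20; only
    # the control point P1 changes with the turn direction (e.g. pattern33
    # uses P1 = (destination[0], origin[1])). A small helper distilled from
    # the expressions above, added for illustration only:
    @staticmethod
    def bezier_point(t, p0, p1, p2):
        x = pow(1 - t, 2) * p0[0] + 2 * t * (1 - t) * p1[0] + pow(t, 2) * p2[0]
        y = pow(1 - t, 2) * p0[1] + 2 * t * (1 - t) * p1[1] + pow(t, 2) * p2[1]
        return (x, y)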
if __name__ == '__main__':
test = IM().sendResult()
# IM.sendResult()
# sys.exit(test.exec_())
|
from collections import OrderedDict
from network.rnn_5 import CLSTM_cell
# build model
# in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]
convlstm_encoder_params = [
[
OrderedDict({'conv1_leaky_1': [2, 16, 3, 1, 1]}),
OrderedDict({'conv2_leaky_1': [32, 32, 3, 2, 1]}),
OrderedDict({'conv3_leaky_1': [64, 64, 3, 2, 1]}),
],
[
        CLSTM_cell(shape=(64,64), input_channels=16, filter_size=5, num_features=32),  # when called, dispatches to the ConvRNN implementation
CLSTM_cell(shape=(32,32), input_channels=32, filter_size=5, num_features=64),
CLSTM_cell(shape=(16,16), input_channels=64, filter_size=5, num_features=64)
]
]
convlstm_decoder_params = [
[
OrderedDict({'deconv1_leaky_1': [64, 64, 4, 2, 1]}),
OrderedDict({'deconv2_leaky_1': [64, 64, 4, 2, 1]}),
OrderedDict({
'conv3_leaky_1': [32, 16, 3, 1, 1],
'conv4_none_1': [16, 2, 1, 1, 0]
}),
],
[
CLSTM_cell(shape=(16,16), input_channels=64, filter_size=5, num_features=64),
CLSTM_cell(shape=(32,32), input_channels=64, filter_size=5, num_features=64),
CLSTM_cell(shape=(64,64), input_channels=64, filter_size=5, num_features=32),
]
]
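# A minimal sketch (added; not from the original repo) of how such parameter
# lists are typically consumed: each OrderedDict value is
# [in_channels, out_channels, kernel_size, stride, padding], and the key name
# encodes the layer type ('deconv' -> transposed conv) and the activation
# ('leaky' -> LeakyReLU, 'none' -> no activation). The helper name
# make_conv_stage is hypothetical.
import torch.nn as nn
def make_conv_stage(layer_dict):
    layers = []
    for name, (cin, cout, k, s, p) in layer_dict.items():
        if name.startswith('deconv'):
            layers.append(nn.ConvTranspose2d(cin, cout, k, s, p))
        else:
            layers.append(nn.Conv2d(cin, cout, k, s, p))
        if 'leaky' in name:
            layers.append(nn.LeakyReLU(negative_slope=0.2, inplace=True))
    return nn.Sequential(*layers)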
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import json
app = dash.Dash()
with open("contrCode.json") as f:
country_codes = json.load(f)
with open("dataToUse.json") as f:
data = json.load(f)
app.layout = html.Div([
html.Div([
dcc.Location(id='url', refresh=False),
html.H1(id='country'),
html.H3(id='total'),
html.Div([
dcc.RadioItems(
id='graph-type',
options=[{'label': i, 'value': k} for k, i in
enumerate(['Minimum', 'Average', 'Maximum'])],
value=0,
labelStyle={'display': 'inline-block'}
)
],
style={'width': '48%', 'display': 'inline-block'}),
]),
dcc.Graph(id='indicator-graphic'),
dcc.Slider(
id='days-slider',
min=1,
max=31,
value=1,
step=1,
        marks={str(day): str(day) for day in range(1, 32)}
)
])
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
@app.callback(
dash.dependencies.Output('indicator-graphic', 'figure'),
[dash.dependencies.Input('url', 'pathname'),
dash.dependencies.Input('graph-type', 'value'),
dash.dependencies.Input('days-slider', 'value')])
def update_graph(country, graph_type, days_slider):
    country_data = data[country.strip("/")]
groups = list(country_data.keys())
values = [int(country_data[group][graph_type] * days_slider) for group in groups]
return {
'data': [go.Pie(
labels=groups,
values=values,
textinfo='value',
hoverinfo='label+percent'
)],
'layout': go.Layout(
)
}
@app.callback(
dash.dependencies.Output('country', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def update_country(country):
return country_codes[country.strip("/")]
@app.callback(
dash.dependencies.Output('total', 'children'),
[dash.dependencies.Input('url', 'pathname'),
dash.dependencies.Input('graph-type', 'value'),
dash.dependencies.Input('days-slider', 'value')])
def update_total(country, graph_type, days_slider):
    country_data = data[country.strip("/")]
groups = list(country_data.keys())
values = [int(country_data[group][graph_type] * days_slider) for group in groups]
return "Total: {} RUB".format(sum(values))
if __name__ == '__main__':
app.run_server()
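# Usage note (added; inferred from the callbacks above): both JSON files are
# keyed by the URL path, so opening e.g. http://127.0.0.1:8050/RU looks up
# data["RU"] and country_codes["RU"]. "RU" is only an illustrative key; the
# real keys depend on the contents of contrCode.json.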
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata (eneldo@marcos.do)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..tools import is_ncf
from openerp import fields, models, api
import openerp.addons.decimal_precision as dp
from openerp import netsvc
class cjc_invoice_wizard(models.TransientModel):
_name = "cjc.invoice.wizard"
@api.model
def _get_reference_type(self):
return [('none', u'Referencia libre / Nº Fact. Proveedor'),
('01', '01 - Gastos de personal'),
('02', '02 - Gastos por trabajo, suministros y servicios'),
('03', '03 - Arrendamientos'),
('04', '04 - Gastos de Activos Fijos'),
('05', u'05 - Gastos de Representación'),
('06', '06 - Otras Deducciones Admitidas'),
('07', '07 - Gastos Financieros'),
('08', '08 - Gastos Extraordinarios'),
('09', '09 - Compras y Gastos que forman parte del Costo de Venta'),
('10', '10 - Adquisiciones de Activos'),
('11', '11 - Gastos de Seguro')
]
@api.model
def _get_journals(self):
if self.env.context.get("active_model", False):
active_model = self.pool.get("account.bank.statement").browse(self.env.cr, self.env.uid,
self.env.context["active_id"])
informal_journal = active_model.journal_id.informal_journal_id
gastos_journal_id = active_model.journal_id.gastos_journal_id
purchase_journal_id = active_model.journal_id.purchase_journal_id
res = []
res.append((informal_journal.id, informal_journal.name))
res.append((gastos_journal_id.id, gastos_journal_id.name))
res.append((purchase_journal_id.id, purchase_journal_id.name))
if len(res) != 3:
                raise models.except_orm('Configuración pendiente!',
                                        "Se deben configurar los diarios para este tipo de documento.")
return tuple(res)
company_id = fields.Many2one('res.company', 'Company', default=1)
partner_id = fields.Many2one("res.partner", "Proveedor")
reference_type = fields.Selection(_get_reference_type, "Tipo de comprobante", required=True)
date = fields.Date("Fecha", required=True, default=fields.Date.context_today)
concept = fields.Char("Concepto", required=True)
ncf = fields.Char("NCF", size=19)
journal_id = fields.Many2one("account.journal", "Diario de compra",
                                 domain=[('ncf_special', 'in', ('gasto', 'informal', 'purchase'))], required=True)
line_ids = fields.One2many("cjc.invoice.line.wizard", "invoice_id", "Productos", select=False, required=True,
ondelete='cascade')
ncf_requierd = fields.Boolean("NCF Requerido.", default=False)
ncf_minor = fields.Boolean(default=False)
@api.onchange("journal_id")
def onchange_journal(self):
if self.journal_id:
self.ncf_requierd = True
self.ncf_minor = False
if self.journal_id.ncf_special in ['gasto', 'informal']:
self.ncf_requierd = False
if self.journal_id.special_partner:
self.ncf_minor = True
self.partner_id = self.journal_id.special_partner.id
@api.model
def _parse_vals(self, current_model):
vals = {}
for inv in self:
journal_obj = self.env["account.journal"].browse(int(inv.journal_id))
if not journal_obj.default_credit_account_id.id:
                raise models.except_orm('Configuración pendiente!', "Se deben configurar las cuentas para este diario.")
elif not inv.line_ids:
raise models.except_orm('Registro sin productos!', "Debe de registrar por lo menos un producto.")
ncf_required = True
if journal_obj.ncf_special in ['gasto', 'informal']:
ncf_required = False
if ncf_required and not is_ncf(inv.ncf.encode("ascii")):
raise models.except_orm(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
vals.update({
u'account_id': current_model.journal_id.default_credit_account_id.id,
u'check_total': 0,
u'child_ids': [[6, False, []]],
u'comment': "Factura de caja chica",
u'company_id': inv.company_id.id,
u'currency_id': journal_obj.company_id.currency_id.id,
u'date_due': False,
u'date_invoice': self.date,
u'fiscal_position': self.partner_id.property_account_position.id,
u'internal_number': self.ncf,
u'journal_id': int(self.journal_id.id),
u'message_follower_ids': False,
u'message_ids': False,
u'name': False,
u'ncf_required': ncf_required,
u'origin': current_model.name,
u'parent_id': False,
u'partner_bank_id': False,
u'partner_id': self.partner_id.id or self.journal_id.special_partner.id,
u'payment_term': False,
u'period_id': current_model.period_id.id,
u'reference': self.ncf,
u'reference_type': self.reference_type,
u'supplier_invoice_number': False,
u'tax_line': [],
u'user_id': self.env.uid,
u'pay_to': current_model.journal_id.pay_to.id,
u'invoice_line': []
})
for line in inv.line_ids:
line_list = [0, False]
line_dict = {}
line_dict.update({
u'account_analytic_id': False,
u'account_id': line.concept_id.account_expense.id,
u'asset_category_id': False,
u'discount': 0,
u'invoice_line_tax_id': [[6, False, [t.id for t in line.concept_id.supplier_taxes_id]]],
u'name': line.concept_id.name,
u'price_unit': abs(line.amount),
# u'product_id': line.concept_id.product_id.id,
u'quantity': 1,
u'uos_id': 1
})
line_list.append(line_dict)
vals["invoice_line"].append(line_list)
context = {u'default_type': u'in_invoice', u'journal_type': u'purchase'}
result = self.env["account.invoice"].with_context(context).create(vals)
return result
@api.multi
def create_purchase(self):
current_model = self.pool.get(self.env.context['active_model']).browse(self.env.cr, self.env.uid, self.env.context['active_id'])
purchase_invoice_id = self._parse_vals(current_model)
inv = self.env["account.invoice"].browse(purchase_invoice_id.id)
inv.check_total = inv.amount_total
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(self.env.uid, 'account.invoice', inv.id, 'invoice_open', self.env.cr)
lines_vals = {u'account_id': current_model.journal_id.default_debit_account_id.id,
u'amount': inv.amount_total * -1,
u'analytic_account_id': False,
u'date': inv.date_invoice,
u'name': self.concept,
u'partner_id': self.partner_id.id or self.journal_id.special_partner.id,
u'ref': inv.number,
u'sequence': 0,
u'statement_id': current_model.id,
u'type': u'supplier',
u'voucher_id': False,
u"invoice_id": inv.id,
u"journal_id": current_model.journal_id.id
}
self.pool.get('account.bank.statement.line').create(self.env.cr, self.env.uid, lines_vals, context=self.env.context)
return {'type': 'ir.actions.act_window_close'}
class cjc_invoice_line_wizard(models.TransientModel):
_name = "cjc.invoice.line.wizard"
concept_id = fields.Many2one("marcos.cjc.concept", "Conceptos", required=True)
amount = fields.Float('Amount', digits_compute=dp.get_precision('Account'), required=True, default=1)
invoice_id = fields.Many2one("cjc.invoice.wizard", "Factura", ondelete='cascade', select=True)
# "quantity": fields.float('Quantity', digits_compute= dp.get_precision('Product Unit of Measure'), required=True),
|
# Date: 10/09/2020
# Author: rohith mulumudy
# Description: contains configuration data.
# Check the keys which are tagged "Important" before execution. #Important
## Certificate Fetching Constants
### Contains domains list
cert_in_file = "sample.txt" # Important
### File that stores domains which threw error while fetching certificates
cert_err_file = "error_hosts.txt"
### File that stores certificate data
cert_out_file = "certs.json" # should be a json file
### File that temporarily stores certificate data (before parsing)
cert_tmp_file = "certs_temp.json"
### If True, fetches the full certificate chain; otherwise only the end-entity certificate
cert_chain_flag = False
### If True, continues from where it stopped; otherwise starts the run from scratch
resume_flag = False # Important
########################################################################################
## Preprocessing
### urllib timeout
timeout = 10 # Important
########################################################################################
## get_san_domains
### File that stores san domains
san_file = "sans.txt"
########################################################################################
## General
### Optimal for a 4-core machine; any value >= 64 works well
thread_count = 128 # Important
### Directory
directory = "data_files"
### display flag
display_flag = True # Important
### refreshes display after the specified amount of hosts are processed
display_rate = 100
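# A minimal usage sketch (added; the module name "config" is an assumption
# based on how constants files like this are usually imported):
#   import config
#   with open(config.cert_in_file) as f:
#       domains = [line.strip() for line in f if line.strip()]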
|
# Generated by Django 3.2.8 on 2021-10-28 21:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chatbot', '0006_auto_20211028_1611'),
]
operations = [
migrations.AddField(
model_name='trainingmodels',
name='acc_train',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='trainingmodels',
name='acc_validate',
field=models.FloatField(null=True),
),
]
|
import numpy as np
from permaviss.gauss_mod_p.gauss_mod_p import gauss_col
def test_gauss_col():
A1 = np.array([[0, 1, 1], [1, 1, 0], [1, 1, 0]])
R1 = np.array([[0, 1, 0], [1, 0, 0], [1, 0, 0]])
T1 = np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]])
eq1 = np.array([R1, T1])
eq2 = np.array(gauss_col(A1, 2))
assert np.array_equal(eq1, eq2)
A2 = np.array([[2, 1, 0, 1], [3, 1, 2, 3], [4, 1, 4, 3], [2, 1, 1, 0]])
R2 = np.array([[2, 0, 4, 3], [3, 2, 2, 0], [4, 4, 0, 0], [2, 0, 0, 0]])
T2 = np.array([[1, 2, 1, 4], [0, 1, 2, 4], [0, 0, 1, 3], [0, 0, 0, 1]])
eq1 = np.array([R2, T2])
eq2 = np.array(gauss_col(A2, 5))
assert np.array_equal(eq1, eq2)
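# Added sanity check: the fixtures above imply gauss_col(A, p) returns a pair
# (R, T) with A @ T == R (mod p) -- the defining property of a column
# reduction via the change of basis T. This asserts that invariant directly.
def test_gauss_col_invariant():
    fixtures = (
        (np.array([[0, 1, 1], [1, 1, 0], [1, 1, 0]]), 2),
        (np.array([[2, 1, 0, 1], [3, 1, 2, 3], [4, 1, 4, 3], [2, 1, 1, 0]]), 5),
    )
    for A, p in fixtures:
        R, T = gauss_col(A, p)
        assert np.array_equal(np.matmul(A, T) % p, np.array(R))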
|
#!/usr/bin/env python
'''Helping functions'''
__author__ = 'Denys Tarnavskyi'
__copyright__ = 'Copyright 2018, RPD site project'
__license__ = 'MIT'
__version__ = '1.0'
__email__ = 'marzique@gmail.com'
__status__ = 'Development'
import re
import os
import secrets
from PIL import Image
from itsdangerous import URLSafeTimedSerializer
from rpd_site import app, db
from .constants import VAR_MAIL_SALT, VAR_SAFE_TIMED_KEY, VAR_PASSWORD_SALT, VAR_MIN_PASS_LEN
from .models import Role, User, Post, Upload
def create_role(role_name):
'''
    adds a new role to the database; returns True on success, False if it already exists
'''
role_search = Role.query.filter_by(name=role_name).first()
if not role_search:
role = Role(name=role_name)
db.session.add(role)
db.session.commit()
        print('New role ' + role_name + ' added!')
        return True
else:
return False
def delete_role(role_name):
'''
    deletes a role from the database
'''
role = Role.query.filter_by(name=role_name).first()
if role:
db.session.delete(role)
db.session.commit()
print('Role ' + role_name + ' deleted!')
else:
print('Role ' + role_name + ' doesn\'t exist!')
def get_all_roles():
'''
    gets all role names from the db table
'''
names = []
roles = Role.query.all()
for role in roles:
names.append(role.name)
return names
def get_role_tuples():
'''
make tuples duplicating role names for SelectField choices
'''
role_tuples = []
tuppy = zip(get_all_roles(), get_all_roles())
for item1, item2 in tuppy:
if item1 not in ['unconfirmed', 'confirmed']:
role_tuples.append((item1, item2))
return role_tuples
# TODO: definitely refactor this to something better than separate functions
def get_number_of_users():
    return User.query.count()
def get_number_of_posts():
    return Post.query.count()
def get_number_of_uploads():
    return Upload.query.count()
# All useful functions and objects for routes
def month_translation(eng_month):
'''
    Translates an English month name to Ukrainian
    '''
    month_translations = {'January': 'Січня', 'February': 'Лютого', 'March': 'Березня',
'April': 'Квітня', 'May': 'Травня', 'June': 'Червня', 'July': 'Липня',
'August': 'Серпня', 'September': 'Вересня', 'October': 'Жовтня',
'November': 'Листопада', 'December': 'Грудня'}
    ukrainian_month = month_translations[eng_month]
    return ukrainian_month
def password_check(password):
"""
Verify the strength of 'password'
A password is considered strong if:
8 characters length or more
1 digit or more
1 symbol or more
1 uppercase letter or more
1 lowercase letter or more
returns True if all checks passed
https://stackoverflow.com/a/32542964/10103803
"""
length_error = len(password) < VAR_MIN_PASS_LEN # length
digit_error = re.search(r"\d", password) is None # digits
uppercase_error = re.search(r"[A-Z]", password) is None # uppercase
lowercase_error = re.search(r"[a-z]", password) is None # lowercase
symbol_error = re.search(r"\W", password) is None # symbols
password_ok = not (
length_error or digit_error or uppercase_error or lowercase_error or symbol_error) # overall result
return password_ok
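# Example (added): assuming VAR_MIN_PASS_LEN is 8 as the docstring states,
# password_check("Str0ng!pw") is True, while password_check("weak") fails the
# length, digit, uppercase and symbol checks and returns False.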
def save_picture(form_picture, size_crop, is_avatar):
'''
    Saves a cropped image under a randomised
    filename and returns that filename (with the input extension)
'''
random_hex = secrets.token_hex(8)
# get image extension
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
if is_avatar:
picture_path = os.path.join(
app.root_path, 'static/img/avatars', picture_fn)
output_size = size_crop
i = Image.open(form_picture)
        # crop to a top square before thumbnailing so the result stays square
f_width, _ = i.size
i = i.crop((0, 0, f_width, f_width))
i.thumbnail(output_size)
i.save(picture_path)
else:
picture_path = os.path.join(app.root_path, 'static/img', picture_fn)
output_size = size_crop
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
def generate_confirmation_token(email):
'''
Creates unique token for each email passed
:param email: email to create token for, used as part of the salt to make each token different.
:return: token
'''
serializer = URLSafeTimedSerializer(VAR_SAFE_TIMED_KEY)
return serializer.dumps(email, salt=VAR_MAIL_SALT + email)
def generate_password_token(email):
    '''
    Creates a password-reset token for the given email.
    Arguments:
        email {string} -- email to make the token for
    '''
serializer = URLSafeTimedSerializer(VAR_SAFE_TIMED_KEY)
return serializer.dumps(email, salt=VAR_PASSWORD_SALT)
def role_label(role_name):
'''generate HTML button snippet for role'''
label_classes = {'unconfirmed': 'btn-default',
'confirmed': 'btn-primary',
'admin': 'btn-success',
'student': 'btn-info',
'teacher': 'btn-warning',
'moderator': 'btn-danger'
}
return '<button type="button" style="padding: 2px;" data-whatever="' + role_name + '"\
data-toggle="modal" data-target="#deleteModal" class="btn btn-sm '\
+ label_classes[role_name] + '">' + role_name + '</button>'
# TODO: rename span to button (figure out how to refactor variable across all files like in PyCharm)
def role_spans(user):
'''Generate list of role spans(buttons!) for specific user'''
spans = []
roles = user.get_roles()
for role in roles:
span = role_label(role)
spans.append(span)
return spans
# TODO: think about it
# def add_superadmin():
# user = User.query.filter_by(id=1).first()
# user.add_roles('superadmin')
|
def es5(sequenza):
n = len(sequenza)
T = [0 for _ in range(n)]
T[0] = 1
max_index = 0
for i in range(1, n):
T[i] = max([T[j] for j in range(0, i) if sequenza[j] < sequenza[i]],
default = 0) + 1
max_index = max_index if T[max_index] >= T[i] else i
    # Reconstruct the subsequence.
nuova_sequenza = ["_" for _ in range(n)]
length_seq = T[max_index]
last_index = max_index
while length_seq > 0:
nuova_sequenza[last_index] = sequenza[last_index]
length_seq -= 1
if length_seq > 0:
left_index = last_index - 1
while sequenza[left_index] > sequenza[last_index] \
or T[left_index] != length_seq:
left_index -= 1
last_index = left_index
return nuova_sequenza
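# Example (added): es5 keeps one longest strictly increasing subsequence in
# place and blanks out the remaining positions.
if __name__ == "__main__":
    assert es5([3, 1, 2, 5, 4]) == ['_', 1, 2, 5, '_']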
|
import random
from adecide import adecide
class attack:
def start(self,na,nd):
self.x=0
self.l=adecide()
self.m=attack()
if adecide.decide(self.l,na,nd)==1:
self.x=adecide.war(self.m,na,nd)
else:
pass
return self.x
def war(self,na,nd):
self.a=0
self.m=0
while ((self.a==0)):
self.t=attack()
self.l=adecide()
#print("war!")
self.m=self.m+1
if adecide.win(self.l,nd)>0:
self.k=1
break
else:
self.k=0
self.numadie=attack.numadie(self.t,na)
self.numddie=attack.numddie(self.t,nd)
self.aroll=attack.aroll(self.t,self.numadie)
self.droll=attack.droll(self.t,self.numddie)
self.out=attack.compare(self.t,self.aroll,self.droll)
na=attack.aresult(self.t,self.out,na)
nd=attack.dresult(self.t,self.out,nd)
self.a=adecide.cont(self.l,na,nd)
print("numturns= ",self.m)
return self.k
def numadie(self,na):
if na>3:
self.numadie=3
else:
self.numadie=na-1
#print("numadie= ",self.numadie)
return self.numadie
def numddie(self,nd):
if nd>1:
self.numddie=2
elif nd==1:
self.numddie=1
else:
self.numddie=0
#print("numddie= ",self.numddie)
return self.numddie
def diecreate(self,num):
self.out=[]
for self.x in range(num):
self.out.append(0)
#print("diecreated= ",self.out)
return self.out
def aroll(self,num):
self.j=attack()
self.aroll=attack.diecreate(self.j,num)
for self.x in range(num):
self.aroll[self.x]=random.randint(1,6)
self.aroll.sort(reverse=True)
#print("aroll= ",self.aroll)
        return self.aroll
def droll(self,num):
self.j=attack()
self.droll=attack.diecreate(self.j,num)
for self.x in range(num):
self.droll[self.x]=random.randint(1,6)
self.droll.sort(reverse=True)
#print("droll= ",self.droll)
return self.droll
    def compare(self,aroll,droll):
        # Compare the sorted dice pairwise; only min(len(aroll), len(droll))
        # pairs are contested, which also avoids an index error when the
        # defender rolls more dice than the attacker.
        self.out=[0,0]
        for self.x in range(min(len(aroll),len(droll))):
            if aroll[self.x] > droll[self.x]:
                self.out[1]=self.out[1]-1
                #print("defense lost 1")
            else:
                self.out[0]=self.out[0]-1
        return self.out
def aresult(self,out,na):
na=na+out[0]
#print("aresult= ",na)
return na
def dresult(self,out,nd):
nd=nd+out[1]
#print("dresult= ",nd)
return nd
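# Usage sketch (added; the exact semantics depend on the companion adecide
# module, which is not shown here):
#   result = attack().start(na=10, nd=6)
# start() returns 0 when adecide.decide vetoes the attack; otherwise it
# returns the outcome flag computed by war() from adecide.win.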
|
from django.contrib import admin
from . models import joinus, researchpaper, alumini
# Register your models here.
admin.site.register(researchpaper)
admin.site.register(joinus)
admin.site.register(alumini)
|