text stringlengths 8 6.05M |
|---|
"""
MIT License
Copyright (c) 2018 Max Planck Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
import os
import glob
import typing
import pandas as pd # type: ignore
from . import util
FILE_DIRECTORY: str = os.path.dirname(os.path.realpath(__file__))
def create_star_header(names: typing.List[str], prefix: str) -> typing.List[str]:
    """
    Create a header for a star file.

    Arguments:
    names - List or array of header names
    prefix - Star file header name prefix

    Returns:
    List of header lines
    """
    header_lines: typing.List[str] = ['', 'data_', '', 'loop_']
    header_lines += util.create_header(names=names, index=True, prefix=prefix)
    return header_lines
def dump_star(file_name: str, data: pd.DataFrame, version: str) -> None:
    """
    Create a star file.

    Arguments:
    file_name - File name to export
    data - Data to export
    version - output version string

    Returns:
    None
    """
    # Translate the dataframe columns to the requested star version;
    # only columns known to that version survive.
    new_header, old_header, prefix = export_star_header(
        header_names=data.keys(), version=version
    )
    star_header = create_star_header(names=new_header, prefix=prefix)
    util.dump_file(
        file_name=file_name,
        data=data[old_header],
        header=star_header,
        vertical=True,
    )
def load_star_header(file_name: str) -> typing.Tuple[typing.List[str], int]:
    """
    Load the header information.

    Arguments:
    file_name - Path to the file that contains the header.

    Returns:
    List of header names, rows that are occupied by the header.

    Raises:
    IOError - If the file contains no header information.
    """
    start_header: bool = False
    header_names: typing.List[str] = []
    # Bug fixed: `idx` was previously unbound for an empty file, raising
    # NameError at the return statement instead of the intended IOError.
    idx: int = 0
    with open(file_name, 'r') as read:
        for idx, line in enumerate(read.readlines()):
            if line.startswith('_'):
                # First and subsequent header lines are handled identically
                # (the original duplicated the append in both branches).
                start_header = True
                header_names.append(line.strip().split()[0])
            elif start_header:
                # First non-header line after the header block -> done.
                break
    if not start_header:
        raise IOError(f'No header information found in {file_name}')
    return header_names, idx
def load_star(file_name: str) -> pd.DataFrame:
    """
    Load a star file.

    Arguments:
    file_name - Path to the star file

    Returns:
    Pandas dataframe containing the star file
    """
    header_names, skip_index = load_star_header(file_name=file_name)
    return util.load_file(
        file_name,
        names=import_star_header(header_names=header_names),
        skiprows=skip_index,
    )
def import_star_header(header_names: typing.List[str]) -> typing.List[str]:
    """
    Get the header keys.
    Detect the star version automatically.

    Arguments:
    header_names - star file header.

    Returns:
    List of new keys
    """

    def strip_prefix(name: str, prefix: str) -> str:
        # Bug fixed: the original used str.lstrip(f'_{prefix}'), which
        # strips a *character set*, not a prefix — keys whose first
        # letters happened to be in the prefix (e.g. '_rln' + 'random...')
        # were silently mangled. Remove the exact prefix instead.
        full_prefix = f'_{prefix}'
        if name.startswith(full_prefix):
            return name[len(full_prefix):]
        return name

    key_files: typing.List[str]
    star_version: typing.Dict[str, typing.Dict[str, str]]
    version_match: typing.Pattern
    versions: typing.Optional[typing.List[str]]
    import_dict: typing.Dict[str, str]
    output_header: typing.List[str]

    key_files = glob.glob(os.path.join(FILE_DIRECTORY, 'keys', 'star_keys_*.txt'))
    star_version = {}
    version_match = re.compile(r'.*star_keys_(.*)\.txt')
    versions = None
    # Build a {version: key-dict} table from every known key file.
    for file_name in sorted(key_files):
        key_match = version_match.match(file_name)
        assert key_match is not None
        star_version[key_match.group(1)] = util.parse_keys_to_dict(util.import_keys(file_name))
    # Narrow down the candidate versions until one header name pins a
    # single version (or all names are checked).
    for name in header_names:
        versions = []
        for key, value in star_version.items():
            if strip_prefix(name, value['STAR_PREFIX']) in value:
                versions.append(key)
        if not versions:
            assert False, f'Star key not known in present versions: {name}'
        elif len(versions) == 1:
            break
    assert versions is not None, 'Header names is empty!'
    output_header = []
    import_dict = star_version[versions[-1]]
    for name in header_names:
        output_header.append(import_dict[strip_prefix(name, import_dict['STAR_PREFIX'])])
    return output_header
def export_star_header(
        header_names: typing.List[str],
        version: str
) -> typing.Tuple[typing.List[str], typing.List[str], str]:
    """
    Get the header keys.

    Arguments:
    header_names - star file header.
    version - Output star file version

    Returns:
    List of new keys, List of valid old keys, prefix
    """
    export_dict: typing.Dict[str, str] = util.parse_keys_to_dict(
        util.import_keys(
            os.path.join(FILE_DIRECTORY, 'keys', f'star_keys_{version}.txt')
        ),
        export=True,
    )
    # Keep only the keys the target star version knows about.
    new_names: typing.List[str] = []
    valid_old_names: typing.List[str] = []
    for name in header_names:
        if name in export_dict:
            new_names.append(export_dict[name])
            valid_old_names.append(name)
    assert new_names
    assert valid_old_names
    return new_names, valid_old_names, export_dict['STAR_PREFIX']
|
from ax import optimize
from logging import CRITICAL
from ax.utils.common.logger import get_logger
import warnings
rt = get_logger(name='ax')
rt.setLevel(CRITICAL)
warnings.filterwarnings("ignore", category=UserWarning)
def test_intro_example():
    """Run the Ax quick-start optimization example from https://ax.dev/ ."""

    def booth(p):
        # Booth function; global minimum 0 at (x1, x2) = (1, 3).
        return (p["x1"] + 2 * p["x2"] - 7) ** 2 + (2 * p["x1"] + p["x2"] - 5) ** 2

    search_space = [
        {"name": "x1", "type": "range", "bounds": [-10.0, 10.0]},
        {"name": "x2", "type": "range", "bounds": [-10.0, 10.0]},
    ]
    best_parameters, best_values, experiment, model = optimize(
        parameters=search_space,
        # Booth function
        evaluation_function=booth,
        minimize=True,
    )
    return best_values
if __name__ == '__main__':
test_intro_example()
warning_0_1_16 = """
[INFO 09-30 20:54:49] ax.service.managed_loop: Running optimization trial 20...
/Users/petercotton/virtual-envs/tuneup/lib/python3.7/site-packages/ax/modelbridge/torch.py:311: UserWarning:
To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
"""
error_0_1_15 = """
/Users/petercotton/virtual-envs/tuneup/bin/python3 /Users/petercotton/github/tuneup/tests/test_ax.py
Traceback (most recent call last):
File "/Users/petercotton/github/tuneup/tests/test_ax.py", line 1, in <module>
from ax import optimize
File "/Users/petercotton/virtual-envs/tuneup/lib/python3.7/site-packages/ax/__init__.py", line 31, in <module>
from ax.modelbridge import Models
File "/Users/petercotton/virtual-envs/tuneup/lib/python3.7/site-packages/ax/modelbridge/__init__.py", line 10, in <module>
from ax.modelbridge.factory import (
File "/Users/petercotton/virtual-envs/tuneup/lib/python3.7/site-packages/ax/modelbridge/factory.py", line 22, in <module>
from ax.modelbridge.registry import (
File "/Users/petercotton/virtual-envs/tuneup/lib/python3.7/site-packages/ax/modelbridge/registry.py", line 48, in <module>
from ax.models.torch.botorch_modular.model import BoTorchModel
ModuleNotFoundError: No module named 'ax.models.torch.botorch_modular'
""" |
import csv, os
import pandas as pd

# Build the article-name -> article-id mapping from the id table.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
df = pd.read_csv('article-ids.csv')
articleid = df.set_index(['Article_Name']).to_dict(orient='dict')['Article_ID']
articles = list(articleid.values())
articles.sort()

# Read the finished navigation paths; the first 16 lines are file comments.
filepath = 'wikispeedia_paths-and-graph//paths_finished.tsv'
with open(filepath, 'r') as f:
    csv_reader = list(csv.reader(f, delimiter = '\t'))
csv_reader = csv_reader[16:]

pathsnoback = list() # human path, path length without back
pathsback = list() # human path, path length with back
reqpaths = dict()  # (start_id, end_id) -> shortest distance char, filled below
for row in csv_reader:
    tmp = row[3].split(';')
    if len(tmp) > 1:
        reqpaths[(articleid[tmp[0]], articleid[tmp[-1]])] = None
        # Replace each back-click '<' by the article it returns to, so that
        # len(tmp2)-1 counts every click, backs included.
        tmp2 = tmp.copy()
        i=0
        n = 1
        while(i<len(tmp2)-1):
            if tmp2[i+1] == '<':
                tmp2[i+1] = tmp2[i-n-1]
                n+=2
            else:
                n = 0
            i+=1
        pathsback.append([row[3], len(tmp2)-1])
        # Remove "page;<" pairs to obtain the effective path without backs.
        if '<' in tmp:
            i = 0
            while(i<len(tmp)-1):
                if tmp[i+1] == '<':
                    tmp.pop(i)
                    tmp.pop(i)
                    if tmp[i] == '<':
                        i-=1
                else:
                    i+=1
        pathsnoback.append([';'.join(tmp), len(tmp)-1])

# Read the shortest-path distance matrix; each data row is one source
# article, each column position one target article ('_' = unreachable).
# NOTE(review): line[j] reads a single character per distance, so this
# assumes all distances are single-digit — confirm against the data file.
i = 0 # tracks source in sortest distance txt
hp = 0
with open('wikispeedia_paths-and-graph//shortest-path-distance-matrix.txt') as f:
    for line in f:
        if (line[0].isdigit() or line[0] == '_'):
            for j in range(len(articles)):
                if (articles[i], articles[j]) in reqpaths:
                    reqpaths[(articles[i], articles[j])] = line[j]
                    # NOTE(review): line[j] is a str, so `== 0` is always
                    # False — this was probably meant to be == '0'.
                    if line[j] == 0:
                        print(articles[i], articles[j])
            i+=1

# Attach the shortest distance and the human/optimal ratio to each path;
# drop paths whose target is unreachable ('_') or whose distance is 0.
i=0
while(i < len(pathsnoback)):
    tmp = pathsnoback[i][0].split(';')
    start = articleid[tmp[0]]
    end = articleid[tmp[-1]]
    pathsnoback[i].append(reqpaths[start, end])
    pathsback[i].append(reqpaths[start, end])
    if reqpaths[start, end] != '_' and int(reqpaths[start, end]) == 0:
        print(reqpaths[start, end],start, end)
        i+=1
    elif reqpaths[start, end] != '_':
        pathsnoback[i].append( pathsnoback[i][1] / int(reqpaths[start, end]))
        pathsback[i].append( pathsback[i][1] / int(reqpaths[start, end]))
        pathsnoback[i].pop(0)
        pathsback[i].pop(0)
        i+=1
    else:
        pathsnoback.pop(i)
        pathsback.pop(i)

# Prepend the CSV header row and write both result files.
pathsnoback.insert(0, ['Human_Path_Length', 'Shortest_Path_Length', 'Ratio'])
pathsback.insert(0, ['Human_Path_Length', 'Shortest_Path_Length', 'Ratio'])
with open("finished-paths-no-back.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(pathsnoback)
with open("finished-paths-back.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(pathsback)
|
##
valor_kg = 37.00
p = 9.25
m = 18.50
g = 27.75
tamanho = str(input("Informe o tamanho desejado? ")).lower()
adicional = int(input("Deseja quantos adicionais? "))
def verificarTamanho(tamanho):
    """Return the base price for the chosen size, re-prompting until valid.

    Bug fixed: the original `while tamanho in ['p', 'm', 'g']` loop never
    ran when the FIRST input was invalid, so the function fell through and
    returned None, crashing preco() with a TypeError.
    """
    # size letter -> base price (module-level globals p, m, g)
    precos = {'p': p, 'm': m, 'g': g}
    while True:
        if tamanho in precos:
            return precos[tamanho]
        print('Infome se P, M ou G: ')
        tamanho = str(input("Informe o tamanho desejado? ")).lower()
def preco(tamanho, adicional):
    """Total price: base price for the size plus 2.50 per extra topping."""
    base = verificarTamanho(tamanho)
    return base + adicional * 2.50
print("O valor do açai é", verificarTamanho(tamanho))
print("O valor do açai com adicionais é",preco(tamanho, adicional))
|
#!/usr/bin/env python
# coding: utf-8
# ## 서울시 공공 자전거 대여정보 처리
# Seoul public bicycle rental data processing (exported notebook cells).
import pandas as pd
import numpy as np

# ### 1) Read station information
# NOTE: the original passed delimiter="," to read_excel; Excel files have
# no delimiter and the argument is not accepted by read_excel — removed.
place = pd.read_excel("./data/서울특별시 공공자전거 대여소 정보(19.12.9).xlsx")
place

# 1) Are there nulls? How many?
place.isnull().sum().sum()

# 2) List the columns that contain NaN
place.columns[place.isnull().any()].tolist()
place[place['대여소ID'].isnull() | place['대여소주소'].isnull() | place['위도'].isnull() | place['경도'].isnull() | place['기준시작일자'].isnull()]

# 3) Drop rows containing NaN (preprocessing)
place = place.dropna(axis = 0)
place

# 4) Check the station-ID dtype, then convert it to int
place['대여소ID'].dtypes
# Bug fixed: astype() returns a NEW Series; the original discarded the
# result, so the column was never actually converted.
place['대여소ID'] = place['대여소ID'].astype(int)
place

# ### 2) Read rental information
rent_info = pd.read_csv("./data/서울특별시 공공자전거 대여정보_201911_2.csv", encoding = "cp949", delimiter=",")
rent_info

# 1) Are there nulls? How many?
rent_info.isnull().sum().sum()

# 2) List the columns that contain NaN
rent_info.columns[rent_info.isnull().any()].tolist()

# 3) Drop rows containing NaN (preprocessing)
rent_info = rent_info.dropna(axis = 0)
rent_info

# 4) Check the usage-distance dtype, then convert it to int
rent_info['이용거리'].dtypes
# Same fix as above: assign the astype() result back to the column.
rent_info['이용거리'] = rent_info['이용거리'].astype(int)
rent_info
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
'''
给定一个数组A[0,1,...,n-1],请构建一个数组B[0,1,...,n-1],
其中B中的元素B[i]=A[0]*A[1]*...*A[i-1]*A[i+1]*...*A[n-1]。不能使用除法。
'''
class Solution:
    def multiply(self, A):
        """Return B where B[i] = product of all A[j] with j != i, without
        using division.

        Improved from the original O(n^2) slice-and-multiply to O(n):
        one left-to-right pass accumulates prefix products, one
        right-to-left pass folds in suffix products.
        """
        n = len(A)
        B = [1] * n
        # After this pass, B[i] holds the product of A[0..i-1].
        for i in range(1, n):
            B[i] = B[i - 1] * A[i - 1]
        # Fold in the product of A[i+1..n-1] from the right.
        suffix = 1
        for i in range(n - 1, -1, -1):
            B[i] *= suffix
            suffix *= A[i]
        return B
if __name__ == "__main__":
print Solution().multiply([1, 1, 2, 3, 4, 5]) |
'''
imputationflask.config
-------------------
Prod config, can be overwritten by instance/config.py when present
'''
# non-instance config is for prod
ENV = "prod"
DEBUG = False

# google stuff
PROJECT_NAME = 'census-impute'
TF_MODEL = 'base_census_infer'
# id of the secret holding the CSRF key (presumably Secret Manager — confirm)
CSRF_KEY_SECRET_ID = 'csrf-key'

# binaries: serialized (.dill) objects stored under the bucket below
BUCKET_ID = 'basic_census_binaries'
NUMERIC_MAPPER_PATH = 'numeric_mapper3.dill'
DATA_COLUMNS_PATH = 'data_columns.dill'
VAL2IND_PATH = 'val2ind.dill'
RECORD_DESCRIPTION_PATH = 'recordname2description2.dill'

# sql: disable SQLAlchemy's event system overhead
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
import numpy as np
def createDataSet():
    """Return the toy 2-D training points and their 'A'/'B' labels."""
    points = np.array([[1.0, 2.0], [1.2, 0.1], [0.1, 1.4],
                       [0.3, 3.5], [1.1, 1.0], [0.5, 1.5]])
    tags = np.array(['A', 'A', 'B', 'B', 'A', 'B'])
    return points, tags
def KNN_classify(k,dis,X_train,x_train,Y_test):
assert dis == 'M'or dis == 'E','dis must E or M,E means Euclidean Metric,M means Manhattan distance'
num_test = Y_test.shape[0]
num_train = X_train.shape[0]
labels_test = []
if (dis == 'E'):
for i in range(num_test):
distances = np.sqrt(np.sum(np.square(X_train - np.tile(Y_test[i,:],(num_train,1))),axis=1))
index_nearest = np.argsort(distances)
index_k = index_nearest[:k]
count_A=0
count_B=0
for j in range(k):
if x_train[index_k[j]] == 'A':
count_A = count_A + 1
if x_train[index_k[j]] == 'B':
count_B = count_B + 1
if count_A >= count_B:
labels_test.append('A')
else :
labels_test.append('B')
return labels_test
if __name__=='__main__':
group,labels = createDataSet()
y_test_pred = KNN_classify(1,'E',group,labels,np.array([[1.0,2.1],[0.4,2.0]]))
print(y_test_pred) |
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
'''
写一个函数,求两个整数之和,要求在函数体内不得使用+、-、*、/四则运算符号。
'''
class Solution:
    def Add(self, num1, num2):
        """Add two integers without using +, -, *, / by simulating
        32-bit binary addition: XOR gives the carry-less sum, AND<<1 the
        carry; repeat until no carry remains.

        The 0xFFFFFFFF masks emulate 32-bit wrap-around on Python's
        unbounded ints; the final expression maps an unsigned result
        above 0x7FFFFFFF back to its negative 32-bit value.
        """
        while(num2):
            num1,num2 = (num1^num2) & 0xFFFFFFFF,((num1&num2)<<1) & 0xFFFFFFFF
        return num1 if num1<=0x7FFFFFFF else ~(num1^0xFFFFFFFF)
if __name__ == "__main__":
print Solution().Add(1, -2) |
from animal import Animal
class Bear(Animal):
    """Bear subclass of Animal, adding the bear's type/species.

    NOTE(review): 'heath_level' / 'hapiness_level' look like typos for
    'health_level' / 'happiness_level', but they mirror the Animal
    base-class parameters — confirm there before renaming.
    """
    def __init__(self, name, age, habitat, heath_level, hapiness_level, bear_type):
        super().__init__(name, age, habitat, heath_level, hapiness_level)
        self.bear_type = bear_type  # e.g. 'polar', 'grizzly'
"""leon = Bear('simba', 5,'sabana', 50, 30)
print (leon.heath_level)
print (leon.hapiness_level)
leon.eating()
print (leon.heath_level)
print (leon.hapiness_level)
"""
|
# Generated by Django 3.0.3 on 2020-09-13 11:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Course.course_content (TextField).

    Generated by Django; avoid hand-editing beyond what Django produced.
    """
    dependencies = [
        ('app1', '0003_auto_20200913_1635'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='course_content',
            # NOTE(review): max_length on a TextField affects only form
            # validation, not the database column.
            field=models.TextField(default='these are the contents', max_length=1000),
        ),
    ]
|
from fastapi import FastAPI
from uvicorn import run
app = FastAPI()
@app.get('/')
async def index():
    """Root endpoint: returns the plain greeting string 'sup!'."""
    return 'sup!'
|
import datetime
from django.db import models
from django.utils import timezone
class UniqueId(models.Model):
    """A family's unique identifier; Member rows link to it via FK."""
    unique_id = models.CharField(max_length=200)

    def __str__(self):
        return self.unique_id
class Member(models.Model):
    """A person in a family tree, linked to relatives via M2M fields."""
    uniqueid = models.ForeignKey(UniqueId, related_name="family_members", on_delete=models.CASCADE)
    first_name = models.CharField(max_length=20)
    middle_name = models.CharField(max_length=20, null = True, blank=True)
    last_name = models.CharField(max_length=20, null=True, blank=True)
    # NOTE(review): ManyToManyField('self') is symmetrical by default, and
    # Django disallows related_name on a symmetrical self-M2M; the
    # parent/child pair is inherently asymmetric and likely needs
    # symmetrical=False — confirm and migrate carefully.  Also 'Parents'
    # breaks the lowercase field-name convention and 'childs' should be
    # 'children', but renaming changes the DB schema, so only flagged here.
    siblings = models.ManyToManyField('self', related_name="siblings")
    Parents = models.ManyToManyField('self', related_name="childs")
    childs = models.ManyToManyField('self', related_name="parents")
    partners = models.ManyToManyField('self', related_name="partners")

    def __str__(self):
        return self.first_name
|
#!/usr/bin/env python
#Bao Dang
#Assignment 2
import sys
def preorder_evaluation(L):
    """Evaluate a prefix (Polish notation) expression by scanning it
    right-to-left with a stack.

    NOTE(review): the loop index steps by -2, so L is assumed to be a
    string of single-character tokens separated by single spaces (the raw
    sys.argv[1], e.g. "+ 2 3"); multi-digit numbers or other spacing will
    break this — confirm the expected input format.
    (Python 2 file: uses the print statement.)
    """
    stack = []
    for i in range(len(L)-1, -1, -2):
        if L[i].isdigit():
            stack.append(L[i])
        else:
            # Operator: op1 (popped first) is the left operand, which is
            # the order prefix notation requires for - and /.
            op1 = int(stack.pop())
            op2 = int(stack.pop())
            if L[i] == '+':
                stack.append(op1+op2)
            elif L[i] == '-':
                stack.append(op1-op2)
            elif L[i] == '*':
                stack.append(op1*op2)
            elif L[i] == '/':
                # *1.0 forces float (true) division under Python 2
                stack.append(op1*1.0/op2)
            else:
                print "There are wrong Operators"
    return stack[0]
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Number of argument is not 2"
sys.exit(1)
else:
test = sys.argv[1]
print preorder_evaluation(test)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.autograd import Variable
import time as tm
import os
import math
INPUT_SIZE = 7
HIDDEN_SIZE = 48
OUTPUT_SIZE = 1
NUM_LAYERS = 1
RATE = 8e-3
# 建立模型
class LSTM_CONV(nn.Module):
    """1-D convolution front end followed by an LSTM and two linear heads.

    Assumes input of shape (seq, 1, INPUT_SIZE=7) — TODO confirm:
    Conv1d(1->6, k=3) yields (seq, 6, 5); the LSTM then consumes the
    size-5 feature axis; two linear layers reduce to one value per step.
    """
    def __init__(self, input_size=INPUT_SIZE, hidden_size=HIDDEN_SIZE, output_size=OUTPUT_SIZE, num_layers=NUM_LAYERS):
        super(LSTM_CONV, self).__init__()
        self.conv = nn.Conv1d(in_channels=1, out_channels=6, kernel_size=3, stride=1)
        # NOTE(review): input_size is unused — the LSTM input size is
        # hard-coded to 5 (= INPUT_SIZE - kernel_size + 1); confirm intent.
        self.rnn = nn.LSTM(5, hidden_size, num_layers)#, dropout=0.5)
        self.reg_1 = nn.Linear(hidden_size, output_size)
        self.reg_2 = nn.Linear(6, output_size)

    def forward(self, x):
        # (s, 1, 7) -> (s, 6, 5) via the conv layer
        x = self.conv(x)
        #torch.nn.ReLU()
        #print(x.shape)
        x, _ = self.rnn(x)
        #print(x.shape)
        s,b,h = x.shape
        # flatten (seq, batch) so reg_1 applies per step
        x = x.view(s*b, h)
        x = self.reg_1(x)
        #torch.nn.ReLU()
        x = x.view(s, -1)
        #print(x.shape)
        # reg_2 collapses the 6 channel outputs to a single value
        x = self.reg_2(x)
        x = x.view(s,1,1)
        return x
criterion = nn.MSELoss()
def test(data_X, data_Y):
    """Evaluate the saved LSTM_CONV checkpoint on (data_X, data_Y), print
    RMSE statistics, and plot predictions against the ground truth.

    Assumes data_X / data_Y are torch tensors, a CUDA device is available,
    and 'net_params.pkl' exists in the working directory — TODO confirm.
    """
    seq = len(data_X[:,0])
    # time axis: one sample every 2 units (matches a 0.5 sampling rate,
    # presumably — confirm)
    time = np.arange(0,seq/0.5,2)
    net = LSTM_CONV().cuda()
    net.load_state_dict(torch.load('net_params.pkl'))
    # reshape to the (seq, 1, 7) layout the network expects
    data_X = data_X.reshape(-1,1,7)
    #data_X = torch.from_numpy(data_X)
    data_Y = data_Y.reshape(-1)
    #data_Y = torch.from_numpy(data_Y)
    net.eval()
    var_data = Variable(data_X.cuda())
    pred_test = net(var_data)
    loss = criterion(pred_test.reshape(-1), data_Y.cuda())
    pred_test = pred_test.view(-1).cpu().data#.numpy()
    # per-step RMSE for the max/average statistics and the lower plot
    test_loss = []
    for i in range(len(data_Y)):
        step_loss = math.sqrt(criterion(pred_test[i], data_Y[i]))
        test_loss.append(step_loss)
    print('RMSE:', math.sqrt(loss))
    print('max RMSE:', np.max(test_loss))
    print('average RMSE: ',np.mean(test_loss))
    plt.subplot(211)
    plt.plot(time, pred_test, 'r', label='prediction')
    plt.plot(time, data_Y, 'b', label='real')
    plt.legend(loc='best')
    plt.title('result')
    plt.subplot(212)
    plt.plot(time, test_loss)
    plt.title('RMSELoss')
    plt.show()
|
#Gettysburg address analuysis
# count words, unique words
import string
char = string.punctuation
print(char)
def make_words_list(a_file):
    """Create a list of words from a file, with leading/trailing
    punctuation stripped from each word.

    Bug fixed: the original compared each word against the ENTIRE
    punctuation string (`word != char`), which is essentially always
    true, so punctuation was never removed and e.g. 'nation,' and
    'nation' counted as different words.
    """
    word_list = []  # list of speech words
    for line_str in a_file:
        for word in line_str.split():
            cleaned = word.strip(string.punctuation)
            if cleaned:  # drop tokens that were pure punctuation
                word_list.append(cleaned)
    return word_list
def get_unique_words(word_list):
    """Return the unique words of word_list, first-seen order preserved."""
    # dict.fromkeys deduplicates while keeping insertion order (3.7+).
    return list(dict.fromkeys(word_list))
###############################################
#open file for reading
try:
    gba_file = open("/home/oluwatobi/Documents/Pdf books/gettysburg.txt", "r")
except FileNotFoundError:
    print("Error processing file")
# NOTE(review): if the file is missing, gba_file is never bound and the
# next line raises NameError — consider exiting inside the except block.
speech_list = make_words_list(gba_file)
print(speech_list)
print("Length: ", len(speech_list))
print("Unique words length", len(get_unique_words(speech_list))) |
import os
from flask import Flask, render_template, request
from ocr_core import ocr_core
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from pymongo import MongoClient
from bson.objectid import ObjectId
app = Flask(__name__)
# define a folder to store and later serve the images
app.config["IMAGE_UPLOADS"] = "static/pdf/"
client = MongoClient('localhost',27017)
db = client.ocr_pdf
ocr = db.ocr
# allow files of a specific type
ALLOWED_IMAGE_EXTENSIONS = set(['pdf'])
# function to check the file extension
def allowed_file(filename):
    """True when filename has an extension and it is in the allowed set."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_IMAGE_EXTENSIONS
# route and function to handle the home page
@app.route('/')
def home_page():
    """Render the index page with every stored OCR document."""
    select_all = [obj for obj in ocr.find()]
    return render_template('index.html', select_all=select_all)
# route and function to handle the upload page
@app.route('/upload', methods=['POST'])
def upload():
    """Handle a PDF upload: save it, OCR it, store the text lines in MongoDB."""
    if request.method == 'POST':
        # check if there is a file in the request
        if 'file' not in request.files:
            return render_template('upload.html', msg='No file selected')
        file = request.files['file']
        # if no file is selected
        if file.filename == '':
            return render_template('upload.html', msg='No file selected')
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config["IMAGE_UPLOADS"], filename))
            # call the OCR function on it
            extracted_text = ocr_core(file)
            # Drop empty / single-space lines in one pass instead of the
            # original repeated list.remove() loops.
            extracted_text = [
                line for line in extracted_text[0].splitlines()
                if line not in ("", " ")
            ]
            # Bug fixed: the original looped `for str in extracted_text`
            # (shadowing the builtin) and keyed each line by list.index(),
            # which returns the FIRST occurrence — duplicate lines
            # collapsed onto the same key and overwrote each other.
            document = {
                f'linha {line_no}': line
                for line_no, line in enumerate(extracted_text, start=1)
            }
            ocr.insert_one(document)
            # extract the text and display it
            return render_template('upload.html',
                                   msg='Upload concluido com sucesso!',
                                   extracted_text=extracted_text,
                                   img_src=app.config["IMAGE_UPLOADS"] + filename)
    elif request.method == 'GET':
        return render_template('upload.html')
@app.route('/view/<id>', methods=['GET'])
def view(id):
    """Render a single OCR document looked up by its Mongo ObjectId."""
    document = ocr.find_one({"_id": ObjectId(id)})
    return render_template('view.html', pdf=document)
@app.route('/delete/<id>', methods=['GET'])
def delete(id):
    """Delete the document with the given id, then re-render the index."""
    deleted = ocr.find_one_and_delete({'_id': ObjectId(id)}) is not None
    select_all = [obj for obj in ocr.find()]
    return render_template('index.html', deleted=deleted, select_all=select_all)
@app.route('/update', methods=["POST"])
def update():
    """Set one line of a stored document to a new value from the form."""
    new_value = request.form['valor']
    object_id = request.form['id']
    line_key = request.form['linha']
    result = ocr.update_one(
        {'_id': ObjectId(object_id)},
        {"$set": {f"{line_key}": f"{new_value}"}},
    ) is not None
    document = ocr.find_one({"_id": ObjectId(object_id)})
    return render_template('view.html', pdf=document, update=result)
if __name__ == '__main__':
app.run(debug=True) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 6 16:23:00 2019
@author: kanchana
"""
import numpy as np
# Sum all entries of a random matrix with explicit loops (for comparison
# against np.sum / map in the commented lines below).
m = np.random.rand(200,200)
temp=0
# NOTE(review): i iterates axis 1 and j axis 0 but m[i,j] indexes
# (row, col) — this only works because m is square (200x200); the
# indices would need swapping for a non-square matrix.
for i in range(m.shape[1]):
    for j in range(m.shape[0]):
        temp = temp +m[i,j]
print("Sum of m without the function: ", temp)
#print("Sum of m using the function: ", np.sum(m))
#print("Sum of m using map function: ", sum(map(sum,m)))
|
class Employee:
    """Simple employee record with a company name shared by all instances."""

    comp_name = "sathya"  # class attribute, common to every employee

    def __init__(self, name, sal):
        self.emp_name = name
        self.emp_salary = sal

    def displayDetails(self):
        """Print the employee's name, salary and the company name."""
        for value in (self.emp_name, self.emp_salary, Employee.comp_name):
            print(value)
#-----------------------------------
e1 = Employee("Ravi",10)
e1.displayDetails()
e2 = Employee("Kumar",20)
e2.displayDetails()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assignment in BMP course - Network Information Table parser
Author: Jakub Lukac
E-mail: xlukac09@stud.fit.vutbr.cz
Created: 16-10-2019
Testing: python3.6
"""
import sys
from descriptor import parse_descriptors
from psi import PSI
class NIT(PSI):
    """Network Information Table (DVB SI) parser, built on the PSI base.

    Parses the network descriptor loop and the transport-stream loop from
    the table payload provided by PSI.
    """

    # table_id values identifying a NIT section (actual / other network)
    __ACTUAL_NETWORK = 0x40
    __OTHER_NETWORK = 0x41

    def __init__(self, data):
        # parse program-specific information frame
        super().__init__(data)
        # Sanity checks on the generic PSI fields; errors are reported but
        # parsing continues (best effort).
        if not (self.table_id == NIT.__ACTUAL_NETWORK or self.table_id == NIT.__OTHER_NETWORK):
            print("NIT Error:", "Table ID is not NIT ID(0x40 or 0x41).", file=sys.stderr)
        if not self.section_syntax_indicator:
            print("NIT Error: ", "Section syntax indicator bit not set to 1.",
                  file=sys.stderr)
        if not self.private_bit:
            print("NIT Error:", "Private bit not set to 1.", file=sys.stderr)
        self.__parse_nit_table(self.table_data)

    def __parse_nit_table(self, data):
        """Parse the NIT payload: network descriptors, then the
        transport-stream loop (ts id, original network id, descriptors)."""
        position_indicator = 0
        # 4 bits reserved bits
        reserved = (data[position_indicator] & 0xf0) >> 4
        # 12 bits network descriptors length
        network_descriptors_length = int.from_bytes(data[position_indicator:position_indicator + 2],
                                                    byteorder="big") & 0x0fff
        position_indicator += 2
        # variable bits read network descriptors
        self.network_descriptors = parse_descriptors(
            data[position_indicator:position_indicator + network_descriptors_length])
        position_indicator += network_descriptors_length
        # 4 bits reserved bits
        reserved = (data[position_indicator] & 0xf0) >> 4
        # 12 bits transport stream loop length
        ts_loop_length = int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big") & 0x0fff
        position_indicator += 2
        # transport stream loop: parallel lists, one entry per TS
        self.ts_ids = []
        self.og_network_ids = []
        self.ts_descriptors = []
        ts_loop_end_position = position_indicator + ts_loop_length  # current position + loop length
        while position_indicator < ts_loop_end_position:
            # 16 bits transport stream id
            self.ts_ids.append(int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big"))
            position_indicator += 2
            # 16 bits original delivery system id
            self.og_network_ids.append(int.from_bytes(data[position_indicator:position_indicator + 2], byteorder="big"))
            position_indicator += 2
            # 4 bits reserved bits
            reserved = (data[position_indicator] & 0xf0) >> 4
            # 12 bits transport descriptors length
            transport_descriptors_length = int.from_bytes(data[position_indicator:position_indicator + 2],
                                                          byteorder="big") & 0x0fff
            position_indicator += 2
            # variable bits read network descriptors
            self.ts_descriptors.append(
                parse_descriptors(data[position_indicator:position_indicator + transport_descriptors_length]))
            position_indicator += transport_descriptors_length

    def __str__(self):
        """Human-readable dump of the parsed table."""
        # NOTE(review): relies on `self.id` being set by PSI — confirm.
        nit_str = super().__str__()
        nit_str += "Network ID: {self.id:#x}\n" \
                   "Network descriptors: [".format(self=self) \
                   + ", ".join([format(desc) for desc in self.network_descriptors]) + \
                   "]\n" \
                   "TS IDs: [" \
                   + ", ".join([format(id, "#06x") for id in self.ts_ids]) + \
                   "]\n" \
                   "Original network IDs: [" \
                   + ", ".join([format(id, "#06x") for id in self.og_network_ids]) + \
                   "]\n" \
                   "TS descriptors: [[" \
                   + "], [".join([", ".join([format(d) for d in descs]) for descs in self.ts_descriptors]) + \
                   "]]\n"
        return nit_str
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# extract_grating_ede.py: extract grating E/dE data #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#################################################################################
import os
import sys
import re
import random
import numpy
import time
import Chandra.Time
#
#--- reading directory list
#
path = '/data/mta/Script/Grating/Grating_EdE/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append pathes to private folders to a python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
#
#--- temp writing file name
#
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
catg_list = ['HEGp1', 'HEGm1', 'MEGp1', 'MEGm1', 'LEGpAll', 'LEGmAll']
type_list = ['hetg', 'hetg', 'metg', 'metg', 'letg', 'letg']
header = '#year obsid energy fwhm denergy error order cnt roi_cnt acf acf_err link'
#------------------------------------------------------------------------------
#-- extract_grating_ede: extract grating E/dE data --
#------------------------------------------------------------------------------
def extract_grating_ede():
    """
    extract grating E/dE data
    input: none but read from <gdata_dir>/*/*/*Sky_<catg>_linelist.rdb
    output: <data_dir>/<catg.lower>_data
    """
    #
    #--- go through each data set
    #
    for k in range(0, 6):
        catg = catg_list[k]
        #
        #--- is there new file?
        #
        n_list = find_new_entries(catg)
        if len(n_list) == 0:
            continue
        # append when the output file exists; otherwise start a new file
        # with the column header line
        outfile = data_dir + catg.lower() + '_data'
        if os.path.isfile(outfile):
            otype = 'a'
            line = ''
        else:
            otype = 'w'
            line = header + '\n'
        #
        #--- go through each data file
        #
        for ent in n_list:
            out = read_rdb_file(ent)
            data = select_data(out, type_list[k])
            if len(data[0]) == 0:
                continue
            #
            #--- if there are data, update the data file
            #
            # data is a list of 12 parallel columns; emit one
            # tab-separated row per entry
            for n in range(0, len(data[0])):
                line = line + str(data[0][n]) + '\t' + str(data[1][n])
                for m in range(2, 12):
                    line = line + '\t' + str(data[m][n])
                line = line + '\n'
        with open(outfile, otype) as fo:
            fo.write(line)
#------------------------------------------------------------------------------
#-- find_new_entries: find un-processed data file names --
#------------------------------------------------------------------------------
def find_new_entries(catg):
    """
    find un-processed data file names
    input: catg --- category of the data
    output: new --- a list of the file names
    """
    #
    #--- read already processed data files
    #
    pfile = house_keeping + catg
    try:
        pdata = mcf.read_data_file(pfile)
    except:
        # a missing bookkeeping file just means nothing processed yet
        pdata = []
    #
    #--- get current data file list
    #
    cmd = 'ls ' + gdata_dir + '/*/*/*Sky_' + catg + '_linelist.rdb > ' + zspace
    os.system(cmd)
    ndata = mcf.read_data_file(zspace)
    #
    #--- move the current list to <house_keeping>
    #
    cmd = 'mv ' + zspace + ' ' + pfile
    os.system(cmd)
    #
    #--- find un-processed data files
    #
    new = list(set(ndata) - set(pdata))
    return new
#------------------------------------------------------------------------------
#-- select_data: select out data which fit to the selection criteria --
#------------------------------------------------------------------------------
def select_data(idata, type):
    """
    select out data which fit to the selection criteria
    input: indata
    idata[0]: year
    idata[1]: obsid
    idata[2]: energy
    idata[3]: fwhm
    idata[4]: denergy
    idata[5]: error
    idata[6]: order
    idata[7]: cnt
    idata[8]: roi_cnt
    idata[9]: acf
    idata[10]: acf_err
    idata[11]: links
    type --- type of the data; letg, metg, hetg
    output: out --- selected portion of the data
    """
    out = []
    for k in range(0, 12):
        out.append([])
    for m in range(0, len(idata[0])):
        # keep only well-measured lines: relative error below 15%
        if (idata[5][m] / idata[3][m] < 0.15):
            #
            #-- letg case
            #
            if type == 'letg':
                for k in range(0, 12):
                    out[k].append(idata[k][m])
            #
            #--- metg case
            #
            elif idata[3][m] * 1.0e3 / idata[2][m] < 5.0:
                if type == 'metg':
                    for k in range(0, 12):
                        out[k].append(idata[k][m])
            #
            #--- hetg case
            #
            else:
                # NOTE(review): no explicit `type == 'hetg'` test here and
                # the 1.01 constant is undocumented — confirm intent.
                if abs(idata[3][m] - 1.01) > 0.01:
                    for k in range(0, 12):
                        out[k].append(idata[k][m])
    return out
#------------------------------------------------------------------------------
#-- read_rdb_file: read rdb data file --
#------------------------------------------------------------------------------
def read_rdb_file(infile):
    """
    read data file
    input: infile --- input file name
    output: a list of:
    idata[0]: year
    idata[1]: obsid
    idata[2]: energy
    idata[3]: fwhm
    idata[4]: denergy
    idata[5]: error
    idata[6]: order
    idata[7]: cnt
    idata[8]: roi_cnt
    idata[9]: acf
    idata[10]: acf_err
    idata[11]: links
    """
    # two-digit year encoded in the grandparent directory name; pivot at 90
    atemp = re.split('\/', infile)
    year = int(float(atemp[-3][3] + atemp[-3][4]))
    if year > 90:
        year += 1900
    else:
        year += 2000
    obsid = atemp[-2]
    # link to the per-obsid summary page, next to the rdb file
    line = 'obsid_' + obsid + '_Sky_summary.html'
    link = infile.replace(atemp[-1], line)
    data = mcf.read_data_file(infile)
    year_l = []
    obsid_l = []
    energy = []
    fwhm = []
    error = []
    denergy = []
    order = []
    cnt = []
    roi_cnt = []
    acf = []
    acf_err = []
    links_l = []
    chk = 0
    # the first 20 lines are assumed to be preamble — TODO confirm
    for k in range(20, len(data)):
        ent = data[k]
        #
        #--- find the spot that the data actually start
        #
        if ent[0] == '#':
            continue
        if chk == 0:
            # skip until the line starting with 'N' that marks the start
            # of the data block (presumably the rdb type row — confirm)
            if ent[0] == 'N':
                chk = 1
            continue
        atemp = re.split('\s+', ent)
        #
        #--- drop bad data
        #
        try:
            eng = float(atemp[3])
            deng = float(atemp[7])
        except:
            continue
        if (eng < 0.0) or (deng < 0.0):
            continue
        year_l.append(year)
        obsid_l.append(obsid)
        energy.append(eng)
        fwhm.append(float(atemp[5]))
        error.append(float(atemp[6]))
        denergy.append(deng)
        order.append(1)
        cnt.append(int(float(atemp[1])))
        roi_cnt.append(float(atemp[8]))
        acf.append(float(atemp[9]))
        acf_err.append(float(atemp[10]))
        links_l.append(link)
    #
    #--- a couple of data are useful as numpy array
    #
    year_l = numpy.array(year_l)
    energy = numpy.array(energy)
    denergy = numpy.array(denergy)
    return [year_l, obsid_l, energy, fwhm, denergy, error, order,
            cnt, roi_cnt, acf, acf_err, links_l]
#------------------------------------------------------------------------------
if __name__ == '__main__':
extract_grating_ede()
|
# -*- coding: utf-8 -*-
from h5py import File as HDF5File
from common import get_logger
import numpy as np
class ClassPatches:
    """
    Lazy loader for patch feature rows stored in an HDF5 dataset.

    Only the rows selected by ``indexes`` (one [start, stop) range per image)
    are copied into memory; a re-based index table describing the packed
    layout is built alongside them.
    """
    def __init__(self, filename, indexes, patch_name):
        # filename   -- path of the HDF5 file holding the patches
        # indexes    -- (n_images, 2) array of [start, stop) row ranges
        # patch_name -- name of the dataset inside the HDF5 file
        self.file_name = filename
        self.indexes = indexes
        self.new_index = None
        self.patches = None
        self.patch_name = patch_name
    def get_patches(self):
        """Return the packed patch matrix, loading it on first use."""
        if self.patches is None:
            self.load()
        return self.patches
    def load(self):
        """Copy the selected patch rows into memory and build new_index."""
        get_logger().info("Loading patches for " + self.file_name)
        hfile = HDF5File(self.file_name, 'r')
        # bug fix: close the file even if a read below raises
        # (the original leaked the handle on error)
        try:
            patches = hfile[self.patch_name]
            feature_dim = patches.shape[1]
            indexes = self.indexes
            num_patches = (indexes[:, 1] - indexes[:, 0]).sum()
            self.patches = np.empty([num_patches, feature_dim])
            # NOTE(review): np.empty defaults to float64, so these index
            # pairs are stored as floats -- callers must cast before slicing
            self.new_index = np.empty([indexes.shape[0], 2])
            patch_start = n_image = 0
            for iid in indexes:
                n_patches = iid[1] - iid[0]
                self.patches[patch_start:patch_start + n_patches, :] = patches[iid[0]:iid[1], :]
                self.new_index[n_image] = [patch_start, patch_start + n_patches]
                patch_start += n_patches
                n_image += 1
        finally:
            hfile.close()
        get_logger().info("Loaded " + str(num_patches) + " patches")
    def unload(self):
        """Drop the in-memory patch matrix (it can be re-loaded later)."""
        get_logger().info("Unloading patches for " + self.file_name)
        self.patches = None
    def get_num_patches(self):
        """Total number of selected patch rows, computed without loading."""
        return (self.indexes[:, 1] - self.indexes[:, 0]).sum()
    def get_new_indexes(self):
        # indexes according to the new patch layout, after extraction
        if self.new_index is None:
            self.load()
        return self.new_index
|
import sqlite3
from flask import Flask
from flask import jsonify
from flask import request
import mybackend
# Flask application object; the routes below attach to it.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def calculate_recommendations():
    """
    HTTP endpoint: validate the query-string parameters and return the
    top-k station recommendations as JSON.

    Query parameters:
        startlocation -- start station name
        timeduration  -- trip duration (positive integer)
        k             -- number of recommendations (positive integer)
    """
    start_station_name = request.args.get('startlocation')
    duration_time = request.args.get('timeduration')
    recommendations_amount = request.args.get('k')
    try:
        validate(start_station_name, duration_time, recommendations_amount)
        db = mybackend.Database()
        answers = db.calculate_recommendations(
            start_station_name, int(duration_time), int(recommendations_amount))
        return jsonify(answers)
    except Exception as e:
        # bug fix: '%s' % e renders the same text as e.args[0] for normal
        # exceptions, but does not crash with IndexError when an exception
        # carries no args
        return jsonify('Error %s' % e)
#validate function
def validate(start_station_name, duration_time, recommendations_amount):
    """
    Validate the raw request parameters.

    Raises ValueError with a user-facing message when any parameter is
    missing, non-numeric, or out of range; returns True on success.
    (The original raised TypeError when a parameter was None and parsed
    each integer twice.)
    """
    try:
        duration = int(duration_time)
    except (TypeError, ValueError):
        raise ValueError("Please enter valid duration time")
    if duration <= 0:
        raise ValueError("Please enter valid duration time")
    try:
        amount = int(recommendations_amount)
    except (TypeError, ValueError):
        raise ValueError("Please enter valid recommendations amount")
    if amount <= 0:
        raise ValueError("Please enter valid recommendations amount")
    # covers both None (missing parameter) and the empty string
    if not start_station_name:
        raise ValueError("Please enter valid start location")
    return True
# Run the development server when executed directly (debug mode; not for production).
if __name__ == '__main__':
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
class Migration(migrations.Migration):
    """
    Replace the Param/ParamProduct machinery with generic
    entity-attribute-value style tables (Schema / Attribute / Choice)
    and reshape Product (rename url -> img_url, drop old fields, add title).
    """
    dependencies = [
        ('contenttypes', '0001_initial'),
        ('testapp', '0011_auto_20150424_2157'),
    ]
    operations = [
        # --- new EAV tables -------------------------------------------------
        migrations.CreateModel(
            name='Attribute',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entity_id', models.IntegerField()),
                ('value_text', models.TextField(null=True, blank=True)),
                ('value_float', models.FloatField(null=True, blank=True)),
                ('value_date', models.DateField(null=True, blank=True)),
                ('value_bool', models.NullBooleanField()),
                ('value_range_min', models.FloatField(null=True, blank=True)),
                ('value_range_max', models.FloatField(null=True, blank=True)),
            ],
            options={
                'ordering': ['entity_type', 'entity_id', 'schema'],
                'abstract': False,
                'verbose_name': 'attribute',
                'verbose_name_plural': 'attributes',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=100)),
            ],
            options={
                'ordering': ('title',),
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Schema',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(help_text='user-friendly attribute name', max_length=250, verbose_name='title')),
                ('name', autoslug.fields.AutoSlugField(max_length=250, verbose_name='name', blank=True)),
                ('help_text', models.CharField(help_text='short description for administrator', max_length=250, verbose_name='help text', blank=True)),
                ('datatype', models.CharField(max_length=5, verbose_name='data type', choices=[(b'text', 'text'), (b'float', 'number'), (b'date', 'date'), (b'bool', 'boolean'), (b'one', 'choice'), (b'many', 'multiple choices'), (b'range', 'numeric range')])),
                ('required', models.BooleanField(default=False, verbose_name='required')),
                ('searched', models.BooleanField(default=False, verbose_name='include in search')),
                ('filtered', models.BooleanField(default=False, verbose_name='include in filters')),
                ('sortable', models.BooleanField(default=False, verbose_name='allow sorting')),
            ],
            options={
                'ordering': ['title'],
                'abstract': False,
                'verbose_name': 'schema',
                'verbose_name_plural': 'schemata',
            },
            bases=(models.Model,),
        ),
        # --- tear down the old Param/ParamProduct relations -----------------
        migrations.RemoveField(
            model_name='paramproduct',
            name='param',
        ),
        migrations.RemoveField(
            model_name='paramproduct',
            name='product',
        ),
        # --- wire up the FK relations of the new tables ---------------------
        migrations.AddField(
            model_name='choice',
            name='schema',
            field=models.ForeignKey(related_name='choices', to='testapp.Schema'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='attribute',
            name='choice',
            field=models.ForeignKey(blank=True, to='testapp.Choice', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='attribute',
            name='entity_type',
            field=models.ForeignKey(to='contenttypes.ContentType'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='attribute',
            name='schema',
            field=models.ForeignKey(related_name='attrs', to='testapp.Schema'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='attribute',
            unique_together=set([('entity_type', 'entity_id', 'schema', 'choice')]),
        ),
        # --- reshape Product ------------------------------------------------
        migrations.RenameField(
            model_name='product',
            old_name='url',
            new_name='img_url',
        ),
        migrations.RemoveField(
            model_name='filterparam',
            name='param',
        ),
        migrations.RemoveField(
            model_name='product',
            name='category',
        ),
        migrations.RemoveField(
            model_name='product',
            name='name',
        ),
        migrations.RemoveField(
            model_name='product',
            name='params',
        ),
        migrations.DeleteModel(
            name='Param',
        ),
        migrations.DeleteModel(
            name='ParamProduct',
        ),
        migrations.AddField(
            model_name='product',
            name='title',
            field=models.CharField(default=b'', max_length=50),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.DecimalField(max_digits=18, decimal_places=2),
            preserve_default=True,
        ),
    ]
|
# Randomly generates N distinct integers with N provided by the user,
# inserts all these elements into a priority queue, and outputs a list
# L consisting of all those N integers, determined in such a way that:
# - inserting the members of L from the one with the smallest index to the
#   one with the largest index results in the same priority queue;
# - L is preferred in the sense that the last element inserted is as large as
# possible, and then the penultimate element inserted is as large as possible, etc.
#
# [27, 12, 24]
# [12, 24, 27]
#
# Written by *** and Eric Martin for COMP9021
import sys
from random import seed, sample
from priority_queue_adt import *
# Possibly define some functions
def delete_node(n):
    """
    Remove value ``n`` from the module-level priority queue ``pq`` while
    preserving the heap property: swap it with the last stored element,
    shrink the heap, then bubble the moved element down.  Returns ``n``.
    """
    # find the index corresponding to number n
    for i in range(1, pq._length + 1):
        if pq._data[i] == n:
            break
    # move the last element into n's slot and shrink the heap by one
    pq._data[i], pq._data[pq._length] = pq._data[pq._length], pq._data[i]
    pq._length -= 1
    # When the priority queue is one quarter full, we reduce its size to make it half full,
    # provided that it would not reduce its capacity to less than the minimum required.
    if pq.min_capacity // 2 <= pq._length <= len(pq._data) // 4:
        pq._resize(len(pq._data) // 2)
    pq._bubble_down(i)
    return n
def preferred_sequence():
    """
    Empty ``pq`` while building the preferred insertion order: at each step,
    the largest value whose delete-then-reinsert leaves the heap array
    unchanged is prepended to the result and then removed for good.
    """
    preferred_sequence = []
    while len(pq):
        check_list = sorted(pq._data[1 : len(pq) + 1], reverse=True)
        # check from the largest number
        for n in check_list:
            copy_data = pq._data[:]
            delete_node(n)
            pq.insert(n)
            # if the tree keeps the same after delete and insert a particular number n
            # then prepend this number on preferred_sequence, and then delete n from pq
            if pq._data[1 : len(pq) + 1] == copy_data[1 : len(pq) + 1]:
                preferred_sequence.insert(0, n)
                delete_node(n)
                break
            # if the number is not suitable for preferred_sequence, restore pq and examine next number
            else:
                pq._data = copy_data[:]
    return preferred_sequence
# Replace pass above with your code (altogether, aim for around 24 lines of code)
# Read the seed and the element count, generate `length` distinct integers,
# heapify them, and print the heap plus the preferred insertion order.
try:
    for_seed, length = [int(x) for x in input('Enter 2 nonnegative integers, the second one '
                                              'no greater than 100: '
                                             ).split()
                       ]
    if for_seed < 0 or length > 100:
        raise ValueError
except ValueError:
    print('Incorrect input (not all integers), giving up.')
    sys.exit()
seed(for_seed)
# 10x range guarantees `length` distinct values are available to sample
L = sample(list(range(length * 10)), length)
# print('L = ', L)
pq = PriorityQueue()
for e in L:
    pq.insert(e)
print('The heap that has been generated is: ')
print(pq._data[ : len(pq) + 1])
# bug fix: "successsive" typo corrected in the user-facing message
print('The preferred ordering of data to generate this heap by successive insertion is:')
print(preferred_sequence())
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits import mplot3d
import matplotlib
font = {'family' : 'normal',
        'weight' : 'normal',
        'size' : 18}
matplotlib.rc('font', **font)
#######################################
########## Load in all screened trajectories, cluster1 trajectories, and cluster2 trajectories ########
def _read_paths(path):
    """Read one whitespace-separated float array per line of *path*.

    NOTE: np.fromstring is deprecated in newer numpy; behaviour kept as-is.
    """
    rows = []
    with open(path) as fh:
        for line in fh:
            rows.append(np.fromstring(line, dtype=float, sep=' '))
    return rows
transitions = _read_paths('Traj_Group_Data/screened_trajectories.dat')
transitions_c1 = _read_paths('Traj_Group_Data/cluster1_paths.dat')
transitions_c2 = _read_paths('Traj_Group_Data/cluster2_paths.dat')
######### Load in input data #################
data = np.loadtxt('../MCPS/Input_Files/input.dat')
#######################################
###################### For each group of trajectories, make lists of the z, azimuthal and inclination angles ############
def _split_columns(paths, table):
    """For each trajectory (a list of row ids into *table*), collect the
    z (col 0), inclination (col 1) and azimuthal (col 2) values as
    per-trajectory arrays keyed by trajectory index."""
    z, inc, az = {}, {}, {}
    for i in range(len(paths)):
        z[i], inc[i], az[i] = [], [], []
        for j in range(len(paths[i])):
            f = int(paths[i][j])
            z[i] = np.append(z[i], table[f][0])
            inc[i] = np.append(inc[i], table[f][1])
            az[i] = np.append(az[i], table[f][2])
    return z, inc, az
# NOTE(review): the original also built this first set from transitions_c2,
# not from the screened `transitions` -- behaviour preserved; confirm intent.
transitions_z, transitions_inc, transitions_az = _split_columns(transitions_c2, data)
transitions_z_c1, transitions_inc_c1, transitions_az_c1 = _split_columns(transitions_c1, data)
transitions_z_c2, transitions_inc_c2, transitions_az_c2 = _split_columns(transitions_c2, data)
########### Plot inclination vs z for each cluster #############
################################################################
for i in range(len(transitions_c1)):
    plt.plot(transitions_z_c1[i], transitions_inc_c1[i], color='black')
for i in range(len(transitions_c2)):
    plt.plot(transitions_z_c2[i], transitions_inc_c2[i], color='forestgreen')
plt.ylim(0, 180)
plt.savefig('Images/clusters_z_inc.png', bbox_inches='tight')
plt.close()
########### Plot azimuthal vs z for each cluster ################
################################################################
transitions_c1_az_total = []
transitions_c1_inc_total = []
transitions_c1_z_total = []
transitions_c2_az_total = []
transitions_c2_inc_total = []
transitions_c2_z_total = []
for i in range(len(transitions_c1)):
    for j in range(len(transitions_c1[i])):
        if transitions_az_c1[i][j] > 290:
            transitions_az_c1[i][j] -= 360  ##### This is to deal with periodic trajectories
        transitions_c1_az_total = np.append(transitions_c1_az_total, transitions_az_c1[i][j])
        transitions_c1_z_total = np.append(transitions_c1_z_total, transitions_z_c1[i][j])
    plt.plot(transitions_z_c1[i], transitions_az_c1[i], color='black')
for i in range(len(transitions_c2)):
    for j in range(len(transitions_c2[i])):
        if transitions_az_c2[i][j] < 75:
            transitions_az_c2[i][j] += 360  ##### This is to deal with periodic trajectories
        transitions_c2_az_total = np.append(transitions_c2_az_total, transitions_az_c2[i][j])
        transitions_c2_z_total = np.append(transitions_c2_z_total, transitions_z_c2[i][j])
    plt.plot(transitions_z_c2[i], transitions_az_c2[i], color='forestgreen')
plt.ylim(0, 360)
plt.savefig('Images/clusters_z_az.png', bbox_inches='tight')
plt.close()
|
import pygame
from pygame.surface import Surface
from Helpers.EventHelpers import EventExist
from Vector2 import Vector2
class ArrowItem:
    """
    Base class for a directional arrow drawn in the left menu panel.
    Subclasses supply the textures and the destination offset.
    """
    def __init__(self, offset: Vector2, image: Surface = None, hover: Surface = None, rect=None):
        self.Offset = offset
        self.Image = self._getTexture() if image is None else image
        self.Hover = self._getHoverTexture() if hover is None else hover
        self.Rect = rect
    def Update(self, game):
        """No per-frame state; returns self unchanged."""
        return self
    def Draw(self, game):
        """Blit the (hovered or normal) texture centered in the left menu,
        shifted by this arrow's offset, and remember the blit rect."""
        half_w = game.Settings.MenuLeftSize.X // 2
        half_h = game.Settings.MenuLeftSize.Y // 2
        bounds = self.Image.get_rect()
        pos = (half_w - bounds.centerx + self.Offset.X,
               half_h - bounds.centery + self.Offset.Y)
        surface = self.Hover if (self.Hover is not None and self.IsHoverdByMouse()) else self.Image
        self.Rect = game.Settings.GetScreen().blit(surface, pos)
    def IsHoverdByMouse(self):
        """True once Draw has run and the mouse is inside the blit rect."""
        return self.Rect is not None and self.Rect.collidepoint(pygame.mouse.get_pos())
    def IsClickedByMouse(self, game):
        """True when hovered and a MOUSEBUTTONUP event is pending."""
        return self.IsHoverdByMouse() and EventExist(game.Events, pygame.MOUSEBUTTONUP)
    def _getTexture(self):
        """Default texture; overridden by subclasses."""
        return None
    def _getHoverTexture(self):
        """Default hover texture; overridden by subclasses."""
        return None
    def GetDestinationPosition(self, pos: Vector2):
        """Base arrow does not move the position."""
        return pos
class ArrowButtonUp(ArrowItem):
    """Arrow whose destination is one tile above the given position."""
    _TEXTURE = 'images/arrows/ArrowDarkUp.png'
    _HOVER = 'images/arrows/ArrowLightUp.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X, pos.Y - 1)
class ArrowButtonUpRight(ArrowItem):
    """Arrow whose destination is one tile up and one to the right."""
    _TEXTURE = 'images/arrows/ArrowDarkUpRight.png'
    _HOVER = 'images/arrows/ArrowLightUpRight.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X + 1, pos.Y - 1)
class ArrowButtonRight(ArrowItem):
    """Arrow whose destination is one tile to the right."""
    _TEXTURE = 'images/arrows/ArrowDarkRight.png'
    _HOVER = 'images/arrows/ArrowLightRight.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X + 1, pos.Y)
class ArrowButtonDownRight(ArrowItem):
    """Arrow whose destination is one tile down and one to the right."""
    _TEXTURE = 'images/arrows/ArrowDarkDownRight.png'
    _HOVER = 'images/arrows/ArrowLightDownRight.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X + 1, pos.Y + 1)
class ArrowButtonDown(ArrowItem):
    """Arrow whose destination is one tile below the given position."""
    _TEXTURE = 'images/arrows/ArrowDarkDown.png'
    _HOVER = 'images/arrows/ArrowLightDown.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X, pos.Y + 1)
class ArrowButtonDownLeft(ArrowItem):
    """Arrow whose destination is one tile down and one to the left."""
    _TEXTURE = 'images/arrows/ArrowDarkDownLeft.png'
    _HOVER = 'images/arrows/ArrowLightDownLeft.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X - 1, pos.Y + 1)
class ArrowButtonLeft(ArrowItem):
    """Arrow whose destination is one tile to the left."""
    _TEXTURE = 'images/arrows/ArrowDarkLeft.png'
    _HOVER = 'images/arrows/ArrowLightLeft.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X - 1, pos.Y)
class ArrowButtonUpLeft(ArrowItem):
    """Arrow whose destination is one tile up and one to the left."""
    _TEXTURE = 'images/arrows/ArrowDarkUpLeft.png'
    _HOVER = 'images/arrows/ArrowLightUpLeft.png'
    def _getTexture(self):
        return pygame.image.load(self._TEXTURE).convert_alpha()
    def _getHoverTexture(self):
        return pygame.image.load(self._HOVER).convert_alpha()
    def GetDestinationPosition(self, pos: Vector2):
        return Vector2(pos.X - 1, pos.Y - 1)
|
from django.test import TestCase
from django.utils import timezone
from api.models import (AndelaUserProfile, Event, Category, UserProxy)
class BaseSetup(TestCase):
    """Shared test fixtures: one category, one Andela user profile, one event."""
    def setUp(self):
        # category the test event below belongs to
        self.category1 = Category.objects.create(
            id=1,
            name="Gaming Meetup",
            description="For people who want to be happy.",
            featured_image="https://cdn.elegantthemes.com/"
        )
        # plain user record, wrapped by the profile created next
        self.user1 = UserProxy.create_user({
            "username": "testuser1",
            "first_name": "test",
            "last_name": "user",
            "email": "test@andela.com"
        })
        self.andela_user1 = AndelaUserProfile.objects.create(
            google_id=1,
            user=self.user1,
            user_picture="https://lh5.googleusercontent.com"
        )
        # event starting and ending "now", created by the profile above
        self.event_1 = Event.objects.create(
            title='event1',
            description='event1 description',
            venue='event1 venue',
            start_date=timezone.now(),
            end_date=timezone.now(),
            creator=self.andela_user1,
            social_event=self.category1,
            active=True
        )
|
# For each of t test cases: read a move string over {L,R,U,D} and print a
# balanced move sequence (R...U...L...D...) built from matched move pairs.
# NOTE(review): structure suggests a competitive-programming task where the
# walk must close without self-crossing, hence the "LR"/"UD" fallbacks when
# one axis has no matched pair -- confirm against the problem statement.
t = int(input())
while t > 0:
    s = str(input())
    n = len(s)
    # per-direction counts; only min(l,r) and min(u,d) pairs are usable
    l = s.count('L')
    r = s.count('R')
    u = s.count('U')
    d = s.count('D')
    min1 = min(l,r)
    min2 = min(u,d)
    arr = []
    # all moves confined to two directions sharing no pair -> nothing usable
    if ( l+d == n or r+d == n or l+u == n or r+u == n):
        print(0)
        print()
    else:
        # rectangle-shaped walk: rights, ups, lefts, downs
        for i in range(min1):
            arr.append('R')
        for i in range(min2):
            arr.append('U')
        for i in range(min1):
            arr.append('L')
        for i in range(min2):
            arr.append('D')
        # a degenerate axis forces a 2-move answer
        if u == 0 or d == 0:
            print(2)
            print("LR")
        elif l == 0 or r == 0:
            print(2)
            print("UD")
        else:
            print(len(arr))
            print(*arr,sep="")
    t = t-1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 08:11:05 2019
@author: imad
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
class PDR(object):
    """Plot Decision Regions: renders a classifier's decision surface."""
    def __init__(self, X, y, classifier, resolution=0.02):
        """
        A decision region plotter (draws onto the current pyplot figure).
        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training dataset; only the first two columns are plotted.
        y : {array-like}, shape = [n_samples]
            Target class labels for the samples in X.
        classifier : object
            Fitted classifier exposing a ``predict`` method.
        resolution : float
            Grid step used when sampling the decision surface.
        """
        # setup marker generator and color map
        markers = ('s', 'x', 'o', '^', 'v')
        colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
        cmap = ListedColormap(colors[:len(np.unique(y))])
        # plot the decision surface over a grid spanning the data range
        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                               np.arange(x2_min, x2_max, resolution))
        Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
        Z = Z.reshape(xx1.shape)
        plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
        plt.xlim(xx1.min(), xx1.max())
        plt.ylim(xx2.min(), xx2.max())
        # plot class samples; boolean-mask indexing replaces the original
        # element-by-element loop, whose ndim == 2 branch was dead code
        # (the mask is always 1-D after ravel())
        for idx, cl in enumerate(np.unique(y)):
            mask = np.asarray(y == cl).ravel()
            plt.scatter(x=X[mask, 0],
                        y=X[mask, 1],
                        alpha=0.8,
                        c=colors[idx],
                        marker=markers[idx],
                        label=cl,
                        edgecolor='black')
|
from mpi4py import MPI
# Demonstrates creating a communicator that excludes rank 0.
comm = MPI.COMM_WORLD
group = comm.Get_group() # this is the world group
# new group = world group minus rank 0
newgroup = group.Excl([0])
print(dir(newgroup))
newcomm = comm.Create(newgroup)
# rank 0 is not in the new group, so Create hands it MPI.COMM_NULL
if comm.rank == 0:
    assert newcomm == MPI.COMM_NULL
else:
    # every other rank is shifted down by one in the new communicator
    assert newcomm.size == comm.size - 1
    assert newcomm.rank == comm.rank - 1
    print(comm.rank, comm.size, newcomm.rank, newcomm.size)
# groups and communicators hold MPI resources and must be freed explicitly
group.Free()
newgroup.Free()
if newcomm: newcomm.Free()
|
"""
This script prints the current system date.
"""
import theano as th
import datetime
print(datetime.date.today())
|
import cv2
import numpy as np

# Adaptive (Gaussian) thresholding demo: binarizes a photographed book page
# so text stays readable under uneven lighting.
img = cv2.imread('images/bookpage.jpg')
if img is None:
    # bug fix: imread returns None (no exception) when the path is wrong,
    # which previously surfaced as a cryptic cvtColor error
    raise FileNotFoundError("could not read 'images/bookpage.jpg'")
grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# blockSize=115: neighbourhood used per pixel; C=1: constant subtracted
th = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY, 115, 1)
cv2.imshow('original', img)
cv2.imshow('Adaptive threshold', th)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import torch.nn as nn
from torch.utils.data import Dataset
import torch
import os
from PIL import Image
class Generator(nn.Module):
    """
    Generator Network, DCGAN-like architecture
    with LeakyRELU and custom kernel shapes.
    """
    def __init__(self, noise_channels, gan_features):
        """
        Initializes network. Input parameters control
        input noise size and network width.
        :param noise_channels: input noise dimension
        :param gan_features: conv channel width factor.
        """
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(noise_channels, gan_features * 8,
                               (4, 3), 1, 0, bias=False),
            nn.BatchNorm2d(gan_features * 8),
            nn.LeakyReLU(0.2, True),
            # state size. (gan_features*8) x 4 x 3
            nn.ConvTranspose2d(gan_features * 8, gan_features * 4,
                               5, 3, 2, bias=False),
            nn.BatchNorm2d(gan_features * 4),
            nn.LeakyReLU(0.2, True),
            # state size. (gan_features*4) x 8 x 8
            nn.ConvTranspose2d(gan_features * 4, gan_features * 2,
                               5, 3, (3, 2), bias=False),
            nn.BatchNorm2d(gan_features * 2),
            nn.LeakyReLU(0.2, True),
            # state size. (gan_features*2) x 16 x 16
            nn.ConvTranspose2d(gan_features * 2, gan_features * 2,
                               5, 3, (3, 2), bias=False),
            nn.BatchNorm2d(gan_features * 2),
            nn.LeakyReLU(0.2, True),
            # state size. (gan_features) x 32 x 32
            nn.ConvTranspose2d(gan_features * 2, gan_features,
                               5, 3, (4, 3), bias=False),
            nn.BatchNorm2d(gan_features),
            nn.LeakyReLU(0.2, True),
            nn.ConvTranspose2d(gan_features, 3, (4, 5),
                               2, (3, 2), bias=False),
            nn.BatchNorm2d(3),
            # nn.Tanh()
            # NOTE(review): Sigmoid maps output pixels to [0, 1]; the usual
            # DCGAN choice is Tanh with data scaled to [-1, 1] -- training
            # data must match this range
            nn.Sigmoid()
            # state size. 3 x 64 x 64
        )
    def forward(self, noise):
        """
        Pushes noise through the network.
        The generator only needs to run through the main sequential.
        :param noise: Tensor of shape (batch_size,latent_size,1,1)
        """
        return self.main(noise)
class Discriminator(nn.Module):
    """
    Discriminator Network, based of DCGAN, but with leakyRELU
    and a few differences.
    LeakyReLU is used, along with minibatch discrimination.
    Minibatch feature scaling is handled by similarity_features.
    """
    def __init__(self, disc_features, num_features,
                 similarity_features):
        """
        :param disc_features: number of gan features, scales network width
        :param num_features: number of features to be given to FC layer.
        :param similarity_features: number of similarity features
            for minibatch discrimination
        """
        super(Discriminator, self).__init__()
        self.num_features = num_features
        # T is similarity matrix. Needs to be learnable parameter.
        self.T = torch.nn.Parameter(
            torch.randn(
                similarity_features,
                similarity_features,
                num_features * 2),
            requires_grad=True
        )
        # secondary conv path, applied to a central crop of the input
        # (see the slice in forward)
        self.feed_forward = nn.Sequential(
            nn.Conv2d(3, disc_features, (5, 9), 2,
                      dilation=2, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(disc_features, disc_features * 2,
                      7, 3, dilation=(1, 2), bias=False),
            nn.BatchNorm2d(disc_features * 2),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # primary conv path, applied to the full image
        self.main_feed = nn.Sequential(
            nn.Conv2d(3, disc_features, 8, (4, 3), 0, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (disc_features) x 105 x 105
            nn.Conv2d(disc_features, disc_features * 2,
                      5, 3, (1, 2), bias=False),
            nn.BatchNorm2d(disc_features * 2),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # shared trunk over the channel-concatenated two paths
        self.main = nn.Sequential(
            # state size. (disc_features*2) x 16 x 16
            nn.Conv2d(disc_features * 4, disc_features * 4,
                      5, 3, 2, bias=False),
            nn.BatchNorm2d(disc_features * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (disc_features*4) x 8 x 8
            nn.Conv2d(disc_features * 4, disc_features * 8,
                      7, 2, 1, bias=False),
            nn.BatchNorm2d(disc_features * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (disc_features*8) x 4 x 4
            nn.Conv2d(disc_features * 8, num_features * 2,
                      4, 1, 0, bias=False)
        )
        self.fc1 = nn.Linear(num_features * 2, num_features, bias=False)
        self.fc2 = nn.Linear(num_features + similarity_features, 1, bias=False)
    def forward(self, images):
        """
        Discriminator forward, minibatch discrimination implemented.
        :param images: Input image tensor. (batch_size,c,h,w)
        :return: label for CE loss and batch features for mean loss.
        """
        # concatenate whole-image features with central-crop features
        features = self.main(
            torch.cat(
                [self.main_feed(images),
                 self.feed_forward(images[:,:,80:80+228+1,40:40+248+1])],
                axis = 1
            )
        )
        features = features.squeeze()
        # squeeze() also drops the batch dim when batch_size == 1; restore it
        if len(features.shape) == 1:
            features = features.unsqueeze(0)
        # similarity matrix
        # sf,sf,batch_size
        similarity = torch.matmul(self.T, features.transpose(0, 1))
        similarity = similarity.repeat(similarity.shape[2], 1, 1, 1)
        similarity_t = similarity.transpose(0, -1)
        # similarity distance scores
        distance = torch.exp(-torch.sum(
            torch.abs(similarity_t - similarity), dim=2))
        distance = torch.sum(distance, dim=2)
        distance = torch.cat([distance, self.fc1(features)], dim=1)
        # output label (0 or 1)
        label = torch.sigmoid(self.fc2(distance))
        return features, label
class YgoCards(Dataset):
    """
    Card image dataset: serves the '.jpg' files found directly inside a
    directory as PIL images, optionally transformed.
    """
    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.root_dir = root_dir
        self.transform = transform
        # keep only the jpg entries of the directory listing
        self.file_list = [name for name in os.listdir(root_dir) if '.jpg' in name]
    def __len__(self):
        """Number of card images found."""
        return len(self.file_list)
    def __getitem__(self, idx):
        """Open (and optionally transform) the idx-th card image."""
        path = os.path.join(self.root_dir, self.file_list[idx])
        sample = Image.open(path)
        if self.transform:
            sample = self.transform(sample)
        return sample
def init_weights(layer):
    """
    Weight initializers.
    Pass in a network layer to be initialized (e.g. via ``model.apply``).
    :param layer: reference to a layer object
    :return: None
    """
    class_name = layer.__class__.__name__
    if class_name.find('Conv') != -1:
        # DCGAN convention: N(0, 0.02) for conv weights
        nn.init.normal_(layer.weight.data, 0, 0.02)
    elif class_name.find('BatchNorm') != -1:
        # scale around 1, bias at 0
        nn.init.normal_(layer.weight.data, 1.0, 0.02)
        nn.init.constant_(layer.bias.data, 0)
    elif class_name.find('Linear') != -1:
        # bug fix: nn.init.xavier_uniform is deprecated; use the in-place
        # xavier_uniform_ like the other branches' in-place initializers
        nn.init.xavier_uniform_(layer.weight.data)
|
# multiprocessing worker site
# NOTE(review): Python 2 style code -- it imports the `Queue` module and
# passes a str authkey; Python 3 would need `queue` and authkey=b'crawler'.
import random, time, Queue
from multiprocessing.managers import BaseManager
# local queues are unused placeholders; the real queues live in the manager
task_queue, result_queue = Queue.Queue(), Queue.Queue()
# Inherit from BaseManager
class QueueManager(BaseManager): pass
# register the remote queue accessors (no local callables: client side)
QueueManager.register('get_task_queue')
QueueManager.register('get_result_queue')
# binding port: 9999, set authentication key as 'crawler'
m = QueueManager(address=('127.0.0.1', 9999), authkey='crawler')
m.connect()
task = m.get_task_queue()
result = m.get_result_queue()
# NOTE(review): `crawl` is not defined in this file; it must be provided by
# the execution environment.  task.get raises Queue.Empty after the 10 s
# timeout, which terminates the loop (uncaught).
while True:
    page = task.get(timeout=10)
    crawl(page)
    result.put(page)
print('worker exit.')
|
from django.dispatch import receiver
from django.db.models.signals import post_save
from .models import OutCome, Analytic # , WheelSpin, Stake
from channels.layers import get_channel_layer
# from channels.db import database_sync_to_async
from asgiref.sync import async_to_sync
# from time import sleep
@receiver(post_save, sender=OutCome)
def on_results_save(sender, instance, **kwargs):
    """
    post_save handler for OutCome: pushes the new pointer and market id to
    the "daru_spin" channel group and makes sure the Analytic row with id=1
    exists.  All failures are logged and swallowed so that saving an OutCome
    never breaks normal business.
    """
    if instance.market is not None:
        pointer_val = instance.pointer  # fix id
        market_id = instance.market_id
        try:
            channel_layer = get_channel_layer()
            async_to_sync(channel_layer.group_send)(
                "daru_spin", {"type": "spin_pointer", "pointer": pointer_val,}
            )
            async_to_sync(channel_layer.group_send)(
                "daru_spin", {"type": "market_info", "market": market_id,}
            )
        except Exception as ce:
            # issues with channels shouldn't stop normal business
            print(f"Channel error:{ce}")  # debug
        try:
            try:  # need test
                # ensure the singleton Analytic row exists / is refreshed;
                # the original's `if created: pass` dead branch was removed
                cum, created = Analytic.objects.update_or_create(id=1)
            except Exception as ce:
                print("CUMsinal", ce)
        except Exception as err:
            # results can be fixed later / manually by an admin
            print(f"REESignal error:{err}")  # debug
#
# @receiver(post_save, sender=WheelSpin)
# def create_outcome_on_market_save(sender, instance, **kwargs):
# print(f'Creatin Outcome for Outcome id {instance.id}')
# try:
# OutCome.objects.create(market_id=instance.id-1)
# except Exception as e:
# print('NEWWWWSinal', e)
# pass
# @receiver(post_save, sender=Stake)
# def create_ioutcome_on_istake_save(sender, instance, **kwargs):
# try:
# if instance.market is None:
# print(f'Creatin Outcome for Outcome id {instance.id} of market{instance.market} ')
# OutCome.objects.create(stake_id=instance.id)
# else:
# print('No Out come created.ISPIN')
# pass
# except Exception as e:
# print('NEWWWWSinal', e)
# pass
# @receiver(post_save, sender=IoutCome)
# def on_oucome_save(sender, instance, **kwargs):
# ipointer_val = instance.pointer # fix id
# try:
# channel_layer = get_channel_layer()
# async_to_sync(channel_layer.group_send)(
# "i_spin",
# {
# "type": "ispin_pointer",
# "ipointer": ipointer_val,
# }
# )
# except Exception as ce:
# print(f'IChannel error:{ce}') # debug
# pass # issues with channel shouldn't inter normal business from being done
|
from unityagents import UnityEnvironment
import random
import torch
class HyperParameters:
    """Tunable constants for the agent training run."""

    def __init__(self):
        # replay buffer / sampling
        self.buffer_size = int(1e5)
        self.batch_size = 256
        # discounting and exploration schedule
        self.gamma_start = 0.9
        self.gamma = 0.99
        self.epsilon = 1.0
        self.epsilon_decay = 1e-5
        # soft-update and optimiser settings
        self.tau = 1e-3
        self.lr_act = 1e-3
        self.lr_critic = 1e-4
        self.weight_decay = 1e-8
        # learning cadence: learn_num passes every learn_every steps
        self.learn_num = 10
        self.learn_every = 20
        # Add OU Noise to actions
        self.mu = 0
        self.theta = 0.15
        self.sigma = 0.2
        self.noise_start = 1.0
        # prefer the first CUDA device when one is available
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Options:
    """Run-level switches and episode limits for training."""

    def __init__(self):
        # episode schedule
        self.n_episodes = 5000
        self.max_t = 1000
        self.print_every = 50
        self.update_frequency = 1
        # reproducibility and behaviour toggles
        self.seed_num = 47
        self.add_noise = True
        self.graphics = False
        self.option = True
        # Unity environment binary to launch
        self.env_name = "Tennis.exe"
# NOTE(review): rebinding the class name to a singleton instance shadows
# `Options` -- no further instances can be created after this line
Options = Options()
class Environment:
    """Static description of the environment's dimensions."""

    def __init__(self):
        self.num_agents = 2    # agents acting in parallel
        self.action_size = 2   # action vector length per agent
        self.state_size = 24   # observation vector length per agent
# NOTE(review): same shadowing pattern as Options above -- the class names
# are replaced by module-level singleton instances
HyperParameters = HyperParameters()
Environment = Environment()
|
from Reader.InstanceReader.InstanceReaderCoCoStyle import ChemScapeDataset
import torch
import torchvision
from torchvision.models.detection import MaskRCNN
from torchvision.models.detection.rpn import AnchorGenerator
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import grad
from torch.autograd import Variable
from torchvision import datasets, transforms
# MobileNetV2 feature extractor as Mask R-CNN backbone; MaskRCNN requires the
# backbone to advertise its output channel count.
backbone = torchvision.models.mobilenet_v2(pretrained=True).features
backbone.out_channels = 1280
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),))
# NOTE(review): newer torchvision expects string feature-map names, i.e.
# featmap_names=['0'] -- confirm against the installed version.
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],output_size=7,sampling_ratio=2)
mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0], output_size=14,sampling_ratio=2)
model = MaskRCNN(backbone,num_classes=2,rpn_anchor_generator=anchor_generator,box_roi_pool=roi_pooler, mask_roi_pool=mask_roi_pooler)
optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9, weight_decay=0.0005)
dataDir= "../ChemLabScapeDataset/TrainAnnotations"
#dataset = ChemScapeDataset(dataDir, None, "Vessel", False)
d = datasets.CocoDetection(root="../coco/train2014", annFile="../coco/annotations/instances_train2014.json", transform=transforms.ToTensor())
dataLoader = torch.utils.data.DataLoader(d, batch_size=1, shuffle=True, num_workers=0)
# smoke test: push a single COCO sample through the model, then stop
for batch_idx, (data, target) in enumerate(dataLoader):
    print(data.size())
    print(target)
    model(data, target)
    break
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 23:40:44 2016
The Scene Class Containing Objects
@author: alex
"""
from Iterator import Iterator
class Scene(object):
    """A named collection of objects and user devices.

    Objects live in ``obj_list`` and user devices in ``user_devices``,
    both keyed dicts. Iteration over either collection is delegated to a
    Scene_Iterator instance.
    """
    def __init__(self):
        self.name=""
        self.key=None
        self.obj_list={}
        self.user_devices = {}
        self.iterator = Scene_Iterator(self)
    def is_object_in_scene(self, obj_key):
        """Return True when an object with *obj_key* is in this scene."""
        return obj_key in self.obj_list
    def add(self, key, obj):
        """Add *obj* under *key* and record this scene's key on the object."""
        self.obj_list[key] = obj
        obj.scene_ids.append(self.key)
    def remove(self, key):
        del self.obj_list[key]
    def get(self, key):
        return self.obj_list[key]
    def size(self):
        """Number of objects in the scene."""
        return len(self.obj_list)
    def is_userdevice_in_scene(self, obj_key):
        """Return True when a user device with *obj_key* is in this scene.

        BUG FIX: the original checked ``self.obj_list`` (copy-paste from
        is_object_in_scene), so user-device membership was never tested
        against the user_devices dict.
        """
        return obj_key in self.user_devices
    def add_userdevice(self, key, ud):
        """Add user device *ud* under *key* and record this scene on it."""
        self.user_devices[key] = ud
        ud.scenes.append(self.key)
    def remove_userdevice(self, key):
        del self.user_devices[key]
    def get_userdevice(self, key):
        return self.user_devices[key]
    def num_uds(self):
        """Number of user devices in the scene."""
        return len(self.user_devices)
    def iterate_devices(self, function, **kwargs):
        self.iterator.iterate_devices(function, **kwargs)
    def iterate(self, function, **kwargs):
        self.iterator.iterate(function, **kwargs)
#Advanced Iteration Operator that supports functions with the actual node and a single parameter
#Iterations meant for python 2.7
class Scene_Iterator(Iterator):
    """Iterator over the objects and user devices held by a Scene.

    :param Scene scene: the scene whose collections are iterated
    """
    def __init__(self, scene):
        super(Scene_Iterator, self).__init__(scene, "Scene Iterator")
        # The Iterator base stores the wrapped object on self.obj;
        # keep a readable alias for it.
        self.scene = self.obj

    def iterate(self, function, **kwargs):
        # Apply *function* to every (key, object) pair in the scene.
        self.process_node(function, self.scene, **kwargs)

    def process_node(self, function, scene, **kwargs):
        # Python 2 style dict iteration (.iteritems), as noted above.
        for obj_key, obj in scene.obj_list.iteritems():
            function(obj_key, obj, **kwargs)

    def iterate_devices(self, function, **kwargs):
        # Apply *function* to every (key, device) pair in the scene.
        self.process_dnode(function, self.scene, **kwargs)

    def process_dnode(self, function, scene, **kwargs):
        for dev_key, device in scene.user_devices.iteritems():
            function(dev_key, device, **kwargs)
#!/usr/local/bin/python3
from flask import Flask, request, jsonify, Response
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from sqlalchemy.orm import relationship
import os
import config
from config import db
import commons
ma = config.ma
app = config.app
class Customer(db.Model):
    """SQLAlchemy model for a customer account."""
    __tablename__ = "customer"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250))
    email = db.Column(db.String(250))
    password = db.Column(db.String(250))
    # Deleting a customer also deletes its certificates (delete-orphan cascade).
    certificates = relationship("Certificate", cascade="all, delete-orphan")
    def __init__(self, name, email, password):
        self.name = name
        self.email = email
        self.password = password
    def toJSON(self):
        # NOTE(review): this serialization includes the stored password
        # (hashed by the caller); consider omitting it like CustomerSchema does.
        return jsonify(id=self.id, name=self.name, email=self.email, password=self.password)
class CustomerSchema(ma.Schema):
    """Marshmallow schema controlling which Customer fields are serialized."""
    class Meta:
        # what to expose
        # no reason to expose password
        fields = ('id', 'name', 'email')
# Single-object and many-object serializers.
customer_schema = CustomerSchema()
customers_schema = CustomerSchema(many=True)
# endpoint to create new customer
@app.route("/customer", methods=["POST"])
def add_customer():
    """Create a customer from the JSON body's name/email/password."""
    name = request.json['name']
    email = request.json['email']
    # Hash+salt the password before it is stored.
    password = commons.hashSaltPassword(request.json['password'])
    new_customer = Customer(name, email, password)
    db.session.add(new_customer)
    db.session.commit()
    # NOTE(review): toJSON() echoes the password hash back to the client.
    response = new_customer.toJSON()
    response.status_code = 200
    return response
# endpoint to show all customers
@app.route("/customer", methods=["GET"])
def get_customer():
    """Return all customers (id/name/email only, via the schema)."""
    all_customers = Customer.query.all()
    result = customers_schema.dump(all_customers)
    # NOTE(review): `.data` implies marshmallow 2.x; in 3.x dump() returns
    # the data directly and this would break -- confirm the pinned version.
    return jsonify(result.data)
# endpoint to get customer detail by id
@app.route("/customer/<id>", methods=["GET"])
def customer_detail(id):
    """Return a single customer by primary key.

    Query.get yields None for an unknown id; there is no explicit 404 here.
    """
    customer = Customer.query.get(id)
    return customer_schema.jsonify(customer)
# endpoint to update customer
@app.route("/customer/<id>", methods=["PUT"])
def customer_update(id):
    """Update a customer's name and email from the JSON body."""
    customer = Customer.query.get(id)
    # NOTE(review): no None check -- an unknown id makes the attribute
    # assignments below raise (HTTP 500) instead of returning 404.
    name = request.json['name']
    email = request.json['email']
    customer.email = email
    customer.name = name
    db.session.commit()
    return customer_schema.jsonify(customer)
# endpoint to delete customer
@app.route("/customer/<id>", methods=["DELETE"])
def customer_delete(id):
    """Delete a customer (certificates cascade) and echo the deleted row."""
    customer = Customer.query.get(id)
    db.session.delete(customer)
    db.session.commit()
    return customer_schema.jsonify(customer)
|
import logging
import time
import json
import os
# Root logger at INFO so progress is visible in the runtime's log sink.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Total seconds to wait for the job; default 1800 (30 minutes).
wait_timeout = int(os.environ.get('JOB_WAIT_TIMEOUT',1800))
def is_job_complete():
    """Placeholder completion probe for the long-running job.

    Replace the body with the real completion check; the stub reports
    the job as finished immediately.
    """
    # Check for completion of your long running job here
    return True
def get_wait_status(time_to_wait, input_secs_before_timeout):
    """Report whether the caller should keep waiting for the job.

    time_to_wait: total seconds allowed (the configured timeout).
    input_secs_before_timeout: -1 on the first call; on later calls, the
    value returned by the previous call (an absolute epoch deadline).

    Returns a dict with 'seconds_before_timeout' and 'wait_status'
    (JOB_COMPLETED | KEEP_WAITING | JOB_WAIT_TIMEOUT).
    """
    if is_job_complete():
        return {'seconds_before_timeout': 0 , 'wait_status': 'JOB_COMPLETED'}
    if time_to_wait <= 0:
        return {'seconds_before_timeout': 0 , 'wait_status': 'JOB_WAIT_TIMEOUT'}
    now = int(time.time())
    if input_secs_before_timeout == -1:
        # First call: hand back an absolute deadline (epoch + budget).
        remaining = now + time_to_wait
    else:
        # Later calls: seconds left until the previously issued deadline.
        remaining = input_secs_before_timeout - now
    if remaining > 0:
        return {'seconds_before_timeout': remaining, 'wait_status': 'KEEP_WAITING'}
    return {'seconds_before_timeout': remaining, 'wait_status': 'JOB_WAIT_TIMEOUT'}
def lambda_handler(event, context):
    # Entry point: read the deadline carried over from the previous
    # invocation (-1 when absent) and report the current wait status.
    input_secs_before_timeout = event.get('seconds_before_timeout',-1)
    return get_wait_status(wait_timeout, input_secs_before_timeout)
|
#!/usr/bin/env python
import sys
import subprocess
def option_is_in_the_list( keyword, option, options_registered ):
    # Exit 0 (success) as soon as *option* matches one of the
    # whitespace-separated entries in *options_registered*.
    for listed in options_registered.split() :
        if (option == listed):
            sys.exit(0)
    # Unknown option: report it and exit non-zero (Python 2 print statements).
    print option, "do not exist for", keyword
    print "Use one of : ", options_registered
    sys.exit(-1)
# if no options are passed, return
if (len(sys.argv)==2):
    sys.exit(0)
options_registered = ""
# look for known options for config
if ( (sys.argv)[1] == "config" ):
    # Scrape the registered config names out of the makefile's
    # $(findstring ...) lines.
    cmd = subprocess.Popen('grep config makefile|grep findstring', shell=True, stdout=subprocess.PIPE)
    for line in cmd.stdout :
        options_registered += (line.split(",")[1]).split()[1] + " "
    for i in range(2,len(sys.argv)):
        option_is_in_the_list( "config", (sys.argv)[i], options_registered)
# look for known options for machine
if ( (sys.argv)[1] == "machine" ):
    # a single machine can be passed
    if (len(sys.argv)!=3):
        sys.exit(-1)
    # Valid machine names are the entries under scripts/CompileTools/machine.
    cmd = subprocess.Popen('ls -1 scripts/CompileTools/machine', shell=True, stdout=subprocess.PIPE)
    for line in cmd.stdout :
        options_registered += (line.split())[0] + " "
    option_is_in_the_list( "machine", (sys.argv)[2], options_registered)
|
from django.conf.urls.defaults import patterns, include, url
# Route the site root to the prototype "search" view.
# NOTE(review): django.conf.urls.defaults and string view names imply
# Django < 1.6; this will not import on modern Django.
urlpatterns = patterns("wos.prototype.views",
    url(r"^$", "search"),
)
# -*- coding: utf-8 -*-
import json
import urllib2
import os
import re
# Base URL of the MediaWiki API used throughout (Python 2 script: urllib2,
# old except syntax, print statements).
wikiUrl = 'http://wiki.hackersanddesigners.nl/mediawiki/'
# All collected wikitext is appended into this single output file.
f = open('handd-book.wiki', 'w')
def get_pages(from_page):
    """Return the page names linked as [[...]] from *from_page*."""
    pages = []
    pageUrl = wikiUrl + 'api.php?action=parse&page=' + from_page + '&format=json&disableeditsection=true&prop=wikitext'
    wikiJson = json.load(urllib2.urlopen(pageUrl))
    try:
        wikistr = wikiJson['parse']['wikitext']['*'].encode('utf-8').strip()
        # Match internal wiki links like [[Page name]].
        reobj = re.compile(r'\[\[[A-Za-z0-9\ \(\)\-\'\$\€,]*\]\]', re.IGNORECASE)
        res = reobj.findall(wikistr)
        for mat in res:
            # Strip the brackets and normalise spaces to underscores.
            page = mat.replace('[[','')
            page = page.replace(']]','')
            page = page.replace(' ','_')
            pages.append(page)
    except Exception, e:
        print e
    return pages
def get_image(filename):
    """Download wiki image *filename* into the working dir (skip if present)."""
    # http://wiki.hackersanddesigners.nl/mediawiki/api.php?action=query&titles=File:Chicken-and-Potato-Soup.png&prop=imageinfo&&iiprop=url&format=json
    if os.path.exists(filename):
        return
    wikiJson = json.load(urllib2.urlopen(wikiUrl + 'api.php?action=query&titles=File:' + filename + '&prop=imageinfo&&iiprop=url&format=json'))
    print wikiJson
    try:
        pages = wikiJson['query']['pages']
        print pages
        for key, value in pages.iteritems():
            # The imageinfo payload carries the real file URL.
            url = value['imageinfo'][0]['url']
            print url
            img_res = urllib2.urlopen(url)
            img_file = open(filename, 'wb')
            img_file.write(img_res.read())
            img_file.close()
    except Exception, e:
        print e
# Walk every page linked from the book-sprint index, download its images,
# and append cleaned wikitext to the output file.
pages = get_pages('Book_sprint_2015')
for page in pages:
    pageUrl = wikiUrl + 'api.php?action=parse&page=' + page + '&format=json&disableeditsection=true&prop=wikitext|images|links'
    print pageUrl
    wikiJson = json.load(urllib2.urlopen(pageUrl))
    wikistr = ''
    try:
        title = wikiJson['parse']['title'].encode('utf-8').strip()
        print title
        # Emit the page title as a top-level wiki heading.
        wikistr += '\n\n=' + title + '=\n\n'
    except Exception, e:
        print e
    try:
        # Get images - JBG
        imgs = wikiJson['parse']['images']
        for img in imgs:
            img = img.encode('utf-8').strip()
            print ' - ' + img
            get_image(img)
    except Exception, e:
        print e
    try:
        wikistr += wikiJson['parse']['wikitext']['*'].encode('utf-8').strip()
        wikistr = re.sub(r'\|\d*(x\d*)?px', '', wikistr) # Remove px info from images - JBG
        wikistr = re.sub(r'{{[A-Za-z0-9#:|/.?= \n&\-\\\”\{\}]*}}', '', wikistr) # Remove youtube links - JBG
        # Replace internal wiki links with external links for footnotes - JBG
        for link in wikiJson['parse']['links']:
            link_str = link['*'].encode('utf-8').strip()
            prep_str = link_str.replace(' ', '_')
            wikistr = re.sub(r'\[\[' + link_str + '[A-Za-z0-9\(\)| ]*\]\]', '[' + wikiUrl + 'index.php/' + prep_str + ' ' + link_str + ']', wikistr)
        f.write(wikistr)
    except Exception, e:
        print e
|
#!/usr/bin/env python
from distutils.core import setup
from distutils.core import Extension
# C extension module compiled from spammodule.c.
spammodule = Extension('spam', sources=['spammodule.c'])
setup(name='spam',
      version='1.0',
      description='spam',
      author='Junlong Xie',
      author_email='decimalbell@gmail.com',
      url='https://github.com/decimalbell/devnull',
      ext_modules=[spammodule])
|
import array
import serial
import camabio
# Command frame for the device; camabio fills in the checksum in place.
data = [0x55,0xaa,0x30,0x01,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x30,0x01]
camabio.setChksum(data)
print('abriendo puerto seriel')
ser = serial.Serial(port="/dev/ttyUSB0",baudrate=9600,timeout=5,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS)
print('escribiendo bytes en el puerto serie')
camabio.printArray(data)
# NOTE(review): array.tostring() was removed in Python 3.9; use tobytes()
# there (kept as-is assuming the Python 2 serial stack this targets).
setBauds = array.array('B', data).tostring()
ser.write(setBauds);
ser.flush()
print('tratando de leer bytes desde el puerto serie: ')
data2 = ser.read(len(data))
# BUG FIX: Serial.read returns an empty (byte) string on timeout, never
# None, so the original `data2 == None` branch could not fire; test for
# emptiness instead.
if not data2:
    print('No se leyo ningun byte')
else:
    camabio.printHexString(data2)
|
#coding: utf-8
import os
from util_settings import *
# Site administrators; also used as MANAGERS below.
ADMINS = (
    ('Arruda', 'felipe.pontes@uniriotec.br'),
)
MANAGERS = ADMINS
TIME_ZONE = 'America/Sao_Paulo'
LANGUAGE_CODE = 'pt-br'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
# Login/Logout URL
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/dashboard/'
#AUTH_PROFILE_MODULE = 'usuarios.PerfilUsuario'
# When an SMTP server becomes available, update the settings below to use it.
#EMAIL_HOST = 'smtp.gmail.com'
#EMAIL_HOST_USER = 'email@email.br'
#EMAIL_HOST_PASSWORD = 'pass'
#EMAIL_USE_TLS = True
# Allow logging in with either e-mail address or username.
AUTHENTICATION_BACKENDS = (
    'user_backends.email_username.EmailOrUsernameModelBackend',
    'django.contrib.auth.backends.ModelBackend',
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
#    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    # default template context processors
    'django.core.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
    # AMAO: project-specific context processor.
    'context_processors.aluno_monitor_professor',
)
ROOT_URLCONF = 'urls'
# LOCAL() is provided by util_settings (star-imported at the top of the file).
TEMPLATE_DIRS = (
    LOCAL('templates'),
)
# E-mail the ADMINS on any unhandled request error.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Registered graders: (id, 'name', 'description', 'dotted.grader.class').
CORRETORES = (
    # ('nome','Descrição','app.corretor.classe'),
    # ('Base','Um corretor basico.','Corretor.base.Corretor'),
    (1,'CPP','Um corretor basico de c++.','Corretor.corretor.corretor_cpp.CorretorCPP'),
)
#safeexec
SAFEEXEC_PATH = LOCAL('safeexec/safeexec')
#CORRETORES= (
#    ( 0, 'aguardando', u'Aguardando Pagamento'),
#    ( 1, 'aguardando', u'Aguardando Pagamento'),
#    )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--rednose','--testmatch=^test','--exclude-dir-file=nose_exclude.txt','-s',]#(,'--with-notify')
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS':False,
}
|
import os
def make_phas(obs, det):
    """Split the 21-row PHA-II spectral files for one detector into
    per-row files by running HEASoft's cmppha once for the source
    spectrum (.pha) and once for the background spectrum (.bak) of
    each row."""
    for row in range(0, 21):
        row_num = str(row + 1)
        src_cmd = ('cmppha infile=' + obs + '_' + det + '_srcspectra_v01.pha'
                   + ' outfile=' + det + '_src' + row_num + '.pha'
                   + ' cmpmode=expand rows="' + row_num + '"')
        bak_cmd = ('cmppha infile=' + obs + '_' + det + '_bkgspectra.bak'
                   + ' outfile=' + det + '_bak' + row_num + '.bak'
                   + ' cmpmode=expand rows="' + row_num + '"')
        print(src_cmd)
        print(bak_cmd)
        print("Making pha I for ", row, " ", src_cmd)
        os.system(src_cmd)
        os.system(bak_cmd)
# Expand the spectra for this GRB observation's selected detectors.
obs_id = 'bn190829830'
make_phas(obs_id, 'n6')
make_phas(obs_id, 'n7')
# ~ make_phas(obs_id, 'b0')
make_phas(obs_id, 'b1')
# ~ make_phas(obs_id, 'n7')
# ~ make_phas(obs_id, 'n8')
|
# python example to train doc2vec model (with or without pre-trained word embeddings)
import argparse
import os
import gensim.models as g
import logging
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--method", type=str, default="PVDM", help="PVDM | PVDBOW")
    parser.add_argument("--dict_size", type=int, default=20000, help="the max size of word dictionary")
    parser.add_argument("--data_folder", type=str, default="ACL", help="ACL | Markov | huffman_tree | two_tree")
    parser.add_argument("--data_type", type=str, default="news", help="movie | news | tweet")
    parser.add_argument("--unlabeled_data_num", type=int, default=50000, help="how many unlabeled data samples to use")
    parser.add_argument("--embedding_size", type=int, default=256, help="word and doc embedding size")
    parser.add_argument("--num_epochs", type=int, default=300, help="epoch num")
    parser.add_argument("--window_size", type=int, default=15, help="sliding window size")
    parser.add_argument("--concat", type=int, default=1, help="1 for concat word vectors, 0 for sum or average")
    parser.add_argument("--min_count", type=int, default=1,
                        help="Ignores all words with total frequency lower than this")
    parser.add_argument("--sampling_threshold", type=float, default=1e-5,
                        help="the threshold for configuring which higher-frequency words are randomly downsampled, useful range is (0, 1e-5")
    parser.add_argument("--worker_count", type=int, default=1, help="Use these many worker threads to train the model")
    parser.add_argument("--negative_samples", type=int, default=5,
                        help="how many noise words should be drawn in negative sampling")
    args = parser.parse_args()
    args.model = "doc2vec"
    # Layout: dataset/<folder>/<type>/<type>.txt holds the unlabeled corpus.
    dataset_dir = os.path.join("dataset", args.data_folder, args.data_type)
    unlabeled_text_dir = os.path.join(dataset_dir, args.data_type + '.txt')
    model_dir = os.path.join(args.model, args.data_folder, args.data_type,
                             str(args.unlabeled_data_num),args.method)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.NOTSET,
                        filename=os.path.join(model_dir, "log.txt"))
    # Materialize the first N unlabeled lines as the training file (cached
    # across runs).
    train_txt = os.path.join(model_dir, 'unlabeled_' + str(args.unlabeled_data_num) + '.txt')
    if not os.path.exists(train_txt):
        with open(unlabeled_text_dir, 'r', encoding='utf8', errors='ignore') as f:
            all_lines = f.readlines()
        with open(train_txt, 'w', encoding='utf8') as f:
            f.writelines(all_lines[:args.unlabeled_data_num])
    # Record the exact configuration next to the model.
    with open(os.path.join(model_dir, 'config_info.txt'), 'w', encoding='utf8') as f:
        print(str(args), file=f)
    docs = g.doc2vec.TaggedLineDocument(train_txt)
    # gensim convention: dm=1 -> PV-DM, dm=0 -> PV-DBOW.
    if args.method=="PVDM":
        dm=1
    else:
        dm=0
    model = g.Doc2Vec(docs, vector_size=args.embedding_size, max_vocab_size=args.dict_size, window=args.window_size,
                      min_count=args.min_count, sample=args.sampling_threshold, workers=args.worker_count, hs=0,
                      dm=dm, negative=args.negative_samples, dbow_words=1, dm_concat=args.concat,
                      epochs=args.num_epochs)
    saved_path = os.path.join(model_dir, 'model.bin')
    # save model
    model.save(saved_path)
|
# Letters tested against word starts/ends in the grammar checks below.
fooLetters = ["r", "t", "c", "d", "b"]
alphabet = ["t", "w", "h", "z", "k", "d", "f", "v", "c", "j", "x", "l", "r", "n", "q", "m", "g", "p", "s", "b"] # Bloogan alphabet
vocabulary = [] # List to insert all non-repeated words
def order(vocabulary): # This function serves to get all the words that start with the first letter from the alphabet
    # Group the words by first letter in Bloogan alphabet order; the
    # within-group ordering is delegated to orderResto().
    ordenado = []
    for letter in alphabet:
        arr = []
        for word in vocabulary:
            if(word[0] == letter):
                arr.append(word) # It throws the words in this array
        if(len(arr) != 0): ordenado.append(orderResto(arr)); # Then call this other function to sort the rest of the word
    return ordenado
def orderResto(arr): # This function serves to sort the word after the second letter, 'cause the first letter is already filtered
    # Insertion sort of same-first-letter words by the Bloogan alphabet;
    # comparisons beyond the second letter are delegated to something().
    arrOrder = [] # This array will be returned to *order* function with the words ordered
    for word in arr:
        if (len(arrOrder) == 0): # Checks to see if arrOrder is empty
            arrOrder.append(word)
        else:
            for index, palavra in enumerate(arrOrder): # It starts checking the actual word with the words in the arrOrder
                aux = 1
                if(word in arrOrder): break # Check to see if word is already in arrOrder
                if (word[aux] == palavra[aux]): # Check if the second letter is equal
                    if (aux + 1 < len(palavra)):
                        if(something(palavra, word, aux + 1, index) == 1): arrOrder.insert(index, word) # Call this function to check recursive the others letters
                    else:
                        arrOrder.insert(index, word) # If the word is shorter and the letters is equal than the previous one, it will be inserted in that position
                        break
                elif (alphabet.index(word[aux]) < alphabet.index(palavra[aux])): # Check if the second letter in alphabet is before
                    arrOrder.insert(index, word)
                    break
            if(word not in arrOrder): arrOrder.append(word) # If none of that works, the word will be put in the end of array
    return arrOrder # Return the array sorted
def something(palavra, word, aux, index):
    """Recursively compare *word* against *palavra* from position *aux*
    in Bloogan alphabet order.

    Returns 1 when *word* should be inserted before *palavra* (sorts
    earlier, or equals a prefix of it), 0 when it sorts after.
    """
    if(word[aux] == palavra[aux]):
        if(aux+1 < len(palavra)):
            # BUG FIX: the original called itself and then executed a bare
            # `return`, discarding the recursive result and yielding None,
            # so comparisons past this depth never took effect.
            return something(palavra, word, aux+1, index)
        else:
            return 1
    if (alphabet.index(word[aux]) < alphabet.index(palavra[aux])):
        return 1
    return 0
text = input().split() # Takes the entry and split in a list
# Variables used on the "main"
prepositions = 0; verbSubjunctive = 0; verbs = 0; numbers = []; prettyNumbers = []
for words in text: # Iterate through the list
    if(len(words) == 5): # Checking to see which word is a preposition
        if("l" not in words and words[-1] not in fooLetters):
            prepositions += 1
    elif(len(words) > 7 and words[-1] not in fooLetters): # Check to see which word is a verb
        verbs += 1
        if(words[0] not in fooLetters): # Check to see if the verb is in the subjunctive form
            verbSubjunctive += 1
    if(words not in vocabulary): # Remove words that are repeated in text to sort after
        vocabulary.append(words)
    value = 0
    # Words double as base-20 numbers: letter value = alphabet index,
    # least-significant digit first.
    for index, letter in enumerate(words): # Make the sum of the word
        value += alphabet.index(letter) * (20 ** index)
    if (value > 422224 and (value % 3) == 0 and value not in prettyNumbers): # Check to see if the value of the word is a pretty number
        prettyNumbers.append(value)
vocabulary.sort() # It sorts the non-repeated word's list
ordered = order(vocabulary) # Then call the function to sort the words in Bloogan alphabet
print("There are " + str(prepositions) + " prepositions in Text B")
print("There are " + str(verbs) + " verbs in Text B")
print("There are " + str(verbSubjunctive) + " subjunctive verbs in Text B")
print("In Text B, there are " + str(len(prettyNumbers)) + " distinct(!) pretty numbers\n")
for word in ordered:
    print(" ".join(word))
|
def helper_parse_if(if_string : str):
    """
    Parses the if_string manually to test for equality between its
    members.

    Operators are tested longest-first so ">=" is not mistaken for ">".
    "!=" and "==" compare the stripped operands as text; the numeric
    operators convert both sides to float. Returns None when no operator
    is found or a numeric operand cannot be converted.

    >>> helper_parse_if("this == this")
    True
    >>> helper_parse_if("2>3")
    False
    >>> helper_parse_if("40 >= 40")
    True
    """
    try:
        if "!=" in if_string:
            spl = if_string.split("!=")
            return spl[0].strip() != spl[1].strip()
        if "==" in if_string:
            spl = if_string.split("==")
            return spl[0].strip() == spl[1].strip()
        if ">=" in if_string:
            spl = if_string.split(">=")
            return float(spl[0].strip()) >= float(spl[1].strip())
        if "<=" in if_string:
            spl = if_string.split("<=")
            return float(spl[0].strip()) <= float(spl[1].strip())
        if ">" in if_string:
            spl = if_string.split(">")
            return float(spl[0].strip()) > float(spl[1].strip())
        if "<" in if_string:
            spl = if_string.split("<")
            return float(spl[0].strip()) < float(spl[1].strip())
    except ValueError:
        # Narrowed from a bare `except:` -- only float() conversion
        # failures are an expected "cannot evaluate" outcome.
        return None
    return None
def helper_split(split_string : str, easy : bool = True):
    """
    A helper method to universalize the splitting logic used in multiple
    blocks and adapters: split on the first delimiter found, trying "|"
    first, then (only when *easy* is true) "~" and ",". Returns None
    when no applicable delimiter is present.

    >>> helper_split("this, should|work")
    ["this, should", "work"]
    """
    delimiters = ["|"]
    if easy:
        delimiters += ["~", ","]
    for delim in delimiters:
        if delim in split_string:
            return split_string.split(delim)
    return None
def helper_parse_list_if(if_string):
    """Evaluate each delimited condition in *if_string* and return the
    list of results (easy=False: "~" and "," are not treated as
    delimiters here)."""
    pieces = helper_split(if_string, False)
    if pieces is None:
        # Single condition: still return a list for a uniform interface.
        return [helper_parse_if(if_string)]
    return [helper_parse_if(piece) for piece in pieces]
"""
Created by Alex Wang
on 2017-07-26
flask服务,默认是阻塞非异步的
"""
import os
from flask import Flask, request
import tensorflow as tf
# Hide GPUs: this toy graph is fine on CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# TF1-style static graph: output = input * input.
a = tf.placeholder(tf.int32, shape=(), name="input")
asquare = tf.multiply(a, a, name="output")
sess = tf.Session()
app = Flask(__name__)
@app.route('/')
def hello_world():
    return "Hellow World"
def response_request():
    # NOTE(review): request.args.get returns a string; feeding it to an
    # int32 placeholder relies on implicit conversion -- confirm.
    num = request.args.get('num')
    # Run the session 100 times; only the last result is returned.
    for i in range (100):
        ret = sess.run([asquare], feed_dict={a: num})
    return str(ret)
    # return "hello"
if __name__ == "__main__":
    # Register /hello against the handler above, then serve (blocking).
    app.add_url_rule("/hello", view_func=response_request)
    app.run(host='127.0.0.1',port=18998, debug=True)
import json
import os
from django.conf import settings
from translations.models import TranslationKey
def get_translations():
    """Load the roadmap-2019 translation mapping from the migration JSON.

    Returns the parsed dict of {translation-key slug: default translation}.
    """
    with open(os.path.join(settings.BASE_DIR,"migration/translations/roadmap2019.json"), encoding='utf-8') as file:
        data = json.load(file)
    return data
def run():
    """Create a TranslationKey row for every entry in the roadmap JSON."""
    for slug, default in get_translations().items():
        # The created object is not needed afterwards (the original bound
        # it over the loop variable), so the return value is discarded.
        TranslationKey.objects.create(
            slug=slug,
            default_translation=default
        )
# File: p (Python 2.4)
from pirates.teleport.DoorTeleportActor import DoorTeleportActor
class InteriorDoorTeleportActor(DoorTeleportActor):
    # Marker subclass: no behaviour is overridden here; interior doors
    # reuse DoorTeleportActor unchanged.
    pass
|
import pandas as pd
# Demonstrate the two simplest DataFrame constructors.
empty = pd.DataFrame()  # no data
print(empty)
data = ["a", "b", "c"]
print(data)
new1 = pd.DataFrame(data)  # one column built from a plain list
print(new1)
import feedparser
import re
import docclass
def read(feed,classifier):
    """Interactively train *classifier* on every entry of RSS *feed*.

    For each entry: show title/publisher/summary, print the classifier's
    current guess, prompt the user for the true category on stdin, and
    train on it.
    """
    f=feedparser.parse(feed)
    for entry in f['entries']:
        #print(entry)
        print ()  # blank separator line (original note asked what this is for)
        print('-----')
        #print('title')
        #print(entry['title'].encode('utf-8'))
        print ('Title: ',entry['title'].encode('utf-8'))
        print ('Publisher: ',entry['publisher'].encode('utf-8'))
        print ()
        print (entry['summary'].encode('utf-8'))
        '''
        print ('Title: '+entry['title'].encode('utf-8'))
        print ('Publisher: '+entry['publisher'].encode('utf-8'))
        print ()
        print (entry['summary'].encode('utf-8'))
        '''
        #fulltext='%s\n%s\n%s' % (entry['title'],entry['publisher'],entry['summary'])
        #print(fulltext)
        #aaa=str(classifier.classify(fulltext))
        #print(aaa)
        #print ('Guess: '+str(classifier.classify(fulltext)))
        print ('Guess: '+str(classifier.classify(entry)))
        c1=input('Enter category: ')
        #print('hhh',entry)
        #print(c1)
        #classifier.train(fulltext,c1)
        classifier.train(entry,c1)
def entryfeatures(entry):
    """Extract classifier features from a feed entry dict.

    Expects 'title', 'summary' and 'publisher' keys. Emits title words
    (prefixed 'Title:'), summary words, consecutive summary word pairs,
    the publisher (prefixed 'Publisher:'), and an 'UPPERCASE' flag when
    more than 30% of summary words are fully upper-case.
    """
    # BUG FIX: '\W*' can match the empty string, and on Python 3.7+
    # re.split then splits between every character; '\W+' yields words.
    splitter=re.compile(r'\W+')
    f={}
    titlewords=[s.lower() for s in splitter.split(entry['title'])
                if len(s)>2 and len(s)<20]
    for w in titlewords: f['Title:'+w]=1
    # Keep the original-case tokens too: the upper-case test below was
    # dead code when applied to already-lowercased words.
    rawwords=[s for s in splitter.split(entry['summary'])
              if len(s)>2 and len(s)<20]
    summarywords=[s.lower() for s in rawwords]
    uc=0
    for i in range(len(summarywords)):
        w=summarywords[i]
        f[w]=1
        if rawwords[i].isupper(): uc+=1
        if i<len(summarywords)-1:
            # BUG FIX: the original sliced [i:i+1] (a single word);
            # a word-pair feature needs two consecutive words.
            twowords=' '.join(summarywords[i:i+2])
            f[twowords]=1
    f['Publisher:'+entry['publisher']]=1
    # Guard the division: an empty summary previously raised ZeroDivisionError.
    if summarywords and float(uc)/len(summarywords)>0.3: f['UPPERCASE']=1
    return f
|
import math
def quadratic(a, b, c):
    """Solve a*x^2 + b*x + c = 0 over the reals.

    Returns a tuple (x1, x2); both roots are equal when the discriminant
    is zero. For a negative discriminant the original "no solution"
    message string is returned unchanged.

    Raises:
        TypeError: if any coefficient is not an int or float.
    """
    # BUG FIX: the original tested isinstance(a | b | c, ...), which
    # bitwise-ORs the arguments -- that passes any int triple unchecked
    # and raises an unrelated TypeError for floats. Check each value.
    if not all(isinstance(v, (int, float)) for v in (a, b, c)):
        raise TypeError('bad operand type')
    # Renamed from `type`, which shadowed the builtin.
    discriminant = b * b - 4 * a * c
    if discriminant > 0:
        x1 = (-b + math.sqrt(discriminant)) / (2 * a)
        x2 = (-b - math.sqrt(discriminant)) / (2 * a)
        return x1, x2
    elif discriminant == 0:
        x1 = x2 = (-b) / (2 * a)
        return x1, x2
    else:
        return '此方程无解'
# Tests (interactive variant kept disabled):
# a = int(input('a: '))
# b = int(input('b: '))
# c = int(input('c: '))
print('quadratic(2, 3, 1) =', quadratic(2, 3, 1))
print('quadratic(1, 3, -4) =', quadratic(1, 3, -4))
# '测试失败' = test failed, '测试成功' = test passed.
if quadratic(2, 3, 1) != (-0.5, -1.0):
    print('测试失败')
elif quadratic(1, 3, -4) != (1.0, -4.0):
    print('测试失败')
else:
    print('测试成功')
|
#!/usr/bin/env python3
#
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# Script to build a Debian packages from a Dart tarball. The script
# will build a source package and a 32-bit (i386) and 64-bit (amd64)
# binary packages.
import optparse
import os
import sys
import tarfile
import subprocess
from os.path import join, exists, abspath, dirname
sys.path.append(join(dirname(__file__), '..'))
import utils
from shutil import copyfile
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
# Repository root, two levels up from this script.
DART_DIR = abspath(join(dirname(__file__), '..', '..'))
# Map GN build architecture names to their Debian package equivalents.
GN_ARCH_TO_DEBIAN_ARCH = {
    "ia32": "i386",
    "x64": "amd64",
    "arm": "armhf",
    "arm64": "arm64",
    "riscv64": "riscv64",
}
def BuildOptions():
    """Build and return the command-line option parser for this script."""
    parser = optparse.OptionParser()
    parser.add_option("--tar_filename", default=None,
                      help="The tar file to build from.")
    parser.add_option("--out_dir", default=None,
                      help="Where to put the packages.")
    parser.add_option("-a", "--arch",
                      help='Target architectures (comma-separated).',
                      metavar='[all,ia32,x64,arm,arm64,riscv64]',
                      default='x64')
    parser.add_option("-t", "--toolchain",
                      help='Cross-compilation toolchain prefix',
                      default=None)
    return parser
def RunBuildPackage(opt, cwd, toolchain=None):
    """Run dpkg-buildpackage in *cwd* with extra options *opt*.

    Arguments:
      opt: list of extra command-line arguments for dpkg-buildpackage.
      cwd: directory to run in (the unpacked source tree).
      toolchain: optional cross-compilation toolchain prefix, passed to
        the build through the TOOLCHAIN environment variable.

    Raises subprocess.CalledProcessError when the build fails.
    """
    env = os.environ.copy()
    if toolchain is not None:  # identity test; was the non-idiomatic `!= None`
        env["TOOLCHAIN"] = '--toolchain=' + toolchain
    cmd = ['dpkg-buildpackage', '-j%d' % HOST_CPUS]
    cmd.extend(opt)
    # check_call raises on non-zero exit; the return value (always 0 on
    # success) was bound to an unused variable before.
    subprocess.check_call(cmd, cwd=cwd, env=env)
def BuildDebianPackage(tarball, out_dir, arches, toolchain):
    """Build Debian source and binary packages from a Dart source tarball.

    Arguments:
      tarball: path to the dart-<version>.tar.gz source archive.
      out_dir: directory receiving the .dsc/.tar/.deb artifacts.
      arches: list of GN architecture names (keys of GN_ARCH_TO_DEBIAN_ARCH).
      toolchain: optional cross-compilation toolchain prefix.

    Returns -1 when the tarball is missing; otherwise None.
    """
    version = utils.GetVersion()
    tarroot = 'dart-%s' % version
    origtarname = 'dart_%s.orig.tar.gz' % version
    if not exists(tarball):
        print('Source tarball not found')
        return -1
    with utils.TempDir() as temp_dir:
        origtarball = join(temp_dir, origtarname)
        copyfile(tarball, origtarball)
        # NOTE(review): extractall without member filtering trusts the
        # tarball's paths; fine for our own build artifact.
        with tarfile.open(origtarball) as tar:
            tar.extractall(path=temp_dir)
        # Build source package.
        print("Building source package")
        RunBuildPackage(['-S', '-us', '-uc'], join(temp_dir, tarroot))
        # Build binary package(s).
        for arch in arches:
            print("Building %s package" % arch)
            RunBuildPackage(
                ['-B', '-a', GN_ARCH_TO_DEBIAN_ARCH[arch], '-us', '-uc'],
                join(temp_dir, tarroot))
        # Copy the Debian package files to the build directory.
        debbase = 'dart_%s' % version
        source_package = [
            '%s-1.dsc' % debbase,
            '%s.orig.tar.gz' % debbase,
            '%s-1.debian.tar.xz' % debbase
        ]
        for name in source_package:
            copyfile(join(temp_dir, name), join(out_dir, name))
        for arch in arches:
            name = '%s-1_%s.deb' % (debbase, GN_ARCH_TO_DEBIAN_ARCH[arch])
            copyfile(join(temp_dir, name), join(out_dir, name))
def Main():
    """Parse options, fill in defaults, and run the Debian package build.

    Returns -1 on a non-Linux host (Debian builds are Linux-only).
    """
    if HOST_OS != 'linux':
        print('Debian build only supported on linux')
        return -1
    options, args = BuildOptions().parse_args()
    out_dir = options.out_dir
    tar_filename = options.tar_filename
    # 'all' expands to every supported architecture.
    if options.arch == 'all':
        options.arch = 'ia32,x64,arm,arm64,riscv64'
    arch = options.arch.split(',')
    # Defaults: artifacts and tarball live in the standard build directory.
    if not options.out_dir:
        out_dir = join(DART_DIR, utils.GetBuildDir(HOST_OS))
    if not tar_filename:
        tar_filename = join(DART_DIR, utils.GetBuildDir(HOST_OS),
                            'dart-%s.tar.gz' % utils.GetVersion())
    BuildDebianPackage(tar_filename, out_dir, arch, options.toolchain)
if __name__ == '__main__':
    sys.exit(Main())
|
'''
Created on 2017年2月21日
@author: admin
'''
#coding=utf-8
import unittest
import HTMLTestRunner
import os, time, datetime
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
# 这里需要导入测试文件
import pythonmakeauto.pythonBase.selenium.baidu as baidu, pythonmakeauto.pythonBase.selenium.youdao as youdao
# E-mail sending helper.
def sentmail(file_new):
    """Send the HTML test report at *file_new* via 126.com SMTP."""
    # Sender address
    mail_from = 'fnngj@126.com'
    # Recipient address
    mail_to = '123456@qq.com'
    # Read the report file as the message body.
    f = open(file_new, 'rb')
    mail_body = f.read()
    f.close()
    msg=MIMEText(mail_body, _subtype='html', _charset='utf-8')
    # Subject line (kept verbatim: "private cloud test report").
    msg['Subject']=u'私有云测试报告'
    # BUG FIX: the Date header format was '%H:%M%S' (missing the second
    # colon), producing a malformed RFC 2822 time like "12:3045".
    msg['date']=time.strftime('%a, %d %b %Y %H:%M:%S %z')
    smtp=smtplib.SMTP()
    # Connect to the SMTP server.
    smtp.connect('smtp.126.com')
    # Credentials. NOTE(review): hard-coded password; move to config/env.
    smtp.login('fnngj@126.com', '123456')
    smtp.sendmail(mail_from, mail_to, msg.as_string())
    smtp.quit()
    print('email has send out !')
# Find the latest test report and e-mail it.
def sendreport():
    result_dir = 'D:\\selenium_python\\report'
    lists = os.listdir(result_dir)
    # Sort by modification time; directories get key 0 so they sort first.
    lists.sort(key=lambda fn: os.path.getmtime(result_dir+"\\"+fn) if not
    os.path.isdir(result_dir+"\\" +fn) else 0)
    # The second-newest entry is the report produced by the last run.
    print(u'上一次测试生成的报告:' +lists[-2])
    file_new = os.path.join(result_dir, lists[-2])
    print(file_new)
    # Hand off to the e-mail helper.
    sentmail(file_new)
testunit = unittest.TestSuite()
# Add the test cases to the suite.
testunit.addTest(unittest.makeSuite(baidu.Baidu))
testunit.addTest(unittest.makeSuite(youdao.Youdao))
# Run the suite (executes at import time, before any __main__ guard).
runner = unittest.TextTestRunner()
runner.run(testunit)
if __name__ == "__main__":
    # E-mail the freshly generated report.
    sendreport()
|
"""Subpackage comprising various utility functions used elsewhere in aospy."""
from . import io
from . import longitude
from .longitude import Longitude
from . import times
from . import vertcoord
__all__ = ['Longitude', 'io', 'longitude', 'times', 'vertcoord']
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import Adam
from torch.autograd import Variable
from torchvision import models
import matplotlib.pyplot as plt
import numpy as np
import MyResnet
import math
import os
# load model
device = "cpu"
net = MyResnet.ResNet(MyResnet.ResBlock).to(device)
PATH = './cifar_net_224.pth'
# map_location='cpu' lets a GPU-trained checkpoint load on a CPU-only host.
net.load_state_dict(torch.load(PATH,map_location='cpu'))
# Datasets CIFAR10 (resized to 224x224 and normalised to [-1, 1]).
transform = transforms.Compose([transforms.Resize((224,224)), \
    transforms.ToTensor(), \
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=1,
                                         shuffle=False, num_workers=0)
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Root directory for all saved activation plots.
PLOT_DIR = './out/plots'
def create_dir(path):
    """Create directory *path*, tolerating the 'already exists' case."""
    # BUG FIX: the original referenced errno.EEXIST but `errno` is never
    # imported in this file, so any OSError turned into a NameError.
    import errno
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
def prepare_dir(path, empty=False):
    """Ensure `path` exists; `empty` is accepted for API compatibility but unused."""
    if os.path.exists(path):
        return
    create_dir(path)
def prime_powers(n):
    """Return every divisor of n in ascending order.

    (Despite the name, this collects all divisors, not only prime powers;
    the name is kept for compatibility with existing callers.)
    """
    divisors = set()
    upper = int(math.sqrt(n)) + 1
    for candidate in range(1, upper):
        quotient, remainder = divmod(n, candidate)
        if remainder == 0:
            divisors.update((candidate, quotient))
    return sorted(divisors)
def get_grid_dim(x):
    """Pick a near-square (rows, cols) pair whose product is x."""
    factors = prime_powers(x)
    half = len(factors) // 2
    if len(factors) % 2:
        # Odd divisor count: x is a perfect square, use the middle divisor twice.
        return factors[half], factors[half]
    # Even count: take the two middle divisors.
    return factors[half], factors[half - 1]
def plot_conv_output(conv_img, name):
    """Save a grid image of all filters in one conv activation map.

    conv_img is expected in NHWC layout (the callers permute to (0, 2, 3, 1)
    before calling); only batch element 0 is plotted. The figure is written to
    PLOT_DIR/conv_output/<name>/<name>.png.
    """
    plot_dir = os.path.join(PLOT_DIR, 'conv_output')
    plot_dir = os.path.join(plot_dir, name)
    prepare_dir(plot_dir, empty=True)
    # Shared grey-scale range so filters are visually comparable.
    w_min = np.min(conv_img)
    w_max = np.max(conv_img)
    num_filters = conv_img.shape[3]
    # Near-square grid sized to the filter count.
    grid_r, grid_c = get_grid_dim(num_filters)
    fig, axes = plt.subplots(min([grid_r, grid_c]),
                             max([grid_r, grid_c]))
    # iterate filters
    for l, ax in enumerate(axes.flat):
        img = conv_img[0, :, :, l]
        ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='bicubic', cmap='Greys')
        # Hide tick marks; the grid is purely visual.
        ax.set_xticks([])
        ax.set_yticks([])
    plt.savefig(os.path.join(plot_dir, '{}.png'.format(name)), bbox_inches='tight')
# Get display image
dataiter = iter(testloader)
# Bug fix: `dataiter.next()` relies on the Python-2-era `.next()` alias that
# modern DataLoader iterators no longer provide; the builtin next() works on
# all versions.
images, labels = next(dataiter)
# Get conv_outputs: the network returns (logits, conv1 maps, conv5 maps).
outputs, conv1_output, conv5_output = net(images)
# NCHW -> NHWC for plotting, detached from the autograd graph.
conv1_output = conv1_output.permute(0, 2, 3, 1)
conv1_output = conv1_output.detach().numpy()
plot_conv_output(conv1_output, 'conv{}'.format(1))
conv5_output = conv5_output.permute(0, 2, 3, 1)
conv5_output = conv5_output.detach().numpy()
plot_conv_output(conv5_output, 'conv{}'.format(5))
|
# Display Output: two fixed demo messages.
print("This is Example Py")
print("This is Child BranchA Py")
|
"""
Write a program, wordcount.py, that opens a file and
counts how many times each space-separated word occurs
in that file. Your program should then print those counts to the screen.
"""
import string
import sys

# Bug fix: the original called open() with no arguments, which raises
# TypeError immediately. Take the file to count from the command line.
with open(sys.argv[1]) as file:
    lines = [line.split() for line in file]  # one list of words per line
    words = [word for line in lines for word in line]

wordcount = {}
for word in words:
    # make all words lowercase and strip off their surrounding punctuation
    word_without_punct = word.lower().strip(string.punctuation)
    # add to dict with count 1 if the word isn't in it; otherwise increment
    wordcount[word_without_punct] = wordcount.get(word_without_punct, 0) + 1

for k, v in wordcount.items():
    print(k, v)
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" USB3 physical-layer abstraction."""
import logging
from amaranth import *
from amaranth.lib.cdc import PulseSynchronizer, FFSynchronizer
from amaranth.hdl.ast import Rose
class PHYResetController(Elaboratable):
    """ Gateware responsible for bringing up a PIPE PHY.

    Note that this gateware resides in `sync`, rather than one of our
    SuperSpeed domains, as the SuperSpeed PHY has yet to bring up its clocks.

    Attributes
    ----------
    reset: Signal(), output
        Signal that drives the PHY's ``reset`` signal.
    phy_status: Signal(), input
        The PIPE PHY's phy_status signal; tracks our progress in startup.
    ready: Signal(), output
        Status signal; asserted when the PHY has started up and is ready for use.

    Parameters
    ----------
    sync_frequency: float
        The frequency of the sync clock domain.
    """
    def __init__(self, *, sync_frequency):
        # NOTE(review): stored but never read below — the reset-time
        # computation in elaborate() hard-codes 50e6. Confirm whether it
        # should be derived from self._sync_frequency instead.
        self._sync_frequency = sync_frequency
        #
        # I/O port
        #
        self.reset = Signal()
        self.ready = Signal()
        self.phy_status = Signal()

    def elaborate(self, platform):
        m = Module()
        # Keep track of how many cycles we'll keep our PHY in reset.
        # This is larger than any requirement, in order to work with a broad swathe of PHYs,
        # in case a PHY other than the TUSB1310A ever makes it to the market.
        # (5 µs at an assumed 50 MHz sync clock.)
        cycles_in_reset = int(5e-6 * 50e6)
        cycles_spent_in_reset = Signal(range(cycles_in_reset + 1))
        with m.FSM():
            # STARTUP_RESET -- post configuration, we'll reset the PIPE PHY.
            # This is distinct from the PHY's built-in power-on-reset, as we run this
            # on every FPGA configuration.
            with m.State("STARTUP_RESET"):
                m.d.comb += [
                    self.reset .eq(1),
                ]
                # Once we've extended past a reset time, we can move on.
                m.d.sync += cycles_spent_in_reset.eq(cycles_spent_in_reset + 1)
                with m.If(cycles_spent_in_reset == cycles_in_reset):
                    m.next = "DETECT_PHY_STARTUP"
            # DETECT_PHY_STARTUP -- post-reset, the PHY should drive its status line high.
            # We'll wait for this to happen, so we can track the PHY's progress.
            with m.State("DETECT_PHY_STARTUP"):
                with m.If(self.phy_status):
                    m.next = "WAIT_FOR_STARTUP"
            # WAIT_FOR_STARTUP -- we've now detected that the PHY is starting up.
            # We'll wait for that startup signal to be de-asserted, indicating that the PHY is ready.
            with m.State("WAIT_FOR_STARTUP"):
                # For now, we'll start up in P0. This will change once we implement proper RxDetect.
                with m.If(~self.phy_status):
                    m.next = "READY"
            # READY -- our PHY is all started up and ready for use.
            # For now, we'll remain here until we're reset.
            with m.State("READY"):
                m.d.comb += self.ready.eq(1)
        return m
class LinkPartnerDetector(Elaboratable):
    """ Light abstraction over our PIPE receiver detection mechanism.

    Primarily responsible for the power state sequencing necessary during receiver detection.

    Attributes
    ----------
    request_detection: Signal(), input
        Strobe; requests that a receiver detection will be performed.
    power_state: Signal(2), output
        Controls the PHY's power state signals.
    detection_control: Signal(), output
        Controls the PHY's partner detection signal.
    phy_status: Signal(), input
        Status signal; asserted when the PHY has completed our request.
    rx_status: Signal(3), input
        Status signal; indicates the result of our request.
    new_result: Signal(), output
        Strobe; indicates when a new result is ready on :attr:``partner_present``.
    partner_present: Signal(), output
        High iff a partner was detected during the last detection cycle.

    Parameters
    ----------
    rx_status: Array(Signal(3), Signal(3))
        Read-only view of the PHY's rx_status signal.
    """
    def __init__(self):
        #
        # I/O port
        #
        self.request_detection = Signal()
        # Reset into P2, the only power state detection may run from.
        self.power_state = Signal(2, reset=2)
        self.detection_control = Signal()
        self.phy_status = Signal()
        self.rx_status = Signal(3)
        self.new_result = Signal()
        self.partner_present = Signal()

    def elaborate(self, platform):
        m = Module()
        # Partner detection is indicated by the value `011` being present on RX_STATUS
        # after a detection completes.
        PARTNER_PRESENT_STATUS = 0b011
        with m.FSM(domain="ss"):
            # IDLE_P2 -- our post-startup state; represents when we're IDLE but in P2.
            # This is typically only seen at board startup.
            with m.State("IDLE_P2"):
                m.d.comb += self.power_state.eq(2)
                with m.If(self.request_detection):
                    m.next = "PERFORM_DETECT"
            # PERFORM_DETECT -- we're asking our PHY to perform the core of our detection,
            # and waiting for that detection to complete.
            with m.State("PERFORM_DETECT"):
                # Per [TUSB1310A, 5.3.5.2], we should hold our detection control high until
                # PhyStatus pulses high; when we'll get the results of our detection.
                m.d.comb += [
                    self.power_state .eq(2),
                    self.detection_control .eq(1)
                ]
                # When we see PhyStatus strobe high, we know our result is in RxStatus.
                # NOTE(review): this loop emits the same conditional twice; it looks
                # like leftover scaffolding (possibly for a two-lane rx_status view,
                # cf. the Array in the docstring) — confirm the intent.
                for i in range(2):
                    # When our detection is complete...
                    with m.If(self.phy_status):
                        # ... capture the results, but don't mark ourselves as complete, yet, as we're
                        # still in P2. We'll need to move to operational state.
                        m.d.ss += self.partner_present.eq(self.rx_status == PARTNER_PRESENT_STATUS)
                        m.next = "MOVE_TO_P0"
            # MOVE_TO_P0 -- we've completed a detection, and now are ready to move (back) into our
            # operational state.
            with m.State("MOVE_TO_P0"):
                # Ask the PHY to put us back down in P0.
                m.d.comb += self.power_state.eq(0)
                # Once the PHY indicates it's put us into the relevant power state, we're done.
                # We can now broadcast our result.
                with m.If(self.phy_status):
                    m.d.comb += self.new_result.eq(1)
                    m.next = "IDLE_P0"
            # IDLE_P0 -- our normal operational state; usually reached after at least one detection
            # has completed successfully. We'll wait until another detection is requested.
            with m.State("IDLE_P0"):
                m.d.comb += self.power_state.eq(0)
                # We can only perform detections from P2; so, when the user requests a detection, we'll
                # need to move back to P2.
                with m.If(Rose(self.request_detection)):
                    m.next = "MOVE_TO_P2"
            # MOVE_TO_P2 -- our user has requested a detection, which we can only perform from P2.
            # Accordingly, we'll move to P2, and -then- perform our detection.
            with m.State("MOVE_TO_P2"):
                # Ask the PHY to put us into P2.
                m.d.comb += self.power_state.eq(2)
                # Once the PHY indicates it's put us into the relevant power state, we can begin
                # our link partner detection.
                with m.If(self.phy_status):
                    m.next = "PERFORM_DETECT"
        return m
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ventana_principal.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mainWindow(object):
    """Auto-generated (pyuic5) UI: a central label plus a 'Libros' menu.

    Generated from ventana_principal.ui — regenerate rather than hand-edit.
    """
    def setupUi(self, mainWindow):
        """Build the widget tree and attach it to `mainWindow`."""
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(800, 600)
        # Central area: a single large welcome label.
        self.centralwidget = QtWidgets.QWidget(mainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.pantalla_inicio = QtWidgets.QLabel(self.centralwidget)
        self.pantalla_inicio.setGeometry(QtCore.QRect(260, 150, 321, 121))
        font = QtGui.QFont()
        font.setPointSize(20)
        self.pantalla_inicio.setFont(font)
        self.pantalla_inicio.setObjectName("pantalla_inicio")
        mainWindow.setCentralWidget(self.centralwidget)
        # Menu bar with a single "Libros" menu.
        self.menubar = QtWidgets.QMenuBar(mainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName("menubar")
        self.menuLibros = QtWidgets.QMenu(self.menubar)
        self.menuLibros.setObjectName("menuLibros")
        mainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(mainWindow)
        self.statusbar.setObjectName("statusbar")
        mainWindow.setStatusBar(self.statusbar)
        # Menu actions (text assigned in retranslateUi).
        self.submenu_listar_libros = QtWidgets.QAction(mainWindow)
        self.submenu_listar_libros.setObjectName("submenu_listar_libros")
        self.submenu_insertar_libro = QtWidgets.QAction(mainWindow)
        self.submenu_insertar_libro.setObjectName("submenu_insertar_libro")
        self.submenu_inicio = QtWidgets.QAction(mainWindow)
        self.submenu_inicio.setObjectName("submenu_inicio")
        self.submenu_list_widget_libros = QtWidgets.QAction(mainWindow)
        self.submenu_list_widget_libros.setObjectName("submenu_list_widget_libros")
        self.submenu_table_widget_libros = QtWidgets.QAction(mainWindow)
        self.submenu_table_widget_libros.setObjectName("submenu_table_widget_libros")
        self.menuLibros.addAction(self.submenu_listar_libros)
        self.menuLibros.addAction(self.submenu_insertar_libro)
        self.menuLibros.addAction(self.submenu_inicio)
        self.menuLibros.addAction(self.submenu_list_widget_libros)
        self.menuLibros.addAction(self.submenu_table_widget_libros)
        self.menubar.addAction(self.menuLibros.menuAction())
        self.retranslateUi(mainWindow)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)

    def retranslateUi(self, mainWindow):
        """Assign all translatable display strings."""
        _translate = QtCore.QCoreApplication.translate
        mainWindow.setWindowTitle(_translate("mainWindow", "MainWindow"))
        self.pantalla_inicio.setText(_translate("mainWindow", "Bienvenido a mi Aplicacion \n"
" de libreria"))
        self.menuLibros.setTitle(_translate("mainWindow", "Libros"))
        self.submenu_listar_libros.setText(_translate("mainWindow", "Listar Libros"))
        self.submenu_insertar_libro.setText(_translate("mainWindow", "Insertar Libros"))
        self.submenu_inicio.setText(_translate("mainWindow", "inicio"))
        self.submenu_list_widget_libros.setText(_translate("mainWindow", "listar libros usando list widget"))
        self.submenu_table_widget_libros.setText(_translate("mainWindow", "listar libros usando tabla widget"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
mainWindow = QtWidgets.QMainWindow()
ui = Ui_mainWindow()
ui.setupUi(mainWindow)
mainWindow.show()
sys.exit(app.exec_())
|
from functools import partial
from typing import Callable, Dict
import numpy as np
from osmo_camera.stats.msorm import image_msorm
# Running numpy calculations against this axis aggregates over the image for each channel, as color channels are axis=2
IMAGE_AXES = (0, 1)
# Per-channel aggregates over the full image (each returns one value per channel).
image_mean = partial(np.mean, axis=IMAGE_AXES)
image_median = partial(np.median, axis=IMAGE_AXES)
image_min = partial(np.amin, axis=IMAGE_AXES)
image_max = partial(np.amax, axis=IMAGE_AXES)
image_stdev = partial(np.std, axis=IMAGE_AXES)
def image_outlier_warning(image):
    """Per-channel flag: True where msorm exceeds the plain mean by more than 0.001."""
    channel_means = np.mean(image, axis=IMAGE_AXES)
    deviation = image_msorm(image) - channel_means
    return deviation > 0.001
def image_coefficient_of_variation(image):
    """Per-channel coefficient of variation: stdev normalised by the mean."""
    spread = image_stdev(image)
    center = image_mean(image)
    return spread / center
# Type annotation clears things up for Mypy.
# Maps statistic name -> callable(image) -> per-channel value(s); the
# percentile entries are generated for a fixed list of percentiles.
roi_statistic_calculators: Dict[str, Callable] = {
    "msorm": image_msorm,
    "mean": image_mean,
    "median": image_median,
    "outlier_warning": image_outlier_warning,
    "min": image_min,
    "max": image_max,
    "stdev": image_stdev,
    "cv": image_coefficient_of_variation,
    **{
        f"percentile_{percentile}": partial(
            np.percentile, q=percentile, axis=IMAGE_AXES
        )
        for percentile in [99, 95, 90, 75, 50, 25]
    },
}
|
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class bienvenida(Page):
    """Welcome screen; shown only in the first round."""
    timeout_seconds = 60

    def is_displayed(self):
        first_round = self.round_number == 1
        return first_round
class instrucciones_practica(Page):
    """Practice-round instructions; exposes the session's meritocracia flag."""
    timeout_seconds = 60

    def is_displayed(self):
        return 1 == self.round_number

    def vars_for_template(self):
        context = dict(meritocracia=self.session.config["meritocracia"])
        return context
class instrucciones_torneo(Page):
    """Tournament instructions (round 2); exposes the observabilidad flag."""
    timeout_seconds = 60

    def is_displayed(self):
        return 2 == self.round_number

    def vars_for_template(self):
        context = dict(observabilidad=self.session.config["observabilidad"])
        return context
class tarea_practica(Page):
    """Practice real-effort task (round 1): word-transcription form."""
    form_model = 'player'
    form_fields = ['palabras', 'mistakes']
    if Constants.use_timeout:
        timeout_seconds = Constants.seconds_per_period

    def is_displayed(self):
        return self.round_number == 1

    def vars_for_template(self):
        columns = list(range(5))
        letters = list(range(Constants.letters_per_word))
        width = 90 / Constants.letters_per_word
        return {'legend_list': columns,
                'task_list': letters,
                'task_width': width,
                }
class tarea_torneo(Page):
    """Tournament task (rounds > 1); adds payoff and contract context."""
    form_model = 'player'
    form_fields = ['palabras', 'mistakes']
    if Constants.use_timeout:
        timeout_seconds = Constants.seconds_per_period

    def is_displayed(self):
        return self.round_number > 1

    def vars_for_template(self):
        context = {
            'legend_list': list(range(5)),
            'task_list': list(range(Constants.letters_per_word)),
            'task_width': 90 / Constants.letters_per_word,
            "pago_A": Constants.pago_A,
            "pago_B": Constants.pago_B,
            "contrato_A": self.player.contrato_A,
        }
        return context
class calculos(WaitPage):
    """Sync point: compute rankings once every group has finished the task."""
    wait_for_all_groups = True

    def after_all_players_arrive(self):
        subsession = self.subsession
        subsession.set_ranking()
        subsession.set_ranking_grupos()
        subsession.set_posiciones_jugadores()
class resultados_practica(Page):
    """Practice-round results (round 1 only)."""

    def is_displayed(self):
        return 1 == self.round_number

    def vars_for_template(self):
        return dict(palabras=self.player.palabras,
                    ronda=self.round_number - 1)
class resultados_torneo(Page):
    """Tournament results for rounds > 1 (round 0 is the practice round)."""

    def is_displayed(self):
        return self.round_number > 1

    def vars_for_template(self):
        player = self.player
        probabilidad = "{0:.2f}".format(player.probabilidad_contrato_A)
        return {
            "ronda": self.round_number - 1,  # subtract 1: round 0 = practice
            "palabras": player.palabras,
            "pago_ronda": player.pago_ronda,
            "posicion_grupo": player.posicion_grupo,
            "contrato_A": player.contrato_A,
            "posicion_contrato": player.posicion_contrato,
            "probabilidad_contrato_A": probabilidad,
        }
class asignacion(Page):
    """Contract-assignment screen; hidden in the final round."""

    def is_displayed(self):
        return self.round_number < Constants.num_rounds

    def vars_for_template(self):
        return dict(ronda=self.round_number,
                    contrato_A_torneo=self.player.contrato_A_torneo)
class espera_grupos(WaitPage):
    """Regroup players between rounds (not shown after the final round)."""
    wait_for_all_groups = True
    after_all_players_arrive = 'creating_groups'

    def is_displayed(self):
        return self.round_number < Constants.num_rounds
class espera_pago_total(WaitPage):
    """Final sync point: compute total payoffs after the last round."""
    wait_for_all_groups = True

    def after_all_players_arrive(self):
        self.subsession.set_pago_jugadores()

    def is_displayed(self):
        return Constants.num_rounds == self.round_number
class pago_total(Page):
    """Show the final payout (last round only)."""

    def is_displayed(self):
        return Constants.num_rounds == self.round_number

    def vars_for_template(self):
        payout = self.player.pago.to_real_world_currency(self.session)
        return {"ronda_pagar": Constants.ronda_pagar - 1,
                "pago_total": payout}
class gracias(Page):
    """Thank-you screen, shown only in the last round."""

    def is_displayed(self):
        return Constants.num_rounds == self.round_number
class ruleta(Page):
    """Roulette screen (round 1) with a very long timeout."""
    timeout_seconds = 12000

    def is_displayed(self):
        return 1 == self.round_number
# Order in which the pages defined above are served each round; oTree shows
# each page only when its is_displayed() returns True.
page_sequence = [
    bienvenida,
    instrucciones_practica,
    instrucciones_torneo,
    tarea_practica,
    tarea_torneo,
    calculos,
    resultados_practica,
    resultados_torneo,
    asignacion,
    espera_grupos,
    espera_pago_total,
    pago_total,
    gracias,
]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#created by liangj
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
import platform
import os
import math
import time
from skimage import io,measure,color
currentDir_ = "";
colorLevel_ = "";
sysstr_ = "";
COLORFULL = 1;
GRAY = 0;
interval = 8;
channel = 2; # color channel B:0 G:1 R:2s
kernel = (15,15);
def iniDir(argv):
    """Initialise the working-directory globals from the script path (argv[0])."""
    global currentDir_;
    global colorLevel_;
    global sysstr_;
    currentDir_ = os.path.dirname(argv[0]);
    sysstr_ = platform.system();
    # Append the platform's path separator; colorLevel_ is the output root
    # for the colour-level separation results.
    if sysstr_ == "Windows":
        currentDir_ += "\\";
        colorLevel_ = currentDir_ + "separateByColorLevel\\";
    else:
        currentDir_ += "/";
        colorLevel_ = currentDir_ + "separateByColorLevel/";
    print("Working Dir is "+currentDir_);
def getRelativeDir(folderNamesArray):
    """Join folder names under the working dir, using the platform separator."""
    global currentDir_
    separator = "\\" if sysstr_ == "Windows" else "/"
    relativeDir = currentDir_
    for folderName in folderNamesArray:
        relativeDir += folderName + separator
    return relativeDir
def showPicForData(targetData):
    """3-D scatter of rows; columns 1-3 are plotted, column 4 picks the colour.

    Rows with int(item[4]) == 1 are drawn red, all others blue.
    """
    # prepare for the data to draw: split rows into the two classes
    paintData_1 = [];
    paintData_0 = [];
    for item in targetData:
        if int(item[4]) == 1:
            paintData_1.append([item[1],item[2],item[3]]);
        else:
            paintData_0.append([item[1],item[2],item[3]]);
    paintData_0 = np.array(paintData_0);
    paintData_1 = np.array(paintData_1);
    # plot pic for data
    fig = plt.figure();
    ax = fig.add_subplot(1,1,1, projection='3d');
    # NOTE(review): the axes object is created twice; the second call
    # replaces the first — confirm the add_subplot line is still needed.
    ax=plt.subplot(111,projection='3d');
    ax.scatter(paintData_0[::,0],paintData_0[::,1],paintData_0[::,2],c='b');
    ax.scatter(paintData_1[::,0],paintData_1[::,1],paintData_1[::,2],c='r');
    ax.set_zlabel('height');
    ax.set_ylabel('weight');
    ax.set_xlabel('age');
    plt.show();
def showImageInWindow(windowName, time, image):
    """Show `image` in a resizable window for `time` ms, then close all windows."""
    cv.namedWindow(windowName, cv.WINDOW_NORMAL)
    cv.imshow(windowName, image)
    cv.waitKey(time)
    cv.destroyAllWindows()
def addColor(color, addNum):
    """Add addNum to a channel value, clamped to the displayable 0-255 range."""
    total = color + addNum
    if total > 255:
        return 255
    if total < 0:
        return 0
    return total
def sharpProcess(image):
    """Exaggerate horizontal edges on the configured colour channel, in place.

    For each horizontally adjacent pixel pair whose channel values differ by
    more than 20 (and whose brighter value is not already saturated at 255),
    push the brighter one up by 30 and the darker one down by 30, clamped.
    """
    global currentDir_;
    global channel;
    for height in range(0,image.shape[0]-1):
        for width in range(0,image.shape[1]-1):
            colorLeft = image[height,width,channel];
            colorRight = image[height,width+1,channel];
            # Cast to int before subtracting to avoid uint8 wraparound.
            value_ = abs(int(colorLeft)-int(colorRight));
            if (max(colorLeft,colorRight) != 255) and (value_ > 20) :
                if image[height,width,channel] > image[height,width+1,channel]:
                    image[height,width,channel] = min(255,image[height,width,channel]+30);
                    image[height,width+1,channel] = max(0,image[height,width+1,channel]-30);
                else:
                    image[height,width,channel] = max(0,image[height,width,channel]-30);
                    image[height,width+1,channel] = min(255,image[height,width+1,channel]+30);
    return image;
def blurForChannels(colorImage, channelID=None):
    """Gaussian-blur one channel of colorImage, or every channel if channelID is None.

    Bug fix: the original tested ``if channelID:``, so channelID=0 (the blue
    channel) was falsy and fell through to the all-channels branch. Test
    against None explicitly so channel 0 can be blurred on its own.
    """
    global kernel
    if channelID is not None:
        colorImage[:, :, channelID] = cv.GaussianBlur(colorImage[:, :, channelID], kernel, 0)
    else:
        # Renamed the loop variable: the original reused the module-level
        # name `channel` as a local here.
        for ch in range(0, colorImage.shape[2]):
            colorImage[:, :, ch] = cv.GaussianBlur(colorImage[:, :, ch], kernel, 0)
    return colorImage
def getImageWithBlackBg(image):
    """Inverted Otsu threshold of the configured colour channel -> binary mask."""
    global channel
    flags = cv.THRESH_BINARY_INV + cv.THRESH_OTSU
    _, regionImage = cv.threshold(image[:, :, channel], 0, 255, flags)
    return regionImage
def filterImageByBlackImage(originalImage, maskImage):
    """White-out originalImage (in place) wherever maskImage is 0; return it.

    maskImage should be a black-and-white single-channel image where tissue
    regions are white; masked-out (black) pixels become white in the original.

    Performance fix: the original iterated every pixel in Python; a boolean
    index performs the same assignment in one vectorised pass.
    """
    originalImage[maskImage == 0] = 255
    return originalImage
def reduceNoise(originImage):
    """Return a copy keeping only pixels whose channel spread exceeds 20.

    Pixels whose max-min channel difference is <= 20 (near-grey, likely
    background noise) are painted white in the result; the input is untouched.

    Performance fix: the original compared max/min per pixel in a Python
    double loop; this computes the spread for the whole image at once.
    """
    newImage = np.full(originImage.shape, 255, dtype=np.uint8)
    # int16 prevents uint8 wraparound in the subtraction.
    spread = originImage.max(axis=2).astype(np.int16) - originImage.min(axis=2).astype(np.int16)
    keep = spread > 20
    newImage[keep] = originImage[keep]
    return newImage
def separateColor(image,outputFormat,outputDir):
    """Split non-white pixels into images bucketed by value on the configured channel.

    Scans the channel's min/max over non-white pixels, divides that range into
    buckets of width `interval`, writes one image per bucket to
    outputDir + outputFormat % group, and returns (path format, group count).
    """
    print("Func separateColor: oringin image size is");
    print(image.shape);
    global channel; # RED channel
    pixels = [];  # NOTE(review): never used — confirm it can be removed.
    minInChannel = 255;
    maxInChannel = 0;
    # Pass 1: find the channel's value range over non-white pixels.
    for height in range(0,image.shape[0]):
        for weight in range(0,image.shape[1]):
            if (image[height,weight]==np.array([255,255,255])).all():
                continue;
            else:
                if minInChannel > image[height,weight,channel]:
                    minInChannel = image[height,weight,channel];
                if maxInChannel < image[height,weight,channel]:
                    maxInChannel = image[height,weight,channel];
    # print("minInChannel is %d maxInChannel is %d "%(minInChannel,maxInChannel));
    colorRange = maxInChannel-minInChannel;
    global interval;
    groups = int(math.ceil(colorRange/interval));
    print("groups is %d "%(groups));
    # Pre-build one all-white output image and one value range per bucket.
    outputImages = [];
    boundaries = [];
    for group in range(0,groups):
        newImage = np.zeros(image.shape,dtype=np.uint8);
        newImage[::] = 255;
        outputImages.append(newImage);
        groupRangeMin = group*interval+minInChannel;
        groupRangeMax = (group+1)*interval+minInChannel;
        boundaries.append((groupRangeMin,groupRangeMax));
    # for group in range(0,groups):
    #     newImage = np.zeros(image.shape,dtype=np.uint8);
    #     newImage[::] = 255;
    #     # groupRangeMin = (group-1)*interval+minInChannel;
    #     # groupRangeMax = group*interval+minInChannel;
    #     groupRangeMin = boundaries[group][0];
    #     groupRangeMax = boundaries[group][1];
    #     for height in range(0,image.shape[0]):
    #         for weight in range(0,image.shape[1]):
    #             if (image[height,weight]==np.array([255,255,255])).all():
    #                 continue;
    #             elif ( (image[height,weight,channel] >= groupRangeMin) and (image[height,weight,channel] < groupRangeMax) and (max(image[height,weight]) - min(image[height,weight]) > 10) ):
    #                 newImage[height,weight] = image[height,weight];
    #     cv.imwrite(outputDir+outputFormat%(group),newImage);
    # Pass 2: copy each non-white pixel into the bucket its channel value falls in.
    for height in range(0,image.shape[0]):
        for weight in range(0,image.shape[1]):
            pixelColor = image[height,weight];
            for group in range(0,groups):
                groupRangeMin = boundaries[group][0];
                groupRangeMax = boundaries[group][1];
                if (pixelColor ==np.array([255,255,255])).all():
                    continue;
                elif ( (pixelColor[channel] >= groupRangeMin) and (pixelColor[channel] < groupRangeMax) ):
                    outputImages[group][height,weight] = pixelColor;
    # Write all bucket images to disk.
    for group in range(0,groups):
        cv.imwrite(outputDir+outputFormat%(group),outputImages[group]);
    return (outputDir+outputFormat),groups;
def labelingGrayImage(imageMatrix):
    """Label connected regions (background = 255) and return their properties."""
    labeledPic, regionsCnt = measure.label(
        imageMatrix, background=255, return_num=True, connectivity=2)
    print("regionsCnt is ", regionsCnt)
    return measure.regionprops(labeledPic)
def judgeRegions(regionItem):
    """Placeholder region filter: currently accepts every region.

    The commented-out lines sketch the intended heuristic (centroid, bbox
    aspect ratio) that was never finished.
    """
    # centroid = regions[index].centroid;
    # bbox = regions[index].bbox;#(min_row, min_col, max_row, max_col)
    # max_x = bbox[3]-bbox[1];
    # max_y = bbox[2] - bbox[0];
    # size = max_x*max_y;
    # r = math.ceil(max(max_x,max_y)/2);
    # ratio = min(max_x,max_y)/r;
    return True;
def circleOnOriginalImage(originalImage,regionImage,regionImageColor=None): #regionImage should be gray
    """Circle and number every labelled region of regionImage on originalImage.

    If regionImageColor is given, additionally dumps region #20's bounding box
    to region20.bmp (debug aid left in from development).
    """
    regions = labelingGrayImage(regionImage);
    if regionImageColor:
        # Debug: inspect a single hard-coded region (index 20).
        print(regions[20].perimeter);
        bbox = regions[20].bbox;#(min_row, min_col, max_row, max_col)
        max_x = bbox[3] - bbox[1];
        max_y = bbox[2] - bbox[0];
        region = regionImageColor[bbox[0]:bbox[2],bbox[1]:bbox[3]];
        cv.imwrite(currentDir_+"region20.bmp",region);
    for index in range(len(regions)):
        regionItem = regions[index];
        result = judgeRegions(regionItem);  # NOTE(review): result is never used.
        centroid = regionItem.centroid;
        bbox = regionItem.bbox;#(min_row, min_col, max_row, max_col)
        max_x = bbox[3]-bbox[1];
        max_y = bbox[2]-bbox[0];
        size = max_x*max_y;
        # Circle radius: half the longer bbox side.
        r = int(math.ceil(max(max_x,max_y)/2));
        # ratio = min(max_x,max_y)/r;
        # print("r is %d "%(r));
        # centroid is (row, col); cv wants (x, y) = (col, row).
        cv.circle(originalImage,(int(centroid[1]),int(centroid[0])), int(r), (0,0,255), 2);
        # font=cv.FONT_HERSHEY_SIMPLEX;
        font=cv.FONT_HERSHEY_COMPLEX_SMALL;
        cv.putText(originalImage,str(index),(int(centroid[1]),int(centroid[0])), font, 1.0,(0,255,0),thickness=2,lineType=8);
    return originalImage;
def main(argv):
    """Pipeline driver for image B (JF14_091_S8_HE): blur, denoise, mask, recolour."""
    iniDir(argv);
    global currentDir_;
    global colorLevel_;
    global kernel;
    global channel;
    kernelSize = kernel[0];
    # reduce noise --> bg white --> blur for channel --> output files separated by color in channel --> use one to mark image
    # pick up Best Image for the Target Regions --> circle on the original --> see the Results for marking
    # IMAGE B
    targetDir = getRelativeDir(["separateByColorLevel","blurByKer%dChannel%d"%(kernelSize,channel),"JF14_091_S8_HE"]);
    print(targetDir);
    if (not os.path.isdir(targetDir)):
        os.makedirs(targetDir);
    time0 = time.time();
    # Blur, then strip low-spread (near-grey) pixels.
    colorImage_1 = cv.imread(currentDir_+"JF14_091_S8_HE.bmp",COLORFULL);
    colorImage_1 = cv.GaussianBlur(colorImage_1,kernel,0);
    cv.imwrite(targetDir+"JF14_091_S8_HE_GaussianBlur.bmp",colorImage_1);
    colorImage_1 = reduceNoise(colorImage_1);
    cv.imwrite(targetDir+"JF14_091_S8_HE_noise.bmp",colorImage_1);
    # Build binary masks and white-out the background.
    maskImage_1 = getImageWithBlackBg(colorImage_1);
    ret,maskImage_2 = cv.threshold(colorImage_1[:,:,channel],0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU);
    cv.imwrite(targetDir+"JF14_091_S8_HE_mask3.bmp",maskImage_2);
    filterImageByBlackImage(colorImage_1,maskImage_1);
    # Channel-difference recolouring of the remaining (tissue) pixels.
    newImage = np.zeros(colorImage_1.shape,dtype=np.uint8);
    newImage[::] = 255;
    for height in range(0,colorImage_1.shape[0]):
        for weight in range(0,colorImage_1.shape[1]):
            if (colorImage_1[height,weight]!=np.array([255,255,255])).all():
                newImage[height,weight,0] = np.uint8((colorImage_1[height,weight,0]-colorImage_1[height,weight,1])) #B
                newImage[height,weight,1] = np.uint8((colorImage_1[height,weight,2]-colorImage_1[height,weight,1])) #G
                newImage[height,weight,2] = colorImage_1[height,weight,2]; #R
    cv.imwrite(targetDir+"JF14_091_S8_HE_minus.bmp",newImage);
    # fileFormat,outputGroups = separateColor(colorImage_1,"JF14_091_S8_HE_kernel%dChannel%d"%(kernelSize,channel)+"_group%d.bmp",targetDir);
    # print("fileFormat %s \ngroups is %d"%(fileFormat,outputGroups));
    # print("progress 1 over use time %d"%(time.time()-time0)); # 295
    # time1 = time.time();
    # group = 3;
    # bestRegions = cv.imread(targetDir+"JF14_091_S8_HE_kernel%dChannel%d_group%d.bmp"%(kernelSize,channel,group),GRAY);
    # # bestRegions_5_color = cv.imread(targetDir+"JF14_091_S8_HE_kernel15_group5.bmp",COLORFULL);
    # ret,bestRegions = cv.threshold(bestRegions,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU);
    # # # For the layered result images: how do we decide which connected regions are lymphoid follicles and which are not, and which features distinguish them?
    # result = circleOnOriginalImage(cv.imread(currentDir_+"JF14_091_S8_HE.bmp",COLORFULL),bestRegions);
    # # # With several results, how do we choose the best one as the representative?
    # cv.imwrite(targetDir+"JF14_091_S8_HE_result_%d.bmp"%(group),result);
    # print("mark use time %d"%(time.time()-time1));
if __name__ == '__main__':
    # Pass argv so main() can resolve the working directory from argv[0].
    main(sys.argv);
|
import numpy as np
import pandas as pd
import xgboost as xgb
import lightgbm as lgb
import string
PUNCT_TO_REMOVE = string.punctuation
from nltk import word_tokenize
from nltk.corpus import stopwords
STOPWORDS = set(stopwords.words("english"))
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV # 搜索适合的参数
# Read the e-mail CSV: column 2 holds the body, column 1 the label.
train_email = pd.read_csv("data/train.csv", usecols=[2], encoding='utf-8')
train_label = pd.read_csv("data/train.csv", usecols=[1], encoding='utf-8')
# print(df.describe(include='all'))  # 'all' / 'O' otherwise stats cover numeric columns only
# Analysis shows 3866 "ham" rows out of 4458 total - an imbalanced dataset.
def text_processing(text):
    """Normalise an e-mail body: lowercase, drop URLs/punctuation/stopwords, stem."""
    text = text.lower()
    text = re.compile(r'https?://\S+|www\.\S+').sub(r'', text)
    text = text.translate(str.maketrans('', '', PUNCT_TO_REMOVE))
    kept = [word for word in str(text).split() if word not in STOPWORDS]
    text = " ".join(kept)
    stemmed = [stemmer.stem(word) for word in text.split()]
    return " ".join(stemmed)
# Clean every e-mail body with the pipeline above.
train_email['Email'] = train_email['Email'].apply(text_processing)
# Convert the columns to plain Python lists.
train_email = np.array(train_email).reshape((1, len(train_email)))[0].tolist()
# NOTE(review): this reshape keys off len(train_email), which by now is the
# e-mail list — assumes labels and e-mails have equal row counts; confirm.
train_label = np.array(train_label).reshape((1, len(train_email)))[0].tolist()
# Build the training and validation splits (80/20).
train_num = int(len(train_email)*0.8)
data_train = train_email[:train_num]
data_dev = train_email[train_num:]
label_train = train_label[:train_num]
label_dev = train_label[train_num:]
# # Bag-of-words features.
vectorizer = CountVectorizer()
# CountVectorizer lower-cases the text and tokenises it (words and punctuation).
data_train_cnt = vectorizer.fit_transform(data_train)
data_test_cnt = vectorizer.transform(data_dev)
# Option 1: turn the counts into a TF-IDF matrix.
transformer = TfidfTransformer()
data_train_tfidf = transformer.fit_transform(data_train_cnt)
data_test_tfidf = transformer.transform(data_test_cnt)
# Option 2: TF-IDF straight from the raw text.
# vectorizer_tfidf = TfidfVectorizer(sublinear_tf=True)
# data_train_tfidf = vectorizer_tfidf.fit_transform(data_train)
# data_test_tfidf = vectorizer_tfidf.transform(data_dev)
# Naive Bayes on the count features.
clf = MultinomialNB()
clf.fit(data_train_cnt, label_train)
score = clf.score(data_test_cnt, label_dev)
print("NB score: ", score)
# Uncomment to use the TF-IDF features instead.
# clf.fit(data_train_tfidf, label_train)
# score = clf.score(data_test_tfidf, label_dev)
# print("NB tfidf score: ", score)
# Linear SVM on the count features.
svm = LinearSVC()
svm.fit(data_train_cnt, label_train)
score = svm.score(data_test_cnt, label_dev)
print("SVM score: ", score)
# Uncomment to use the TF-IDF features instead.
# svm.fit(data_train_tfidf, label_train)
# score = svm.score(data_test_tfidf, label_dev)
# print("SVM score: ", score)
# Logistic regression on the TF-IDF features.
lr_crf = LogisticRegression(max_iter=150, penalty='l2', solver='lbfgs', random_state=0)
lr_crf.fit(data_train_tfidf, label_train)
score = lr_crf.score(data_test_tfidf, label_dev)
print("LR score: ", score)
# Random forest on the TF-IDF features.
rf = RandomForestClassifier(random_state=0, n_estimators=100, max_depth=None, verbose=0, n_jobs=-1)
rf.fit(data_train_tfidf, label_train)
score = rf.score(data_test_tfidf, label_dev)
print("RF score: ", score)
# XGBoost on the TF-IDF features.
xgb_clf = xgb.XGBClassifier(n_estimators=100, n_jobs=-1, max_depth=15, min_child_weight=3, colsample_bytree=0.4)
xgb_clf.fit(data_train_tfidf, label_train)
score = xgb_clf.score(data_test_tfidf, label_dev)
print("XGBoost score: ", score)
# LightGBM, tuned with a small grid search over max_depth.
lgb_clf = lgb.LGBMClassifier()
# lgb_clf.fit(data_train_tfidf, label_train)
param_test = {
    'max_depth': range(2, 3)
}
gsearch = GridSearchCV(estimator=lgb_clf, param_grid=param_test, scoring='roc_auc', cv=5)
gsearch.fit(data_train_tfidf, label_train)
# print(gsearch.best_params_)
score = gsearch.score(data_test_tfidf, label_dev)
print("LGBM score: ", score)
# Predictions and confusion matrices on the dev split.
result_lgbm = gsearch.predict(data_test_tfidf)
result_xgb = xgb_clf.predict(data_test_tfidf)
result_rf = rf.predict(data_test_tfidf)
result_lr = lr_crf.predict(data_test_tfidf)
result_svm = svm.predict(data_test_cnt)
result_nb = clf.predict(data_test_cnt)
print("NB confusion: ", confusion_matrix(label_dev, result_nb))
print("SVM confusion: ", confusion_matrix(label_dev, result_svm))
print("LR confusion: ", confusion_matrix(label_dev, result_lr))
print("RF confusion: ", confusion_matrix(label_dev, result_rf))
print("XGB confusion: ", confusion_matrix(label_dev, result_xgb))
print("LGBM confusion: ", confusion_matrix(label_dev, result_lgbm))
# Model validation via cross-validation (disabled).
# accuracy = cross_val_score(clf, data_train_cnt, label_train, cv='warn', scoring='accuracy')
# print(accuracy.mean())
|
# -*- coding: utf-8 -*-
"""
Streamlit entry point for the Bender Element analysis application.

Builds a multi-tab app and registers one page per interpretation method
(first arrival, fast Fourier transform, cross-correlation, transfer
function) plus a summary start page, then starts the tab runner.

Created on Mon Aug 2 23:31:18 2021
@author: chanchanchan
"""
import streamlit as st
import pandas as pd
from matplotlib import pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
from MultiappFrame import MultiApp #MultiappFrame set up the framework for different tabs
import FirstArrivalNew, FastFouriorTransformNew, CrossCorrelationNew, StartPage, TransferFunctionNew # import app modules to generate multiapp (different tabs)
import DissertationPlotwithDataMain as main
app = MultiApp()
# Page chrome shared by every tab.
st.title("Bender Element Analysis ")
st.sidebar.write('Interpretation of Bender Element:')
# Register each analysis page; the first argument is the tab label shown
# in the sidebar selector, the second is the page-rendering callable.
app.add_app("Summary of Bender Element Analysis", StartPage.app)
app.add_app("First Arrival", FirstArrivalNew.app)
app.add_app("Fast Fourior Transform", FastFouriorTransformNew.app)
app.add_app("Cross Correlation", CrossCorrelationNew.app)
app.add_app("Transfer Function", TransferFunctionNew.app)
# The main app
app.run()
|
import sys
import random
from datetime import datetime
from zuridb import ZURIDB
class ZuriAtm():
    """Minimal interactive console ATM backed by the ZURIDB store.

    The store maps an account number to ``[username, password]``.  All
    interaction happens through input()/print() prompts; constructing the
    class immediately enters the login flow.

    Bug fixes relative to the original:
    * methods were called with the *class object* passed explicitly as
      ``self`` (e.g. ``self.transactions(ZuriAtm)``), so all state lived on
      the class; normal bound-method calls are used now,
    * account numbers could have fewer than 10 digits,
    * a failed login fell through after the retry recursion and crashed
      with a KeyError on the welcome lookup.
    """

    db = ZURIDB  # backing store: account number -> [username, password]
    currentBalance = 0
    current_date = datetime.now().strftime("%a %b %d,%H:%M")
    naira = u'\u20A6'  # Nigerian naira currency symbol

    def __init__(self) -> None:
        # Start the interactive login flow immediately on construction.
        self.login = self.login()

    def generateAccountNumber(self):
        """Return a random 10-digit account number.

        The lower bound of 1_000_000_000 guarantees exactly ten digits;
        the original ``randrange(0, 9999999999)`` could produce shorter
        numbers.
        """
        return random.randrange(1000000000, 10000000000)

    def register(self):
        """Create a new account and continue to login verification."""
        self.accountNumber = self.generateAccountNumber()
        try:
            self.username = str(input("Enter your name \n >>> "))
            password = int(input("Enter your password \n >>> "))
        except ValueError:
            print("password must be digits(0-9) not letters")
            sys.exit()
        self.db[self.accountNumber] = [self.username, password]
        print(
            "Account created successfully! \n",
            end=f'Your account number is {self.accountNumber} \n'
        )
        return self.verify_user_login()

    def transactions(self):
        """Prompt for and perform one transaction, then exit."""
        print("1. Withdrawal ")
        print("2. Deposit ")
        print("3. Complaints ")
        print("8. Cancel ")
        try:
            resp = int(input(">>> "))
            if resp == 1:
                print("How much would you like to withdraw?")
                response = int(input(">>> "))
                print("Take your cash.")
                sys.exit()
            elif resp == 2:
                print("How much would you like to deposit")
                response = int(input(">>> "))
                print(
                    "Your current balance is ",
                    end=f'{self.naira}{self.currentBalance + response}'
                )
                sys.exit()
            elif resp == 3:
                print("What issue would you like to report?")
                response = input(">>> ")
                print("Thank you for contacting us.")
                sys.exit()
            elif resp == 8:
                print("Thank you for choosing ZuriBank!")
                sys.exit()
            else:
                print("Invalid Input! Please Try Again.")
                self.transactions()
        except ValueError:
            print("Reply with digits of desired choice ")
            self.transactions()

    def verify_user_login(self):
        """Validate credentials against the store, then show the menu."""
        print()
        accountNumber = int(input("Enter your account number >>> "))
        password = int(input("Enter your password >>> "))
        print()
        userPSW = {u: p[1] for (u, p) in self.db.items()}
        if (
                accountNumber not in userPSW.keys()) or (
                password != userPSW[accountNumber]):
            print("Error: Invalid User Credentials")
            print(
                " 1. TRY AGAIN", "\n",
                "2. Register", "\n",
                "8. Exit"
            )
            resp = int(input(">>> "))
            # Return from each branch: the original fell through to the
            # welcome message below with invalid credentials and crashed.
            if resp == 1:
                return self.verify_user_login()  # recursion
            elif resp == 2:
                return self.register()
            elif resp == 8:
                print("Have a nice day!")
                sys.exit()
            else:
                print("Invalid Input! Please Try Again")
                return self.verify_user_login()
        username = {u: p[0] for (u, p) in self.db.items()}
        print(f" {self.current_date} ")
        print(f"Welcome {username[accountNumber]}")
        print("********** What would you like to do? **********")
        return self.transactions()

    def login(self):
        """Top-level menu: open a new account or log into an existing one."""
        print("\n", "********** WELCOME TO ZURIBANK ATM **********", "\n")
        print(f" 1. Open Account", "\n", "2. Login")
        resp = int(input(">>> "))
        if resp == 1:
            return self.register()
        elif resp == 2:
            self.verify_user_login()


if __name__ == "__main__":
    # Only start the interactive ATM when run directly, not on import.
    ZuriAtm()
|
from generator.ApplicantCodeGenerator import ApplicantCodeGenerator
from dao.CityQueries import CityQueries
from model.Applicant import Applicant
class ApplicantsGenerator:
    """Seed the applicant table and derive each applicant's school and code."""

    def __init__(self):
        # Seed rows: (first_name, last_name, applicant_city); every
        # applicant starts in status 'new'.
        seed_rows = [
            ('Béla', 'Kovács', 'Eger'),
            ('Geri', 'Keli', 'Vác'),
            ('Ivett', 'Kővári', 'Tokaj'),
            ('Zoltán', 'Szűcs', 'Eger'),
            ('Lajos', 'Tóth', 'Miskolc'),
            ('Ferenc', 'Lompos', 'Budapest'),
            ('Ákos', 'Pintér', 'Tiszaújváros'),
            ('Laura', 'Egri', 'Szerencs'),
            ('Kati', 'Pici', 'Budapest'),
            ('Kriszti', 'Tóth', 'Budapest'),
            ('Dora', 'Varga', 'Vác'),
            ('Nora', 'Tahin', 'Tokaj'),
            ('Mari', 'Molnár', 'Eger'),
            ('Lilla', 'Tóth', 'Miskolc'),
            ('Fanni', 'Lompos', 'Budapest'),
            ('Adri', 'Pintér', 'Tiszaújváros'),
            ('Agi', 'Egri', 'Szerencs'),
            ('Mira', 'Pap', 'Budapest'),
            ('Weronika', 'Mrszky', 'Krakow'),
            ('Magdalena', 'Zrsky', 'Krakow'),
            ('Lena', 'Trsky', 'Krakow'),
            ('Karolina', 'Knilky', 'Wadowice'),
            ('Nadia', 'Zrwzky', 'Zawoja'),
            ('Milena', 'Trwurinsky', 'Katowice'),
        ]
        # Update the applicant table in the same order as the seed list.
        for first, last, city in seed_rows:
            Applicant.create(first_name=first, last_name=last,
                             applicant_city=city, status='new')
        self.__generate_school()
        self.__generate_application_code()

    def __generate_school(self):
        """Fill in applied_school from each applicant's city record."""
        for applicant in Applicant.select():
            # Look up the city row to find the school nearest to it.
            closest_city = CityQueries.getCityByName(applicant.applicant_city)
            applicant.applied_school = closest_city.nearest_school
            applicant.save()

    def __generate_application_code(self):
        """Assign a freshly generated application code to every applicant."""
        for applicant in Applicant.select():
            code_generator = ApplicantCodeGenerator()
            applicant.applicant_code = code_generator.application_code
            applicant.save()
x=4
if(x%2==0):
print("prime")
else:
print("prime")
|
a = int(input('Primeiro valor: '))
b = int(input('Segundo valor: '))
if a > b:
print('{} é maior que {}'.format(a, b))
elif b < a:
print('{} é maior que {}'.format(b, a))
elif a == b:
print('Os dois valores são IGUAIS') |
'''
Tests init file.
'''
def test_sample():
    """Smoke test: verifies the test suite itself can execute."""
    assert 1 == 1
|
#Profanity alert
from urllib import request,parse
def read_text(path="Path to the text file"):
    """Read the text file at *path* and run it through the profanity check.

    path -- location of the file to scan; the default keeps the original
    placeholder for backward compatibility — callers should pass a real
    path.

    The file handle is managed with a context manager so it is closed
    even if read() raises (the original leaked the handle on error).
    """
    with open(path) as quotes:
        contents_of_file = quotes.read()
    check_profanity(contents_of_file)
def check_profanity(text_to_check):
    """Query the wdylike web service and report whether *text_to_check*
    contains curse words.

    The service responds with a body containing b"true" (profanity
    found) or b"false" (clean); anything else is treated as an error.
    """
    url = "http://www.wdylike.appspot.com/?q=" + parse.quote(text_to_check)
    # Context manager guarantees the connection is closed even when
    # read() raises (the original leaked it on error).
    with request.urlopen(url) as connection:
        output = connection.read()
    if b"true" in output:
        print("Profanity Alert!!")
    elif b"false" in output:
        print("No curse words")
    else:
        print("There was a problem")
if __name__ == "__main__":
    # Run the scan only when executed as a script, not on import.
    read_text()
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
from .urls_api import router as note_router
# URL routing table: static template pages, the Django admin, the
# account and Stripe payment apps, the REST API router, and the notes
# publishing app.
urlpatterns = [
    path("", TemplateView.as_view(template_name="homepage.html"), name="home"),
    path("admin/", admin.site.urls),
    path("account/", include("account.urls")),
    path("payments/", include("pinax.stripe.urls")),
    path("api/", include(note_router.urls)),
    path("about/", TemplateView.as_view(template_name="about.html"), name="about"),
    path("terms-of-use/", TemplateView.as_view(template_name="terms_of_use.html"), name="terms-of-use"),
    path("privacy-policy/", TemplateView.as_view(template_name="privacy_policy.html"), name="privacy-policy"),
    path("notes/", include("publish.urls")),
]
# Serve user-uploaded media through Django (development setup).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/env python3
import sys,math,numpy
from itertools import permutations
def getDirn(robx,roby,dirn,space):
    """Find the turn the robot must make to continue along the scaffold.

    Arguments:
        robx, roby -- current robot position, used as space[robx][roby]
        dirn       -- current heading: 1=N, 2=E, 3=S, 4=W
        space      -- 2-D grid of ASCII codes; ord('#') marks scaffold

    Returns a tuple (new_dirn, turn, dx, dy) where turn is 'L' or 'R'
    and (dx, dy) is the unit step to walk afterwards, or (0, 0, 0, 0)
    when no perpendicular scaffold is adjacent (end of the path).
    """
    dx= 0
    dy= 0
    (maxx,maxy)=space.shape
    #1=N,2=E,3=S,4=W
    # Facing N or S: the only legal turns lead along the first (x) axis.
    if (dirn == 1 or dirn == 3):
        if robx > 0 and (space[robx-1][roby]) == ord('#'):
            # Scaffold on the -x side: both turns end up heading 4 (W);
            # which letter depends on the current heading.
            if dirn == 1:
                return (4,'L',-1,0)
            else:
                return (4,'R',-1,0)
        if robx < maxx-1 and (space[robx+1][roby]) == ord('#'):
            # Scaffold on the +x side: both turns end up heading 2 (E).
            if dirn == 3:
                return (2,'L',1,0)
            else:
                return (2,'R',1,0)
    # Facing E or W: the only legal turns lead along the second (y) axis.
    if (dirn == 2 or dirn == 4):
        if roby > 0 and (space[robx][roby-1]) == ord('#'):
            if dirn == 2:
                return (1,'L',0,-1)
            else:
                return (1,'R',0,-1)
        if roby < maxy-1 and (space[robx][roby+1]) == ord('#'):
            if dirn == 4:
                return (3,'L',0,1)
            else:
                return (3,'R',0,1)
    # No adjacent perpendicular scaffold: the walk is finished.
    return (0,0,0,0)
def isscaffold(c):
    """Return True when character code *c* is part of the scaffold.

    Scaffold cells are '#' plus any robot glyph ('^', 'v', '<', '>'),
    since the robot always stands on scaffold.
    """
    return c in {ord(glyph) for glyph in '#^<>v'}
def parseSpace(out):
    """Convert the machine's ASCII output (a list of integer codes) into
    a 2-D numpy grid addressed as space[x][y].

    The grid width is the length of the first line; the height is the
    number of newline codes (10) minus one, dropping the trailing blank
    line the program emits.  Characters beyond the last counted row are
    ignored.
    """
    # Width: number of characters before the first newline.
    cols = 0
    for code in out:
        if code == 10:
            break
        cols += 1
    # Height: newline-terminated lines, minus the trailing blank one.
    rows = sum(1 for code in out if code == 10) - 1
    grid = numpy.empty((cols, rows), dtype=int)
    col = 0
    row = 0
    for code in out:
        if code == 10:
            # End of line: reset to the start of the next row.
            col = 0
            row += 1
            continue
        if row >= rows:
            break
        grid[col][row] = code
        col += 1
    return grid
def calcVal(s, pos, mode, rbase):
    """Resolve the Intcode operand stored at *pos* under *mode*.

    mode 0 -- position: s[pos] is an address into s
    mode 1 -- immediate: s[pos] is the value itself
    mode 2 -- relative: s[pos] is an offset from the relative base

    Any other mode is a fatal decode error and terminates the program.
    """
    resolvers = {
        0: lambda: s[s[pos]],
        1: lambda: s[pos],
        2: lambda: s[rbase + s[pos]],
    }
    try:
        return resolvers[mode]()
    except KeyError:
        print("Unknown mode", mode)
        sys.exit()
def putVal(s, pos, mode, rbase, val):
    """Store *val* at the location addressed by the parameter at *pos*.

    mode 0 writes to s[s[pos]]; mode 2 writes to s[rbase + s[pos]].
    Mode 1 (immediate) is not a legal write target in Intcode; it is
    kept as a warned best-effort direct store for compatibility.
    Any other mode is a fatal decode error and terminates the program.
    """
    if mode == 0:
        s[s[pos]] = val
        return
    if mode == 1:
        print("Mode 1 put should not happen")
        s[pos] = val
        # Bug fix: without this return, control fell through to the
        # "Unknown mode" error below and sys.exit() killed the program.
        return
    if mode == 2:
        s[rbase + s[pos]] = val
        return
    print("Unknown mode", mode)
    sys.exit()
def doMachine(s, inp):
    """Run the Intcode program *s* to completion.

    Arguments:
        s   -- program memory (mutated in place; must be padded with
               zeros so writes past the program text succeed)
        inp -- queued input values, consumed front-first by opcode 3

    Returns the list of values emitted by opcode 4.

    Bug fix: the original had an unreachable ``return (output, xpos,
    ypos)`` after the loop that referenced undefined names; removed.
    """
    output = []
    pos = 0     # instruction pointer
    rbase = 0   # relative base for mode-2 addressing
    while (True):
        # Decode: two low digits are the opcode, higher digits are the
        # per-parameter addressing modes.
        instr = s[pos] % 100
        mode1 = (int(s[pos] / 100)) % 10
        mode2 = (int(s[pos] / 1000)) % 10
        mode3 = (int(s[pos] / 10000)) % 10
        if (instr == 99):  # halt
            return output
        elif (instr == 1):  # add
            x = calcVal(s, pos + 1, mode1, rbase)
            y = calcVal(s, pos + 2, mode2, rbase)
            putVal(s, pos + 3, mode3, rbase, x + y)
            pos += 4
        elif (instr == 2):  # mult
            x = calcVal(s, pos + 1, mode1, rbase)
            y = calcVal(s, pos + 2, mode2, rbase)
            putVal(s, pos + 3, mode3, rbase, x * y)
            pos += 4
        elif (instr == 3):  # input: pop the next queued value
            i = inp.pop(0)
            putVal(s, pos + 1, mode1, rbase, i)
            pos += 2
        elif (instr == 4):  # output
            out = calcVal(s, pos + 1, mode1, rbase)
            output.append(out)
            pos += 2
        elif (instr == 5):  # jump-if-true
            x = calcVal(s, pos + 1, mode1, rbase)
            y = calcVal(s, pos + 2, mode2, rbase)
            if (x != 0):
                pos = y
            else:
                pos += 3
        elif (instr == 6):  # jump-if-false
            x = calcVal(s, pos + 1, mode1, rbase)
            y = calcVal(s, pos + 2, mode2, rbase)
            if (x == 0):
                pos = y
            else:
                pos += 3
        elif (instr == 7):  # less-than: store 1/0
            x = calcVal(s, pos + 1, mode1, rbase)
            y = calcVal(s, pos + 2, mode2, rbase)
            if (x < y):
                putVal(s, pos + 3, mode3, rbase, 1)
            else:
                putVal(s, pos + 3, mode3, rbase, 0)
            pos += 4
        elif (instr == 8):  # equals: store 1/0
            x = calcVal(s, pos + 1, mode1, rbase)
            y = calcVal(s, pos + 2, mode2, rbase)
            if (x == y):
                putVal(s, pos + 3, mode3, rbase, 1)
            else:
                putVal(s, pos + 3, mode3, rbase, 0)
            pos += 4
        elif (instr == 9):  # adjust relative base
            rbase += calcVal(s, pos + 1, mode1, rbase)
            pos += 2
        else:
            print("Did not expect ", s[pos])
            sys.exit()
#print(s)
#print("Part 1",s[0])
# Driver for the scaffold puzzle: load the Intcode program, map the
# scaffold (part 1), then feed hand-derived movement routines (part 2).
fp= open("rgcinput.txt","r")
l= fp.readline()
fp.close()
# Program text: comma-separated Intcode integers on a single line.
s=[]
for string in l.split(","):
    s.append(int(string))
#print(s)
#OK, fix size assumptions not ideal but quick
# Pad memory with zeros so writes beyond the program text succeed.
for i in range(10000):
    s.append(0)
# Keep a pristine copy for part 2 (doMachine mutates s in place).
sc=s.copy()
out=doMachine(s,[])
#print(out)
space=parseSpace(out)
(maxx,maxy)=space.shape
# Part 1: sum of alignment parameters (i*j) over scaffold intersections,
# i.e. interior cells whose four neighbours are all scaffold.
tot= 0
for i in range(1,maxx-1):
    for j in range(1,maxy-1):
        if isscaffold(space[i][j]) and \
           isscaffold(space[i-1][j]) and \
           isscaffold(space[i+1][j]) and \
           isscaffold(space[i][j-1]) and \
           isscaffold(space[i][j+1]):
            tot+= i*j
print("Part 1",tot)
#print(space)
# Render the scaffold map for visual inspection.
for j in range(0,maxy):
    for i in range(0,maxx):
        print(str(chr(space[i][j])),end="")
    print()
# Locate the robot; it is drawn as '^' (facing north) in the map.
robx=-1
roby=-1
for j in range(0,maxy):
    for i in range(0,maxx):
        if(space[i][j] == ord('^')):
            robx=i
            roby=j
            break
    if (robx >= 0):
        break
dirn=1
#1=N,2=E,3=S,4=W
# Trace the scaffold: at each corner record the turn ('L'/'R') and the
# straight-line distance walked until the next corner.
dirs=[]
x=robx
y=roby
while True:
    (dirn,turn,dx,dy)=getDirn(x,y,dirn,space)
    if(dirn == 0):
        break
    #print(dirn,turn,dx,dy)
    dist=0
    while True:
        if (x+dx) >= 0 and x+dx < maxx and y+dy >=0 and y+dy < maxy \
           and space[x+dx][y+dy] == ord('#'):
            x+=dx
            y+=dy
            dist+=1
        else:
            break
    dirs.append((turn,dist))
#print(dirs)
# Movement routines derived by hand from the recorded path in `dirs`:
# the main routine (mcode) calls subroutines A/B/C in order.
acode="R,6,L,8,R,8"
bcode="R,4,R,6,R,6,R,4,R,4"
ccode="L,8,R,6,L,10,L,10"
mcode="A,A,B,C,B,C,B,C,A,C"
# Encode the routines as newline-terminated ASCII input, followed by
# 'n' to decline the continuous video feed.
inp=[]
for i in range(len(mcode)):
    # print(ord(code[i]))
    inp.append(ord(mcode[i]))
inp.append(10)
for i in range(len(acode)):
    # print(ord(code[i]))
    inp.append(ord(acode[i]))
inp.append(10)
for i in range(len(bcode)):
    # print(ord(code[i]))
    inp.append(ord(bcode[i]))
inp.append(10)
for i in range(len(ccode)):
    # print(ord(code[i]))
    inp.append(ord(ccode[i]))
inp.append(10)
inp.append(ord('n'))
inp.append(10)
# Part 2: setting address 0 to 2 wakes the robot, then run the walk.
sc[0]=2
out=doMachine(sc,inp)
#print(out)
space=parseSpace(out)
(maxx,maxy)=space.shape
for j in range(0,maxy):
    for i in range(0,maxx):
        print(str(chr(space[i][j])),end="")
    print()
# The final emitted value is the part-2 answer.
print(out[-1])
|
import sys
sys.path.append('./')
import json
import argparse
from datetime import datetime, timedelta
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
torch.backends.cudnn.enabled = True
from nep.utils import *
from nep.logger import myLogger
from nep.dataset import EvaDataset, Dataset
from nep.model import MLPClassifier, LabelEncoder, ModuleNet
def evaluate_embedding(args, dataset, embedding, repeat_times=5):
    """Score a node *embedding* by training an MLP classifier on it.

    Builds train/test feature matrices from the dataset's labeled nodes,
    then trains a fresh MLPClassifier `repeat_times` times with early
    stopping on test accuracy, and averages the per-run best scores.

    Returns (mean best train acc, mean best test acc, std of best test
    accs, mean best train epoch, mean best test epoch); accuracies are
    percentages.

    NOTE(review): requires CUDA — tensors and model are moved to the GPU
    unconditionally.
    """
    print('=' * 150)
    best_train_accs, best_test_accs = [], []
    best_train_acc_epochs, best_test_acc_epochs = [], []
    # Assemble (features | label) matrices for the labeled node splits.
    X_train = embedding[dataset.train_nodes]
    y_train = np.array([dataset.node_to_label[i] for i in dataset.train_nodes]).reshape(-1, 1)
    train = np.concatenate((X_train, y_train), axis=1)
    X_test = embedding[dataset.test_nodes]
    y_test = np.array([dataset.node_to_label[i] for i in dataset.test_nodes]).reshape(-1, 1)
    test = np.concatenate((X_test, y_test), axis=1)
    # Split the matrices back into tensors: all-but-last column are
    # features, last column is the integer class label.
    X_train, y_train = torch.FloatTensor(train[:, :-1]), torch.LongTensor(train[:, -1])
    X_test, y_test = torch.FloatTensor(test[:, :-1]), torch.LongTensor(test[:, -1])
    X_train = X_train.cuda()
    X_test = X_test.cuda()
    y_train = y_train.cuda()
    y_test = y_test.cuda()
    dataloader = DataLoader(EvaDataset(X_train, y_train), batch_size=args.batch_size_eval, shuffle=True)
    kwargs = {
        'input_dim': args.embedding_size,
        'hidden_dim': args.embedding_size // 2,  # args.hidden_eval,
        'output_dim': args.num_class
    }
    # Repeat training from scratch to average out initialization noise.
    # NOTE(review): the inner enumerate() loop below reuses the name `i`,
    # shadowing this repeat counter; harmless because the counter is not
    # read in the body, but worth renaming.
    for i in range(repeat_times):
        model = MLPClassifier(**kwargs).cuda()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate_eval)
        best_test_acc, best_train_acc = 0, 0
        best_test_acc_epoch, best_train_acc_epoch = 0, 0
        count = 0  # epochs since the last test-accuracy improvement
        for epoch in range(args.num_epoch_eval):
            for i, (batch, label) in enumerate(dataloader):
                optimizer.zero_grad()
                loss = model(batch, label)
                loss.backward()
                optimizer.step()
            test_acc = model.predict(X_test, y_test)
            test_acc *= 100
            if test_acc > best_test_acc:
                best_test_acc = test_acc
                best_test_acc_epoch = epoch + 1
                count = 0
            else:
                count += 1
                # Early stop when test accuracy stalls for too long.
                if count >= args.patience_eval:
                    break
            train_acc = model.predict(X_train, y_train)
            train_acc *= 100
            if train_acc > best_train_acc:
                best_train_acc = train_acc
                best_train_acc_epoch = epoch + 1
            print('\repoch {}/{} train acc={:.4f}, test acc={:.4f}, best train acc={:.4f} @epoch:{:d}, best test acc={:.4f} @epoch:{:d}'.
                  format(epoch + 1, args.num_epoch_eval, train_acc, test_acc, best_train_acc, best_train_acc_epoch, best_test_acc, best_test_acc_epoch), end='')
            sys.stdout.flush()
        print('')
        best_train_accs.append(best_train_acc)
        best_test_accs.append(best_test_acc)
        best_train_acc_epochs.append(best_train_acc_epoch)
        best_test_acc_epochs.append(best_test_acc_epoch)
    # Aggregate the per-repeat bests into means (and std for test acc).
    best_train_acc, best_train_acc_epoch, best_test_acc, best_test_acc_epoch = \
        np.mean(best_train_accs), np.mean(best_train_acc_epochs), np.mean(best_test_accs), np.mean(best_test_acc_epochs)
    std = np.std(best_test_accs)
    print('=' * 150)
    return best_train_acc, best_test_acc, std, int(best_train_acc_epoch), int(best_test_acc_epoch)
def parse_args():
    """Build and parse the command-line options for training and evaluation."""
    arg_parser = argparse.ArgumentParser()
    # General / logging options.
    arg_parser.add_argument('--dataset', type=str, default='dblp-sub')
    arg_parser.add_argument('--pattern_path', type=str, default='', help="path to load/save pattern")
    arg_parser.add_argument("--prefix", type=str, default='', help="prefix use as addition directory")
    arg_parser.add_argument('--suffix', default='', type=str, help='suffix append to log dir')
    arg_parser.add_argument('--log_level', default=20, help='logger level.')
    arg_parser.add_argument('--log_every', type=int, default=100, help='log results every epoch.')
    arg_parser.add_argument('--save_every', type=int, default=500, help='save learned embedding every epoch.')
    # Dataset / pattern-sampling options.
    arg_parser.add_argument('--target_node_type', type=str, default='a')
    arg_parser.add_argument('--train_ratio', type=float, default=0.8)
    arg_parser.add_argument('--superv_ratio', type=float, default=1.0)
    arg_parser.add_argument('--threshold', type=int, default=10)
    arg_parser.add_argument('--num_pattern', type=int, default=None)
    arg_parser.add_argument('--num_walkers_for_pattern', type=int, default=100)
    arg_parser.add_argument('--path_max_length', type=int, default=7)
    # Model options.
    arg_parser.add_argument('--embedding_size', type=int, default=64)
    # Optimization options.
    arg_parser.add_argument('--num_epoch', type=int, default=100000)
    arg_parser.add_argument('--num_data_per_epoch', type=int, default=1000)
    arg_parser.add_argument('--batch_size', type=int, default=100)
    arg_parser.add_argument('--learning_rate', type=float, default=5e-4)
    arg_parser.add_argument('--early_stop', type=int, default=1)
    arg_parser.add_argument('--patience', type=int, default=2000)
    # Downstream-evaluation options.
    arg_parser.add_argument('--batch_size_eval', type=int, default=32)
    arg_parser.add_argument('--learning_rate_eval', type=float, default=5e-4)
    arg_parser.add_argument('--num_epoch_eval', type=int, default=500)
    arg_parser.add_argument('--patience_eval', type=int, default=50, help='used for early stop in evaluation')
    # Output options.
    arg_parser.add_argument('--output_path', type=str, default='')
    return arg_parser.parse_args()
def main(args):
    """Train the ModuleNet embedding model and periodically evaluate it.

    Pipeline: load the dataset -> set up logging -> encode labels into
    seed-node embeddings -> load or generate walk patterns -> train with
    early stopping on evaluation accuracy -> dump the loss/accuracy
    trace and the best checkpoint.

    NOTE(review): requires CUDA — the model and every batch are moved to
    the GPU unconditionally.
    """
    start_time = time.time()
    dataset = Dataset(data_dir=osp.join('data', args.dataset), num_data_per_epoch=args.num_data_per_epoch,
                      threshold=args.threshold, superv_ratio=args.superv_ratio, train_ratio=args.train_ratio)
    # Record dataset-derived sizes on args so they reach model/logging.
    args.num_class = dataset.num_class
    args.num_module = dataset.num_link_type
    args.num_link = dataset.num_link
    args.node_type = dataset.id_to_type
    args.num_node = dataset.num_node
    args.num_target_node = len(dataset.type_to_node[args.target_node_type])
    args.num_labeled_node = len(dataset.train_nodes)+len(dataset.test_nodes)
    # initialize logger
    if args.prefix:
        # Fixed directory layout: log/<prefix>/<suffix>.
        base = os.path.join('log', args.prefix)
        log_dir = os.path.join(base, args.suffix)
    else:
        # Timestamped directory: log/<time>_<dataset>[_<suffix>].
        comment = f'_{args.dataset}_{args.suffix}' if args.suffix else f'_{args.dataset}'
        current_time = datetime.now().strftime('%b_%d_%H-%M-%S')
        log_dir = os.path.join('log', current_time + comment)
    args.log_dir = log_dir
    if not os.path.exists(log_dir): os.makedirs(log_dir)
    logger = myLogger(name='exp', log_path=os.path.join(log_dir, 'log.txt'))
    logger.setLevel(args.log_level)
    print_config(args, logger)
    if not args.output_path:
        args.output_path = osp.join(log_dir, 'embedding.bin')
    # encode label
    logger.info('=' * 100)
    logger.info('Start encoding label to seed nodes...')
    label_encoder = LabelEncoder(dataset, args)
    target_embedding = label_encoder.train()
    # Free encoder memory before the main training phase.
    del label_encoder
    # generate pattern
    logger.info('Check pattern file...')
    if not args.pattern_path:
        args.pattern_path = f'data/tmp/{args.dataset}_pattern.dat'
        logger.info(f'No input pattern file, so we generate pattern and save it into {args.pattern_path}')
        dataset.init_pattern(dataset.train_nodes, args.num_pattern, args.num_walkers_for_pattern,
                             args.path_max_length, args.target_node_type, reverse_path=False, verbose=True)
        dataset.save_pattern(args.pattern_path)
    else:
        logger.info(f'Load pattern from {args.pattern_path}')
        dataset.load_pattern(args.pattern_path)
    dataset.free_memory()

    def next_batch(X, batch_size):
        # Yield consecutive mini-batches of X (last one may be smaller).
        num = len(X)
        for i in np.arange(0, num, batch_size):
            yield X[i:i + batch_size]
    # initialize model
    kwargs = {
        'target_embedding': target_embedding,
        'num_node': args.num_target_node,
        'embedding_size': args.embedding_size,
        'num_module': args.num_module,
    }
    model = ModuleNet(**kwargs)
    model.cuda()
    model.train()
    # Only optimize parameters that require gradients.
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,model.parameters()), lr=args.learning_rate)
    # train model
    train_start_time = time.time()
    train_time, sample_time, sample_count = 0, 0, 0
    acc_loss, count_loss, count = 0, 0, 0
    best_test_acc = -1
    trace = {'loss': [], 'test acc': []}
    logger.info('=' * 100)
    logger.info("Starting training model...")
    for epoch in range(1, args.num_epoch+1):
        # Sample a fresh set of training paths for this epoch.
        start_sample_time = time.time()
        ret = dataset.collect_data(args.num_data_per_epoch)
        epoch_data, path = ret
        train_data = torch.LongTensor(epoch_data).cuda()
        sample_time += time.time() - start_sample_time
        sample_count += 1
        start_train_time = time.time()
        for batch in next_batch(train_data, batch_size=args.batch_size):
            optimizer.zero_grad()
            loss = model(path, batch)
            loss.backward()
            acc_loss += loss.item()
            count_loss += 1
            optimizer.step()
        model.copy_embedding()
        train_time += time.time() - start_train_time
        avr_sample_time = timedelta(seconds=(sample_time/sample_count))
        avr_train_time = timedelta(seconds=(train_time/(epoch + 1)))
        if epoch % args.log_every == 0:
            # Report the running-average loss since the last report.
            duration = time.time() - train_start_time
            avr_loss = acc_loss / count_loss
            acc_loss, count_loss = 0, 0
            logger.info(f'Epoch: {epoch:04d} loss: {avr_loss:.4f} duration: {duration:.4f} avr train time: {avr_train_time} avr sample time: {avr_sample_time}')
            trace['loss'].append((epoch, avr_loss))
        if epoch % args.save_every == 0:
            # Evaluate the current embedding and checkpoint on improvement.
            train_acc, test_acc, std, train_acc_epoch, test_acc_epoch = evaluate_embedding(args, dataset, model.return_embedding())
            trace['test acc'].append((epoch, test_acc))
            logger.info('best train acc={:.2f} @epoch:{:d}, best test acc={:.2f} += {:.2f} @epoch:{:d}'.
                        format(train_acc, train_acc_epoch, test_acc, std, test_acc_epoch))
            if test_acc > best_test_acc:
                best_test_acc = test_acc
                best_std = std
                best_epoch = epoch
                best_model = model.state_dict()
                best_opt = optimizer.state_dict()
                model.save_embedding(dataset.id_to_name, args.output_path, True)
                count = 0
            else:
                # No improvement: accumulate patience toward early stop.
                if args.early_stop:
                    count += args.save_every
                    if count >= args.patience:
                        logger.info('early stopped!')
                        break
    print('')
    # save results
    json.dump(trace, open(osp.join(args.log_dir, 'trace.json'), 'w'), indent=4)
    save_checkpoint({
        'args': args,
        'model': best_model,
        'optimizer': best_opt,
    }, args.log_dir, f'epoch{best_epoch}_acc{best_test_acc}.pth.tar', logger, True)
    total_cost_time = "total cost time: {} ".format(timedelta(seconds=(time.time() - start_time)))
    logger.info('best test acc={:.2f} += {:.2f} @epoch:{:d}'.
                format(best_test_acc, best_std, best_epoch))
    logger.info(total_cost_time)
if __name__ == '__main__':
    # Parse CLI options and launch training when run as a script.
    args = parse_args()
    main(args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Richard Huang <lasselindqvist@users.noreply.github.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IMAP Library - a IMAP email testing library.
"""
# To use a consistent encoding
import codecs
from os.path import abspath, dirname, join
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
LIBRARY_NAME = 'ImapLibrary2'
CWD = abspath(dirname(__file__))
VERSION_PATH = join(CWD, 'src', LIBRARY_NAME, 'version.py')
# Execute version.py to bring VERSION into this namespace without
# importing the (not-yet-installed) package itself.
exec(compile(open(VERSION_PATH).read(), VERSION_PATH, 'exec'))
# The PyPI long description is taken verbatim from the README.
with codecs.open(join(CWD, 'README.md'), encoding='utf-8') as reader:
    LONG_DESCRIPTION = reader.read()
setup(
    name='robotframework-%s' % LIBRARY_NAME.lower(),
    version=VERSION,  # pylint: disable=undefined-variable # noqa
    description='A IMAP email testing library for Robot Framework',
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    url='https://github.com/lasselindqvist/robotframework-%s' % LIBRARY_NAME.lower(),
    author='Lasse Lindqvist',
    author_email='lasselindqvist@users.noreply.github.com',
    license='Apache License, Version 2.0',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Robot Framework',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Testing',
    ],
    keywords='robot framework testing automation imap email mail softwaretesting',
    platforms='any',
    # The package sources live under src/.
    packages=find_packages('src'),
    package_dir={'': 'src'},
    install_requires=['future', 'robotframework >= 2.6.0', 'pysocks']
)
|
#!/usr/bin/env python
import macropy.activate
import gen
import sys
import compose
import hajson
import HAIEC2
def comp(haList):
    """Compose the list of hybrid automata into a single automaton."""
    return compose.compose(haList)
def compile(ha, **kwargs):
    """Generate code for the hybrid automaton *ha*.

    Keyword options:
        COMPOSED -- present: skip preprocessing (automaton already composed)
        ABOF     -- truthy: set gen.ALL_BETS_OFF before generation

    Raises:
        RuntimeError -- when no switched hybrid automaton can be derived.
    """
    # Step-1: Make sure that the hybrid automaton is well-formed
    if 'COMPOSED' not in kwargs:
        ha = gen.preprocess(ha)
    if 'ABOF' in kwargs and kwargs['ABOF']:
        gen.ALL_BETS_OFF = True
    if gen.isWha(ha):
        # Step-2 get a new ha with the Nsteps for each loc computed
        # FIXME: Nsteps might be None in the general case
        sha = gen.getSha(ha)
        if sha is None:
            # Bug fix: raising a plain string is a TypeError in Python 3
            # (string exceptions were removed); raise a real exception.
            raise RuntimeError("Cannot generate code!")
        else:
            gen.codeGen(sha)
def main(argv):
    """CLI entry point.

    argv[0] -- path to the JSON hybrid-automaton description
    argv[1] -- output kind: 'fbt' compiles via HAIEC2.compileToFBT,
               anything else runs the standard code generator
    argv[2] -- (fbt only) target passed through to compileToFBT
    """
    # Parse JSON file
    ha = hajson.parseHA(argv[0])
    if argv[1] == 'fbt':
        HAIEC2.compileToFBT(ha, argv[2])
    else:
        compile(ha, ABOF=True)
if __name__ == '__main__':
    main(sys.argv[1:])
|
from waitlist.blueprints.swagger_api.statistics.blueprint import bp_v1
from flask_login.utils import login_required
from waitlist.permissions import perm_manager
from flask.wrappers import Response
from waitlist.blueprints.swagger_api.statistics.data import StatsManager
from datetime import timedelta
from flask import jsonify
perm_access = perm_manager.get_permission('settings_access')
# Decorator-order fix: the route decorator must be OUTERMOST (listed
# first). Decorators apply bottom-up, so with @bp_v1.route at the bottom
# Flask registered the view before @login_required and the permission
# check wrapped it — both checks were silently bypassed.
@bp_v1.route('/distinct_hull_character/<int:duration_seconds>',
             methods=['GET'])
@login_required
@perm_access.require()
def distinct_hull_character_get_v1(duration_seconds: int) -> Response:
    """
    file: distinct_hull_character_get_v1.yml
    """
    # Stats over the trailing window of the given length, as JSON.
    statistic_data = StatsManager.get_distinct_hull_character_stats(
        timedelta(seconds=duration_seconds)
    )
    return jsonify(statistic_data)
# Decorator-order fix: route decorator outermost so @login_required and
# the permission check actually wrap the registered view (see module
# siblings for the same pattern).
@bp_v1.route('/approved_fits_by_account/<int:duration_seconds>',
             methods=['GET'])
@login_required
@perm_access.require()
def approved_fits_by_account_v1(duration_seconds: int) -> Response:
    """
    file: approved_fits_by_account_get_v1.yml
    """
    # Stats over the trailing window of the given length, as JSON.
    statistic_data = StatsManager.get_approved_fits_by_account_stats(
        timedelta(seconds=duration_seconds)
    )
    return jsonify(statistic_data)
# Decorator-order fix: route decorator outermost so @login_required and
# the permission check actually wrap the registered view.
@bp_v1.route('/joined_members/<int:duration_seconds>',
             methods=['GET'])
@login_required
@perm_access.require()
def joined_members_v1(duration_seconds: int) -> Response:
    """
    file: joined_members_get_v1.yml
    """
    # Stats over the trailing window of the given length, as JSON.
    statistic_data = StatsManager.get_joined_members_stats(
        timedelta(seconds=duration_seconds)
    )
    return jsonify(statistic_data)
def nickname_generator(name):
    """Return a 3- or 4-letter nickname for *name*.

    The first three letters are used when the third letter is a
    consonant, the first four when it is a vowel ("aeiou"); names
    shorter than four characters yield an error message instead.
    """
    if len(name) < 4:
        return "Error: Name too short"
    cut = 4 if name[2] in "aeiou" else 3
    return name[:cut]
'''
Nickname Generator
Write a function, nicknameGenerator that takes a string name as an argument
and returns the first 3 or 4 letters as a nickname.
If the 3rd letter is a consonant, return the first 3 letters.
nickname("Robert") //=> "Rob"
nickname("Kimberly") //=> "Kim"
nickname("Samantha") //=> "Sam"
If the 3rd letter is a vowel, return the first 4 letters.
nickname("Jeannie") //=> "Jean"
nickname("Douglas") //=> "Doug"
nickname("Gregory") //=> "Greg"
If the string is less than 4 characters, return "Error: Name too short".
Notes:
Vowels are "aeiou", so discount the letter "y".
Input will always be a string.
Input will always have the first letter capitalised and the rest lowercase (e.g. Sam).
The input can be modified
'''
|
import numpy as np
from sklearn.model_selection import cross_val_predict
from sklearn.base import clone
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from joblib import Parallel, delayed
def _assure_2d_array(x):
if x.ndim == 1:
x = x.reshape(-1, 1)
elif x.ndim > 2:
raise ValueError('Only one- or two-dimensional arrays are allowed')
return x
def _get_cond_smpls(smpls, bin_var):
smpls_0 = [(np.intersect1d(np.where(bin_var == 0)[0], train), test) for train, test in smpls]
smpls_1 = [(np.intersect1d(np.where(bin_var == 1)[0], train), test) for train, test in smpls]
return smpls_0, smpls_1
def _check_is_partition(smpls, n_obs):
test_indices = np.concatenate([test_index for _, test_index in smpls])
if len(test_indices) != n_obs:
return False
hit = np.zeros(n_obs, dtype=bool)
hit[test_indices] = True
if not np.all(hit):
return False
return True
def _check_all_smpls(all_smpls, n_obs, check_intersect=False):
    """Validate every sample split in *all_smpls*.

    Delegates each split to _check_smpl_split and returns the list of
    normalized (sorted, integer) splits.
    """
    return [_check_smpl_split(smpl, n_obs, check_intersect) for smpl in all_smpls]
def _check_smpl_split(smpl, n_obs, check_intersect=False):
    """Validate each (train, test) tuple of one sample split.

    Returns the list of normalized tuples from _check_smpl_split_tpl.
    """
    return [_check_smpl_split_tpl(tpl, n_obs, check_intersect) for tpl in smpl]
def _check_smpl_split_tpl(tpl, n_obs, check_intersect=False):
train_index = np.sort(np.array(tpl[0]))
test_index = np.sort(np.array(tpl[1]))
if not issubclass(train_index.dtype.type, np.integer):
raise TypeError('Invalid sample split. Train indices must be of type integer.')
if not issubclass(test_index.dtype.type, np.integer):
raise TypeError('Invalid sample split. Test indices must be of type integer.')
if check_intersect:
if set(train_index) & set(test_index):
raise ValueError('Invalid sample split. Intersection of train and test indices is not empty.')
if len(np.unique(train_index)) != len(train_index):
raise ValueError('Invalid sample split. Train indices contain non-unique entries.')
if len(np.unique(test_index)) != len(test_index):
raise ValueError('Invalid sample split. Test indices contain non-unique entries.')
# we sort the indices above
# if not np.all(np.diff(train_index) > 0):
# raise NotImplementedError('Invalid sample split. Only sorted train indices are supported.')
# if not np.all(np.diff(test_index) > 0):
# raise NotImplementedError('Invalid sample split. Only sorted test indices are supported.')
if not set(train_index).issubset(range(n_obs)):
raise ValueError('Invalid sample split. Train indices must be in [0, n_obs).')
if not set(test_index).issubset(range(n_obs)):
raise ValueError('Invalid sample split. Test indices must be in [0, n_obs).')
return train_index, test_index
def _fit(estimator, x, y, train_index, idx=None):
estimator.fit(x[train_index, :], y[train_index])
return estimator, idx
def _dml_cv_predict(estimator, x, y, smpls=None,
                    n_jobs=None, est_params=None, method='predict', return_train_preds=False):
    """Cross-fitted predictions for a nuisance learner.

    estimator - scikit-learn style learner; cloned before each fit
    x, y - data; y may be a list of per-fold targets (fold-specific target)
    smpls - list of (train_index, test_index) tuples
    n_jobs - parallelism for joblib / cross_val_predict
    est_params - None, a dict applied to all folds, or a list of dicts (one per fold)
    method - name of the prediction method, e.g. 'predict' or 'predict_proba'
    return_train_preds - additionally return per-fold train predictions

    Returns the out-of-fold predictions (for 'predict_proba', column 1, i.e.
    the probability of the second encoded class), and additionally the list of
    train-fold predictions when return_train_preds is True.
    """
    n_obs = x.shape[0]
    # fast path via sklearn's cross_val_predict only works when the folds form
    # a partition of [0, n_obs) and nothing fold-specific is requested
    smpls_is_partition = _check_is_partition(smpls, n_obs)
    fold_specific_params = (est_params is not None) & (not isinstance(est_params, dict))
    fold_specific_target = isinstance(y, list)
    manual_cv_predict = (not smpls_is_partition) | return_train_preds | fold_specific_params | fold_specific_target
    if not manual_cv_predict:
        if est_params is None:
            # if there are no parameters set we redirect to the standard method
            preds = cross_val_predict(clone(estimator), x, y, cv=smpls, n_jobs=n_jobs, method=method)
        else:
            assert isinstance(est_params, dict)
            # if no fold-specific parameters we redirect to the standard method
            # warnings.warn("Using the same (hyper-)parameters for all folds")
            preds = cross_val_predict(clone(estimator).set_params(**est_params), x, y, cv=smpls, n_jobs=n_jobs,
                                      method=method)
        if method == 'predict_proba':
            return preds[:, 1]
        else:
            return preds
    else:
        if not smpls_is_partition:
            assert not fold_specific_target, 'combination of fold-specific y and no cross-fitting not implemented yet'
            assert len(smpls) == 1
        if method == 'predict_proba':
            assert not fold_specific_target  # fold_specific_target only needed for PLIV.partialXZ
            # encode labels so that column 1 of predict_proba is well defined
            y = np.asarray(y)
            le = LabelEncoder()
            y = le.fit_transform(y)
        parallel = Parallel(n_jobs=n_jobs, verbose=0, pre_dispatch='2*n_jobs')
        if fold_specific_target:
            # expand each fold's target into a full-length array (nan outside
            # the fold's train rows) so _fit can index it uniformly
            y_list = list()
            for idx, (train_index, _) in enumerate(smpls):
                xx = np.full(n_obs, np.nan)
                xx[train_index] = y[idx]
                y_list.append(xx)
        else:
            # just replicate the y in a list
            y_list = [y] * len(smpls)
        if est_params is None:
            fitted_models = parallel(delayed(_fit)(
                clone(estimator), x, y_list[idx], train_index, idx)
                for idx, (train_index, test_index) in enumerate(smpls))
        elif isinstance(est_params, dict):
            # warnings.warn("Using the same (hyper-)parameters for all folds")
            fitted_models = parallel(delayed(_fit)(
                clone(estimator).set_params(**est_params), x, y_list[idx], train_index, idx)
                for idx, (train_index, test_index) in enumerate(smpls))
        else:
            assert len(est_params) == len(smpls), 'provide one parameter setting per fold'
            fitted_models = parallel(delayed(_fit)(
                clone(estimator).set_params(**est_params[idx]), x, y_list[idx], train_index, idx)
                for idx, (train_index, test_index) in enumerate(smpls))
        # assemble out-of-fold predictions; rows never in a test fold stay nan
        preds = np.full(n_obs, np.nan)
        train_preds = list()
        for idx, (train_index, test_index) in enumerate(smpls):
            assert idx == fitted_models[idx][1]
            pred_fun = getattr(fitted_models[idx][0], method)
            if method == 'predict_proba':
                preds[test_index] = pred_fun(x[test_index, :])[:, 1]
            else:
                preds[test_index] = pred_fun(x[test_index, :])
            if return_train_preds:
                train_preds.append(pred_fun(x[train_index, :]))
        if return_train_preds:
            return preds, train_preds
        else:
            return preds
def _dml_tune(y, x, train_inds,
              learner, param_grid, scoring_method,
              n_folds_tune, n_jobs_cv, search_mode, n_iter_randomized_search):
    """Hyper-parameter tuning of *learner* on every training subset.

    For each index set in train_inds a fresh shuffled KFold resampling is
    drawn and either a grid search or a randomized search is fitted on the
    corresponding rows of x/y.  Returns the list of fitted search objects.
    """
    tune_res = list()
    for train_index in train_inds:
        tune_resampling = KFold(n_splits=n_folds_tune, shuffle=True)
        common_kwargs = dict(scoring=scoring_method, cv=tune_resampling, n_jobs=n_jobs_cv)
        if search_mode == 'grid_search':
            searcher = GridSearchCV(learner, param_grid, **common_kwargs)
        else:
            # only two search modes are supported
            assert search_mode == 'randomized_search'
            searcher = RandomizedSearchCV(learner, param_grid,
                                          n_iter=n_iter_randomized_search,
                                          **common_kwargs)
        tune_res.append(searcher.fit(x[train_index, :], y[train_index]))
    return tune_res
def _draw_weights(method, n_rep_boot, n_obs):
if method == 'Bayes':
weights = np.random.exponential(scale=1.0, size=(n_rep_boot, n_obs)) - 1.
elif method == 'normal':
weights = np.random.normal(loc=0.0, scale=1.0, size=(n_rep_boot, n_obs))
elif method == 'wild':
xx = np.random.normal(loc=0.0, scale=1.0, size=(n_rep_boot, n_obs))
yy = np.random.normal(loc=0.0, scale=1.0, size=(n_rep_boot, n_obs))
weights = xx / np.sqrt(2) + (np.power(yy, 2) - 1) / 2
else:
raise ValueError('invalid boot method')
return weights
|
from django.shortcuts import render
from rest_framework import generics, status
from rest_framework.response import Response
from .models import *
from .serializer import CarroSerializer
import requests
# Create your views here.
class CarrosViewSet(generics.ListCreateAPIView):
    '''
    Lists all cars and allows creating a new one (GET list / POST create).
    '''
    queryset = Carro.objects.all()
    #lookup_field = 'id'
    serializer_class = CarroSerializer
class CarroDetail(generics.RetrieveUpdateDestroyAPIView):
    # Single-car endpoint: retrieve, update or delete one Carro by primary key.
    queryset = Carro.objects.all()
    serializer_class = CarroSerializer
|
from __future__ import division
import numpy as np
from scipy.spatial import cKDTree
import matplotlib.pyplot as plt
class P(object):
    r"""A lattice particle with up to 8 neighbours.

    Neighbour numbering around the particle (x/u to the right, y/v up)::

        y, v
        8 4 6
         \|/
      1-- o -- 2  x, u
         /|\
        5 3 7
    """
    def __init__(self, size):
        # material properties (aluminium-like)
        self.rho = 5.e3     # density
        self.E = 71e9       # Young's modulus
        self.nu = 0.33      # Poisson's ratio
        # grid indices of this particle
        self.i = 0
        self.j = 0
        #TODO use list of adjacent particles
        #TODO compute vector of positions among adjacent particles
        self.padjs = []     # adjacent particles
        self.dadjs = []     # initial distances to the adjacent particles
        # geometry: square cell of side `size`, thickness `h`
        self.size = size
        self.h = 0.01
        self.A = self.h * self.size          # cross-section area
        self.m = self.size * self.A * self.rho  # lumped mass
        # kinematic state
        self.x = 0
        self.y = 0
        self.pos_1 = np.array([0., 0.])  # position at the previous step
        self.pos = np.array([0., 0.])    # current position
        self.ut = np.array([0, 0.])      # 2-D velocity
        self.utt = np.array([0, 0.])     # 2-D acceleration
        self.f = np.array([0., 0.])      # 2-D internal force
        self.fext = np.array([0., 0.])   # 2-D external force

    def __str__(self):
        return 'P%02d%02d' % (self.i, self.j)

    def __repr__(self):
        return self.__str__()
# ---------------------------------------------------------------------------
# Build a rectangular lattice of particles, connect neighbours, then relax it
# under a point load with explicit time integration, saving three figures.
#
# Fixes: matplotlib's Figure.savefig takes the file name as the positional
# `fname` argument (the `filename=` keyword is not part of its signature),
# and Axes.contourf has no `colorbar` keyword - the colorbar must be added
# via Figure.colorbar.
# ---------------------------------------------------------------------------
# building particles
lenx = 1
leny = 0.5
size = 0.1
numx = int(lenx/size)
numy = int(leny/size)
ps = [P(size) for _ in range(numx * numy)]
# positions: particles stored row-major, index = j*numx + i
psdict = {}
for i in range(numx):
    psdict[i] = {}
    for j in range(numy):
        p = ps[j*numx + i]
        psdict[i][j] = p
        p.x = lenx*i/(numx-1)
        p.y = leny*j/(numy-1)
        p.pos[:] = p.x, p.y
        p.pos_1[:] = p.x, p.y
        p.i = i
        p.j = j
# connectivity: link each particle to its (up to 8) Moore neighbours
for i in range(numx):
    for j in range(numy):
        p = psdict[i][j]
        if i != 0:
            p.padjs.append(psdict[i-1][j])
            if j > 0:
                p.padjs.append(psdict[i-1][j-1])
            if j < numy-1:
                p.padjs.append(psdict[i-1][j+1])
        if i != numx-1:
            p.padjs.append(psdict[i+1][j])
            if j > 0:
                p.padjs.append(psdict[i+1][j-1])
            if j < numy-1:
                p.padjs.append(psdict[i+1][j+1])
        if j != 0:
            p.padjs.append(psdict[i][j-1])
        if j != numy-1:
            p.padjs.append(psdict[i][j+1])
# record the rest distance to every neighbour
for p in ps:
    for padj in p.padjs:
        dist = ((p.pos - padj.pos)**2).sum()**0.5
        p.dadjs.append(dist)
# plot the undeformed point grid
fig = plt.figure(dpi=500, figsize=(10, 10))
fig.clear()
ax = plt.gca()
#ax.set_xlim(-2*size, lenx+2*size)
#ax.set_ylim(-2*size, leny+2*size)
ax.plot([p.pos[0] for p in ps], [p.pos[1] for p in ps], 'ko')
ax.set_aspect('equal')
# fname must be positional: savefig has no `filename` keyword
fig.savefig('tmp_beam2d_points.png', bbox_inches='tight')
# integration
dt = 0.0001
n = 50
# point load on the top-right corner particle
psdict[numx-1][numy-1].fext += [0, 1000]
for step in range(n):
    print('Step %d' % (step + 1))
    total_work = 0
    for p in ps:
        # predictor: advance with external forces only
        p.f *= 0
        p.f += p.fext
        utt = 1./p.m*p.f
        ut = utt*dt
        p.pos += ut*dt
        # a small system of equation need to be solved here to find the
        # equilibrium of the particle considering the adjacents
        p.f *= 0
        p.f += p.fext
        # adding adjacent reaction (linear spring towards each neighbour)
        for i, padj in enumerate(p.padjs):
            #TODO not considering area reduction deformed with dist variation
            k = p.E * p.A / p.dadjs[i]
            pos_diff = p.pos - padj.pos_1
            dx, dy = pos_diff
            d = (dx**2 + dy**2)**0.5
            fres = (d - p.dadjs[i]) * k
            fx = -fres * dx / d
            fy = -fres * dy / d
            p.f += fx, fy
        # corrector: integrate acceleration and velocity
        p.utt = 1./p.m*p.f
        p.ut += p.utt*dt
        p.pos += p.ut*dt
        print('    Particle %s force %s' % (p, p.f))
        total_work += (p.ut*dt * p.f).sum()
    print('    Total work %f' % total_work)
    # all positions updated
    # all velocities updated
    # all accelerations updated
    for p in ps:
        if np.any(np.isnan(p.pos)):
            print(p.pos)
            raise RuntimeError()
        p.pos_1[:] = p.pos
        # clamping one side
        if p.i in [0]:
            p.pos[:] = p.x, p.y
            p.pos_1[:] = p.x, p.y
            p.ut *= 0
# overlay the deformed configuration on the point plot
xplot = [p.pos[0] for p in ps]
yplot = [p.pos[1] for p in ps]
ax.plot(xplot, yplot, 'r^', mfc='None')
fig.savefig('tmp_beam2d_deformed.png', bbox_inches='tight')
if True:
    fig.clear()
    ax = fig.gca()
    ax.set_aspect('equal')
    xplot = np.array([p.x for p in ps]).reshape(numy, numx)
    yplot = np.array([p.y for p in ps]).reshape(numy, numx)
    # NOTE(review): this plots the y-component of the force (p.f[1]) although
    # the variable and output file are named "fx" - confirm intent
    fxplot = np.array([p.f[1] for p in ps]).reshape(numy, numx)
    levels = np.linspace(fxplot.min(), fxplot.max(), 400)
    # contourf() has no `colorbar` kwarg; attach the colorbar explicitly
    cs = ax.contourf(xplot, yplot, fxplot, levels=levels)
    fig.colorbar(cs)
    fig.savefig('tmp_beam2d_fx.png', bbox_inches='tight')
|
# Django settings for gim project.
import json
import os.path
import logging
import socket
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse_lazy
# Paths derived from the location of this settings module.
SETTINGS_PATH = os.path.dirname(os.path.abspath(__file__))
GIM_ROOT = os.path.normpath(os.path.join(SETTINGS_PATH, '..'))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
INTERNAL_IPS = ('127.0.0.1',)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
DATE_FORMAT = "N j, Y"  # Aug. 6, 2012.
DATETIME_FORMAT = "N j, Y P"  # Aug. 6, 2012 1:55 p.m.
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.normpath(os.path.join(GIM_ROOT, 'static/'))
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# Template engine configuration: project context processors plus a cached
# loader wrapping the filesystem and app-directory loaders.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'OPTIONS': {
            'context_processors': [
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
                "gim.front.context_processors.default_context_data",
                "gim.front.context_processors.user_context",
                "gim.front.context_processors.js_data",
            ],
            'loaders': [
                ('django.template.loaders.cached.Loader', (
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                    # 'django.template.loaders.eggs.Loader',
                )),
            ]
        }
    }
]
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'gim.front.middleware.AddMessagesToAjaxResponseMiddleware',
    'async_messages.middleware.AsyncMiddleware',
    'gim.front.middleware.VaryOnAcceptHeaderMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gim.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'gim.wsgi.application'
AUTH_USER_MODEL = 'core.GithubUser'
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'adv_cache_tag',
    'macros',
    'jsonify',
    'gim.core',
    'gim.subscriptions',
    'gim.hooks',  # github hooks (push from github to isshub)
    'gim.events',  # change events of issues (updated body, labels...)
    'gim.graphs',  # graph of repositories...
    'gim.activity',  # activity (timeline, updates...)
    'gim.front',
    'gim.front.auth',
    'gim.front.activity',
    'gim.front.dashboard',
    'gim.front.dashboard.repositories',
    'gim.front.github_notifications',
    'gim.front.repository',
    'gim.front.repository.issues',
    'gim.front.repository.dashboard',
    'gim.front.repository.board',
]
# django-debug-toolbar panels (only active when DEBUG_TOOLBAR is enabled
# via local settings).
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
    # 'template_timings_panel.panels.TemplateTimings.TemplateTimings',
]
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}
# Sessions and messages live in the cache backend.
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_COOKIE_AGE = 3600 * 24 * 31  # 31 days
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
SESSION_COOKIE_NAME = 'gimsid'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
class ContextFilter(logging.Filter):
    """Logging filter that stamps every record with a process identity string.

    The string is computed once at import time: last 9 chars of the host
    name, the supervisord process name when present, and the pid.  It is
    exposed to formatters as %(full_process_name)s.
    """
    full_process_name = '~%9s' % socket.gethostname()[-9:]
    if os.environ.get('SUPERVISOR_PROCESS_NAME'):
        full_process_name += ' ~%6s' % os.environ.get('SUPERVISOR_PROCESS_NAME').strip('"')[-6:]
    full_process_name += ' ~%5d' % os.getpid()

    def filter(self, record):
        # Attach the identity to the record; never drops any record.
        record.full_process_name = self.full_process_name
        return True
# Logging: everything goes to the console; django/gim errors are also mailed
# to the admins when DEBUG is False. All records carry the process identity
# added by ContextFilter.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'full_process_name': {
            '()': 'gim.settings.ContextFilter'
        },
    },
    'formatters': {
        'full': {
            '()': 'django.utils.log.ServerFormatter',
            'format': u'%(full_process_name)s ~%(asctime)s ~%(name)-10.10s ~%(levelname)-8.8s ~%(message)s',
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false', 'full_process_name'],
            'class': 'django.utils.log.AdminEmailHandler',
            'formatter': 'full',
        },
        'console': {
            'level': 'DEBUG',
            'filters': ['full_process_name'],
            'class': 'logging.StreamHandler',
            'formatter': 'full',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['mail_admins', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'gim': {
            'handlers': ['mail_admins', 'console'],
            'level': 'ERROR',
            'propagate': True,
        },
        'gim.ws': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'gim.graphql': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'gim.maintenance': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'gim.log': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'gim.jobs': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
        'github': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': False,
        },
    }
}
# Authentication: standard model backend plus Github OAuth.
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'gim.front.auth.backends.GithubBackend',
]
LOGIN_URL = reverse_lazy('front:auth:login')
LOGIN_REDIRECT_URL = reverse_lazy('front:dashboard:home')
GITHUB_SCOPE = 'repo'
WORKERS_LOGGER_CONFIG = {
    'handler': logging.StreamHandler(),
    'level': logging.INFO
}
# Optional JSON secrets file: values consulted by get_env_variable() below
# when a name is missing from the process environment.
secrets = {}
if 'SECRETS_PATH' in os.environ:
    with open(os.environ['SECRETS_PATH']) as f:
        secrets = json.loads(f.read())
def get_env_variable(var_name, **kwargs):
    """Resolve a setting by name.

    Lookup order: process environment, then the `secrets` dict loaded from
    SECRETS_PATH, then kwargs['default'] when provided.  Raises
    ImproperlyConfigured when the name cannot be resolved at all.
    """
    if var_name in os.environ:
        return os.environ[var_name]
    if var_name in secrets:
        return secrets[var_name]
    if 'default' in kwargs:
        return kwargs['default']
    msg = "Set the %s environment variable"
    raise ImproperlyConfigured(msg % var_name)
# define settings below in env or file defined by SECRETS_PATH
# SECRET_KEY, GITHUB_CLIENT_ID and GITHUB_CLIENT_SECRET **MUST** be defined
SECRET_KEY = get_env_variable('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = get_env_variable('ALLOWED_HOSTS', default=[])
if isinstance(ALLOWED_HOSTS, basestring):
    # if got from json, it's already a list, but not from env
    ALLOWED_HOSTS = ALLOWED_HOSTS.split(',')
USE_X_FORWARDED_PORT = True
# `or None` normalizes an empty string from the env to None.
SESSION_COOKIE_DOMAIN = get_env_variable('SESSION_COOKIE_DOMAIN', default=None) or None
SESSION_COOKIE_SECURE = bool(get_env_variable('SESSION_COOKIE_SECURE', default=False))
CSRF_COOKIE_SECURE = bool(get_env_variable('CSRF_COOKIE_SECURE', default=False))
# Github OAuth application credentials and optional hook endpoint.
GITHUB_CLIENT_ID = get_env_variable('GITHUB_CLIENT_ID')
GITHUB_CLIENT_SECRET = get_env_variable('GITHUB_CLIENT_SECRET')
GITHUB_HOOK_URL = get_env_variable('GITHUB_HOOK_URL', default=None)
DATABASES = {  # default to a postgresql db named "gim"
    'default': {
        'ENGINE': get_env_variable('DB_ENGINE', default='django.db.backends.postgresql'),
        'NAME': get_env_variable('DB_NAME', default='gim'),
        'USER': get_env_variable('DB_USER', default=''),
        'PASSWORD': get_env_variable('DB_PASSWORD', default=''),
        'HOST': get_env_variable('DB_HOST', default=''),
        'PORT': get_env_variable('DB_PORT', default=''),
        'CONN_MAX_AGE': get_env_variable('DB_CONN_MAX_AGE', default=0),
    }
}
def get_redis_options(env_prefix, default_host='localhost', default_port=6379, default_db=0, default_socket_path=None):
    """Build a redis connection options dict from <env_prefix>_REDIS_* settings.

    If the resolved HOST starts with '/', it is treated as a unix socket path
    and the result carries 'unix_socket_path'; otherwise 'host' and 'port'.
    The 'db' number is always included.

    Bug fix: `default_host` was previously ignored - when <prefix>_REDIS_HOST
    was unset and no default_socket_path was given, `host` was None and the
    startswith() call crashed.  The host default now falls back to
    default_socket_path first, then default_host.
    """
    def get_env(suffix, default):
        # per-prefix lookup, e.g. get_env('HOST', ...) -> CACHE_DEFAULT_REDIS_HOST
        return get_env_variable('%s_REDIS_%s' % (env_prefix, suffix), default=default)
    host = get_env('HOST', default_socket_path or default_host)
    if host.startswith('/'):
        result = {
            'unix_socket_path': host,
        }
    else:
        result = {
            'host': host,
            'port': int(get_env('PORT', default_port)),
        }
    result['db'] = int(get_env('DB', default_db))
    return result
def redis_options_to_url(options):
    """Turn an options dict from get_redis_options() into a connection URL.

    A dict with 'unix_socket_path' yields 'unix://<path>?db=<db>'; otherwise
    'redis://<host>:<port>/<db>'.
    """
    if 'unix_socket_path' in options:
        return 'unix://%s?db=%d' % (options['unix_socket_path'], options['db'])
    return 'redis://%s:%d/%d' % (options['host'], options['port'], options['db'])
# Redis connection settings for the limpyd models, websockets, activity
# streams and job workers.
LIMPYD_DB_CONFIG = get_redis_options('LIMPYD_DB')
LIMPYD_DB_WS_CONFIG = get_redis_options('LIMPYD_DB_WS')
LIMPYD_DB_ACTIVITY_CONFIG = get_redis_options('LIMPYD_DB_ACTIVITY')
WORKERS_REDIS_CONFIG = get_redis_options('LIMPYD_JOBS')
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': redis_options_to_url(get_redis_options('CACHE_DEFAULT', default_db=1)),
        'TIMEOUT': 30*24*60*60,  # 30 days
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'PARSER_CLASS': 'redis.connection.HiredisParser',
            'PICKLE_VERSION': 2,
        }
    },
    'issues_tag': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': redis_options_to_url(get_redis_options('CACHE_ISSUES_TAG', default_db=2)),
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'PARSER_CLASS': 'redis.connection.HiredisParser',
            'PICKLE_VERSION': 2,
        }
    },
}
# If changed, the default value in database for GithubUser.subscriptions_limit must be changed too
# Use a negative value in env to behave like `None` to remove any subscriptions limit
SUBSCRIPTIONS_LIMIT = int(get_env_variable('SUBSCRIPTIONS_LIMIT', default=2))
if SUBSCRIPTIONS_LIMIT < 0:
    SUBSCRIPTIONS_LIMIT = None
SPONSORING_OWN_LIMIT = int(get_env_variable('SPONSORING_OWN_LIMIT', default=0))
if SPONSORING_OWN_LIMIT < 0:
    SPONSORING_OWN_LIMIT = None
SPONSORING_ANY_LIMIT = int(get_env_variable('SPONSORING_ANY_LIMIT', default=0))
if SPONSORING_ANY_LIMIT < 0:
    SPONSORING_ANY_LIMIT = None
SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC = get_env_variable('SUBSCRIPTIONS_LIMIT_MANAGEMENT_FUNC', default=None)
TOS_URL = get_env_variable('TOS_URL', default=None)
TOS_URL_IS_STATIC = bool(get_env_variable('TOS_URL_IS_STATIC', default=False))
# Crossbar.io HTTP bridge used to push websocket messages.
CROSSBAR_REST_HOST = get_env_variable('CROSSBAR_REST_HOST', default='http://127.0.0.1')  # with scheme
CROSSBAR_REST_PORT = get_env_variable('CROSSBAR_REST_PORT', default='8888')
CROSSBAR_REST_KEY = str(get_env_variable('CROSSBAR_REST_KEY'))
CROSSBAR_REST_SECRET = str(get_env_variable('CROSSBAR_REST_SECRET'))
WS_SUBDOMAIN = get_env_variable('WS_SUBDOMAIN', default=None) or None
AVATARS_PREFIX = get_env_variable('AVATARS_PREFIX', default='') or ''
# Branding / UI customization.
BRAND_SHORT_NAME = get_env_variable('BRAND_SHORT_NAME', default='G.I.M')
BRAND_LONG_NAME = get_env_variable('BRAND_LONG_NAME', default='Github Issues Manager')
FAVICON_PATH = get_env_variable('FAVICON_PATH', default=None)
FAVICON_STATIC_MANAGED = bool(get_env_variable('FAVICON_STATIC_MANAGED', default=True))
FAVICON_DYN_BACKGROUND_COLOR = get_env_variable('FAVICON_DYN_BACKGROUND_COLOR', default='#a24037')
FAVICON_DYN_TEXT_COLOR = get_env_variable('FAVICON_DYN_TEXT_COLOR', default='#fff')
HEADWAYAPP_ACCOUNT = get_env_variable('HEADWAYAPP_ACCOUNT', default=None)
GOOGLE_ANALYTICS_ID = get_env_variable('GOOGLE_ANALYTICS_ID', default=None)
DEBUG_TOOLBAR = False
_TEMPLATE_LOADERS = None
_TEMPLATE_DEBUG = None
# Optional local overrides; when present they may enable the debug toolbar
# and tweak template loaders/debugging and middleware.
try:
    from .local_settings import *
except ImportError:
    pass
else:
    if DEBUG_TOOLBAR:
        INSTALLED_APPS += ['debug_toolbar', 'template_timings_panel', ]
        MIDDLEWARE_CLASSES += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
    from . import local_settings
    if getattr(local_settings, '_TEMPLATE_LOADERS', None) is not None:
        TEMPLATES[0]['OPTIONS']['loaders'] = local_settings._TEMPLATE_LOADERS
    if getattr(local_settings, '_TEMPLATE_DEBUG', None) is not None:
        TEMPLATES[0]['OPTIONS']['debug'] = local_settings._TEMPLATE_DEBUG
    if hasattr(local_settings, '_CONTEXT_PROCESSORS'):
        TEMPLATES[0]['OPTIONS']['context_processors'] += local_settings._CONTEXT_PROCESSORS
    if hasattr(local_settings, '_MIDDLEWARE_CLASSES'):
        MIDDLEWARE_CLASSES += local_settings._MIDDLEWARE_CLASSES
if DEBUG:
    LOGGING['loggers']['django']['level'] = 'INFO'
|
# copy的 引入时间戳来确定树中父子节点的关系
class Solution:
    """LeetCode 2322: remove two distinct edges of a tree so that the
    difference between the largest and smallest XOR of the three resulting
    components is minimized.  Uses DFS entry/exit timestamps to decide
    ancestor relations between the two removed edges."""

    def minimumScore(self, nums: List[int], edges: List[List[int]]) -> int:
        n = len(nums)
        adj = [[] for _ in range(n)]
        for a, b in edges:
            adj[a].append(b)
            adj[b].append(a)

        subtree_xor = [0] * n  # XOR of the subtree rooted at each node
        tin = [0] * n          # DFS entry timestamp
        tout = [0] * n         # DFS exit timestamp
        timer = 0

        def dfs(node: int, parent: int) -> None:
            nonlocal timer
            timer += 1
            tin[node] = timer
            subtree_xor[node] = nums[node]
            for nxt in adj[node]:
                if nxt != parent:
                    dfs(nxt, node)
                    subtree_xor[node] ^= subtree_xor[nxt]
            tout[node] = timer

        dfs(0, -1)

        best = inf
        # consider cutting the parent edges of nodes i and j (i > j >= 1)
        for i in range(2, n):
            for j in range(1, i):
                if tin[i] < tin[j] <= tout[i]:    # i is an ancestor of j
                    x, y, z = subtree_xor[j], subtree_xor[i] ^ subtree_xor[j], subtree_xor[0] ^ subtree_xor[i]
                elif tin[j] < tin[i] <= tout[j]:  # j is an ancestor of i
                    x, y, z = subtree_xor[i], subtree_xor[i] ^ subtree_xor[j], subtree_xor[0] ^ subtree_xor[j]
                else:                             # the two removed edges lie in disjoint subtrees
                    x, y, z = subtree_xor[i], subtree_xor[j], subtree_xor[0] ^ subtree_xor[i] ^ subtree_xor[j]
                best = min(best, max(x, y, z) - min(x, y, z))
                if best == 0:
                    return 0  # zero spread is optimal; exit early
        return best
|
#!/usr/bin/env python
# EASY-INSTALL-ENTRY-SCRIPT: 'awsebcli==3.0.10','console_scripts','eb'
# Auto-generated console-script shim: locates the pinned awsebcli install in
# the adjacent site-packages directory and dispatches to its 'eb' entry point.
__requires__ = 'awsebcli==3.0.10'
import sys,os
# make the bundled virtualenv's site-packages importable
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../lib/python2.7/site-packages/')
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # exit with the entry point's return code
    sys.exit(
        load_entry_point('awsebcli==3.0.10', 'console_scripts', 'eb')()
    )
|
import numpy as np
import pandas as pd
class Atom(object):
    """One atom of a molecule: index, element name and Cartesian position."""

    def __init__(self, atom_index, atom_name, xyz):
        self.idx = atom_index   # position of the atom within its molecule
        self.name = atom_name   # element symbol, e.g. 'C'
        self.point = xyz        # coordinates


    def __str__(self):
        return "{} {} {}".format(self.name, self.idx, self.point)
class Structure(object):
    """A named molecule built from a per-atom DataFrame with columns
    'atom', 'atom_index', 'x', 'y', 'z'."""

    def __init__(self, name, atoms):
        self.name = name
        self._init_atoms(atoms)

    def _init_atoms(self, atoms):
        # Convert the frame's columns into Atom objects, one per row.
        symbols = atoms['atom'].values
        indices = atoms['atom_index'].values
        coords = np.asarray(atoms[['x', 'y', 'z']])
        self.atoms = [Atom(indices[k], symbols[k], coords[k, :])
                      for k in range(len(symbols))]

    def __str__(self):
        s = self.name
        for atom in self.atoms:
            s = "{}\n  {}".format(s, atom)
        return s
def get_structures(fname, limit=None, seed=42):
    """Load molecules from a structures CSV file.

    fname - CSV with molecule_name, atom_index, atom, x, y, z columns
    limit - when given, sample this many molecule names at random
    seed  - RNG seed used for that sampling
    Returns a list of Structure objects, one per selected index entry (a
    molecule's name repeats once per atom row, so without `limit` each
    molecule is built as many times as it has rows).
    """
    np.random.seed(seed)
    frame = pd.read_csv(fname).set_index('molecule_name')
    selected = frame.index
    if limit:
        selected = np.random.choice(selected, limit)
    structures = list()
    for mol_name in selected:
        rows = frame.loc[mol_name]
        structure = Structure(mol_name, rows[['atom_index', 'atom', 'x', 'y', 'z']])
        print(structure)
        structures.append(structure)
    return structures
def main():
    """Smoke test: load one randomly chosen molecule from the structures CSV."""
    # load structures
    structures_file = "data/structures.csv"
    structures = get_structures(structures_file, limit=1)
    # load train data
    # df_train = pd.read_csv("data/train.csv")
    # print(df_train.shape)
    # load test data
    # df_test = pd.read_csv("data/test.csv")
    # print(df_test.shape)
# Script entry point.
if __name__ == "__main__":
    main()
|
import time
from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2, acos, asin
from random import *
import numpy
from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox
from mcplatform import *
import utilityFunctions
from helper import *
from ChunkAnalysis import *
from PlaceDistricts import *
from CreateRoads import *
# MCEdit filter UI: a label plus an integer option in [1, 10].
inputs = (
    ("Remove Trees", "label"),
    ("Number to Remove", (1, 10))
)
# Name shown in MCEdit's filter list.
displayName = "Remove Trees"
def perform(level, box, options):
    """MCEdit filter entry point: delete up to options["Number to Remove"]
    randomly chosen trees from the selection box."""
    startTime = time.time()
    initializeHeightmap(level, box)
    for i in range(options["Number to Remove"]):
        # treeMap comes from the ChunkAnalysis helpers; entries appear to be
        # tree coordinates - presumably (x, z) - TODO confirm in helper code
        listOfTrees = list(treeMap)
        if len(listOfTrees) == 0:
            break  # no trees left to remove
        x = choice(listOfTrees)
        deleteTree(level, x[0], x[1])
    endTime = time.time()
    print "Finished in " + str(endTime - startTime) + " seconds"
    return
|
# Collects statistics related to an experiment consisting of multiple dialog
# session.
class Pixel(object):
    """Experiment-level tracker.

    Aggregates counters over a group of dialog sessions that make up one
    experiment.

    Attributes:
        num_sessions (int): Number of dialog-sessions in the experiment.
        num_successes (int): Number of dialog-sessions that were successful.
        num_failures (int): Number of dialog-sessions that failed.
        num_terminations (int): Number of dialog-sessions that were forcefully
            terminated by users.
        total_length (int): Cumulative length of all dialog sessions.
    """

    def __init__(self):
        self.num_sessions = 0
        self.num_successes = 0
        self.num_failures = 0
        self.num_terminations = 0
        self.total_length = 0

    def __repr__(self):
        return ("Number of sessions={}, Number of successes={}, Number of "
                "failures={}, Number of terminations={}, Total length={}"
                .format(self.num_sessions, self.num_successes,
                        self.num_failures, self.num_terminations,
                        self.total_length))

    def increment_num_sessions(self):
        """Count one more dialog session."""
        self.num_sessions += 1

    def increment_num_successes(self):
        """Count one more successful session."""
        self.num_successes += 1

    def increment_num_failures(self):
        """Count one more failed session."""
        self.num_failures += 1

    def increment_num_terminations(self):
        """Count one more user-terminated session."""
        self.num_terminations += 1

    def increment_total_length_by(self, length):
        """Add a session's length to the cumulative total."""
        self.total_length += length
|
import argparse
import moviepy.editor as mp
def compose_clips(clip_paths, output_path, fade_duration=0.5):
    """Concatenate video clips with a crossfade between consecutive clips.

    Every clip after the first starts `fade_duration` seconds before the
    previous one ends and fades in over that overlap.  The result is written
    to output_path at the highest fps among the inputs (at least 1).
    """
    timeline = []
    cursor = 0
    fps = 1
    for path in clip_paths:
        clip = mp.VideoFileClip(path)  # .crossfadeout(fade_duration)
        if cursor != 0:
            # start inside the previous clip and fade in over the overlap
            cursor -= fade_duration
            clip = clip.set_start(cursor).crossfadein(fade_duration)
        cursor += clip.duration
        fps = max(fps, clip.fps)
        timeline.append(clip)
    composite = mp.CompositeVideoClip(timeline)
    composite.write_videofile(output_path, audio_codec='aac', fps=fps)
if __name__ == '__main__':
    # CLI: positional input clips, output path and optional crossfade length.
    parser = argparse.ArgumentParser(
        description='Combine movie clips with 1s crossfade')
    parser.add_argument('input_paths', metavar='N', type=str, nargs='+',
                        help='The clips to combine')
    parser.add_argument("--output_path", type=str, help="The output clip")
    parser.add_argument("--fade_duration", type=float, default=0.5, help="The duration of the crossfade, in seconds [default=0.5]")
    args = parser.parse_args()
compose_clips(args.input_paths, args.output_path, args.fade_duration) |
# encoding: utf-8
import jieba
from src.utils import remove_punc
import math
import numpy as np
from src.utils import QueryItem, FAQItem
import logging.config
import logging
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.abcnn.abcnn_model import AbcnnModel
# Configure loggers/handlers/formatters from the file-based logging config.
logging.config.fileConfig(fname='log.config')
def jaccard_similarity(list1, list2):
    """Jaccard similarity of two token lists: |A ∩ B| / |A ∪ B|.

    The denominator carries a 1e-9 epsilon so two empty inputs yield 0.0
    instead of dividing by zero.
    """
    logger = logging.getLogger('jaccard_similarity')
    set1, set2 = set(list1), set(list2)
    shared = set1 & set2
    combined = set1 | set2
    sim = len(shared) * 1.0 / (len(combined) + 1e-9)
    logger.info('jaccard_similarity calculate SUCCESS !')
    return sim
def edit_similarity(v1, v2):
    """Normalized Levenshtein similarity of two sequences, in [0, 1].

    1.0 means identical; the score is 1 - distance / max(len), with a 1e-9
    epsilon in the denominator so two empty sequences return 1.0.
    """
    logger = logging.getLogger('edit_similarity')
    # degenerate cases: one sequence is empty
    if len(v1) == 0:
        return 1 - len(v2) / (max(len(v1), len(v2)) + 1e-9)
    if len(v2) == 0:
        return 1 - len(v1) / (max(len(v1), len(v2)) + 1e-9)
    # classic DP table: dist[i, j] = edit distance of v1[:i] vs v2[:j]
    dist = np.zeros((len(v1) + 1, len(v2) + 1))
    dist[0, :] = range(0, len(v2) + 1)  # first row
    dist[:, 0] = range(0, len(v1) + 1)  # first column
    for i in range(1, len(v1) + 1):
        for j in range(1, len(v2) + 1):
            subst = 0 if v1[i - 1] == v2[j - 1] else 1
            dist[i, j] = min(dist[i - 1, j] + 1,
                             dist[i, j - 1] + 1, dist[i - 1, j - 1] + subst)
    result = 1 - dist[len(v1), len(v2)] / (max(len(v1), len(v2)) + 1e-9)
    logger.debug("edit_similarity()" + str(result))
    logger.info('edit_similarity calculate SUCCESS !')
    return result
def bm25_similarity(doc1_list, candits, k=1.5, b=0.75, avgl=12):
    """BM25-style similarity of the query tokens against every candidate,
    squashed to (0, 1) with a sigmoid.

    doc1_list - query as a list of tokens
    candits   - candidate questions, each a list of tokens
    k, b      - BM25 shape parameters
    avgl      - assumed average document length
    Returns one score per candidate, in input order.
    """
    logger = logging.getLogger('bm25_similarity')
    doc1 = doc1_list
    # document frequency of each token across the candidates, per-candidate
    # term frequencies and per-candidate lengths
    doc_freq = {}           # in how many candidates each token appears
    n_candits = len(candits)
    lengths = []            # token count of each candidate
    tf_per_candit = []      # token -> count dict, one per candidate
    for question in candits:  # question is a list of tokens
        lengths.append(len(question))
        tf = {}
        for word in question:
            tf[word] = tf.get(word, 0) + 1
        for word in set(question):  # document frequency for the idf term
            doc_freq[word] = doc_freq.get(word, 0) + 1
        tf_per_candit.append(tf)
    # idf of every token observed in the candidates
    idf = {}
    for word, freq in doc_freq.items():
        idf[word] = math.log(n_candits - freq + 0.5) - math.log(freq + 0.5)
    # score each candidate against the query tokens
    score_result = []
    for i in range(len(tf_per_candit)):
        score = 0
        for word in doc1:
            idf_word = idf.get(word, 0)
            score += idf_word * (tf_per_candit[i].get(word, 0) / lengths[i]) * (k + 1) / (
                (tf_per_candit[i].get(word, 0) / lengths[i]) + k * (1 - b + b * len(doc1) / avgl) + 1)
        score = 1.0 / (1 + math.exp(-score))
        score_result.append(score)
    logger.info('bm25_similarity calculate SUCCESS !')
    return score_result
def cal_jaccard_similarity(query_item, retrieval_result):
    """Attach a Jaccard similarity score to every retrieved item,
    comparing character-level tokens of the query and each question."""
    logger = logging.getLogger('cal_jaccard_similarity')
    query_tokens = query_item.query_tokens_zi
    for candidate in retrieval_result:
        candidate_tokens = candidate.question_tokens_zi
        logger.debug('cal_jaccard_similarity: query tokens of query item' + str(query_tokens))
        logger.debug('cal_jaccard_similarity: item question_tokens in retrieval_result' + str(candidate_tokens))
        candidate.jaccard_similarity_score = jaccard_similarity(query_tokens, candidate_tokens)
    logger.info('cal_jaccard_similarity finished SUCCESS !')
def cal_edit_similarity(query_item, retrieval_result):
    """Attach an edit-distance-based similarity score to every retrieved
    item, comparing character-level tokens of query and question."""
    logger = logging.getLogger('cal_edit_similarity')
    query_tokens = query_item.query_tokens_zi
    for candidate in retrieval_result:
        candidate_tokens = candidate.question_tokens_zi
        logger.debug('cal_edit_similarity: query tokens of query item' + str(query_tokens))
        logger.debug('cal_edit_similarity: item question_tokens in retrieval_result' + str(candidate_tokens))
        candidate.edit_similarity_score = edit_similarity(query_tokens, candidate_tokens)
    logger.info('cal_edit_similarity finished SUCCESS !')
def cal_bm25_similarity(query_item, retrieval_result):
    """Attach a BM25 similarity score to every retrieved item; scores are
    computed in one batch over all candidate questions."""
    logger = logging.getLogger('cal_bm25_similarity')
    query_tokens = query_item.query_tokens_zi
    candidate_token_lists = [candidate.question_tokens_zi for candidate in retrieval_result]
    scores = bm25_similarity(query_tokens, candidate_token_lists)
    logger.debug('cal_bm25_similarity: query tokens of query item' + str(query_tokens))
    logger.debug('cal_bm25_similarity: item question_tokens in retrieval_result' + str(candidate_token_lists))
    for candidate, score in zip(retrieval_result, scores):
        candidate.bm25_similarity_score = score
    logger.info('cal_bm25_similarity finished SUCCESS !')
def cal_abcnn_similarity(query_item: QueryItem, retrieval_result):
    """Attach an ABCNN deep-model similarity score to every retrieved item.
    The query is paired with each candidate question and predicted in one
    batch; a new model instance is built per call."""
    logger = logging.getLogger('abcnn')
    model = AbcnnModel()
    left_sentences = [query_item.query] * len(retrieval_result)
    right_sentences = [candidate.question for candidate in retrieval_result]
    p_test, h_test = model.transfer_char_data(left_sentences, right_sentences)
    predictions = model.predict(p_test, h_test).tolist()
    for candidate, prediction in zip(retrieval_result, predictions):
        candidate.abcnn_similarity = prediction
        logger.debug("abcnn similarity" + str(prediction))
    logger.info('cal_abcnn_similarity finished SUCCESS !')
def match(query_item: QueryItem, retrieval_result):
    """Run every similarity measure over the retrieval candidates.

    Mutates the items in retrieval_result in place (each cal_* helper
    attaches its own score attribute) and returns the same list.
    """
    logger = logging.getLogger('match')
    # Same order as the original: bm25, edit, jaccard, abcnn.
    for scorer in (cal_bm25_similarity, cal_edit_similarity,
                   cal_jaccard_similarity, cal_abcnn_similarity):
        scorer(query_item, retrieval_result)
    logger.info('match calculation finished SUCCESS !')
    return retrieval_result
if __name__ == '__main__':
    # Smoke test: build one query and three hand-made FAQ candidates, run
    # all similarity measures, and print the resulting scores.
    # s1 = '你 说 你 是 谁'
    # s2 = '我 不 知道 你 是 谁'
    # v1 = [12, 3, 4, 6]
    # v2 = [2, 4, 5, 6]
    # print(jaccard_similarity(s1.split(), s2.split()))
    # print(dice_similarity(s1.split(), s2.split()))
    # print(cos_similarity(v1, v2))
    q = QueryItem()
    q.query = "He did but the initiative did not get very far tomorrow however yesterday tomorrow however yesterday tomorrow however yesterday tomorrow however yesterday"
    # jieba segments the sentence; remove_punc strips punctuation tokens.
    # NOTE(review): match() reads *_tokens_zi attributes, which are not set
    # here -- presumably filled in by QueryItem/FAQItem themselves; confirm.
    q.query_tokens = remove_punc(jieba.cut(q.query))
    cand1 = FAQItem(q)
    cand1.question = "He did but the initiative did not get very far today however yesterday tomorrow however yesterday tomorrow however yesterday tomorrow however yesterday"
    cand1.question_tokens = remove_punc(jieba.cut(cand1.question))
    cand2 = FAQItem(q)
    cand2.question = "What happened the initiative does not go very far."
    cand2.question_tokens = remove_punc(jieba.cut(cand2.question))
    cand3 = FAQItem(q)
    cand3.question = "Those who stand apart from reinforced cooperation"
    cand3.question_tokens = remove_punc(jieba.cut(cand3.question))
    retrieval_result = [cand1, cand2, cand3]
    match(q, retrieval_result)
    # abcnn score is attached too but not printed here.
    for item in retrieval_result:
        print(item.question)
        print('bm25', item.bm25_similarity_score)
        print('edit', item.edit_similarity_score)
        print('jaccard', item.jaccard_similarity_score)
|
#B
# Read a count and a line of integers; print every index at which the
# sequence strictly decreases (value greater than its successor).
s = int(input())
dd = [int(token) for token in input().split()]
for index in range(s - 1):
    if dd[index] > dd[index + 1]:
        print(index)
|
from stack import Stack
def dec_to_bin(dec):
    """Convert a non-negative integer to its binary string (42 -> '101010').

    Fixes over the original:
      * dec == 0 now returns '0' instead of the empty string;
      * negative input raises ValueError instead of looping forever
        (Python floor division never drives a negative value to 0 via //2).
    A plain list serves as the stack of remainders.
    """
    if dec < 0:
        raise ValueError("dec_to_bin() expects a non-negative integer")
    if dec == 0:
        return '0'
    bits = []
    while dec != 0:
        bits.append(str(dec % 2))
        dec = dec // 2
    # Remainders are produced least-significant first; reverse for reading.
    return ''.join(reversed(bits))
print(dec_to_bin(42) ) # returns 101010
print(dec_to_bin(100)) # returns 1100100
# Pre-Django-3.2 hook selecting this package's AppConfig subclass.
default_app_config = "modoboa.admin.apps.AdminConfig"
|
from token_types import Token_types
from typing import Union
'''
Token class used to store the token type and value.
'''
class Token:
    """Lexical token: a Token_types tag paired with its literal value."""

    def __init__(self, type: Token_types, value: Union[str, float]):
        # 'type' shadows the builtin, but the parameter name is part of the
        # public signature and is kept unchanged.
        self.type = type
        self.value = value

    def __str__(self):
        # Same rendering as the original .format() call, e.g. Token(PLUS, '+')
        return f'Token({self.type}, {self.value!r})'

    # repr mirrors str exactly, as in the original implementation.
    __repr__ = __str__
from scipy.special import beta, betainc
from scipy.integrate import solve_ivp, cumtrapz
from numpy import sign, absolute, arange, meshgrid
from matplotlib import pyplot as plt
"""
================================================================================
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TWO-PARAMETER (twopara) CLASS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
================================================================================
The two-parameter (twopara) class defines the periodic differential system of
autonomous expressions which are specific power-law based generalizations of
the sine and cosine relationships often referred to as Generalized Trigonometric
functions, Ateb functions, or Lindqvist functions. This class models the
autonomous system of two first-order differential equations
y' = a*sign(x)*|x|^(p-1), x(0) = x_0 (1)
x' = -b*sign(y)*|y|^(q-1), y(0) = y_0
for p,q > 1 and a,b > 0.
twopara( p , q , initial_conditions , coef=None )
exponents:
The exponents are given individually for each of p and q described
in the differential system (1).
initial_conditions:
The initial conditions argument is a tuple which provides the initial
conditions x_0 and y_0 necessary to solve the system of two first-order
equations (1). It is required in order to define the class.
initial_conditions = (x_0, y_0)
coef:
The coef argument is a tuple which provides the coefficients a and b in
the differential system (1). It is not a required argument. If coef is
not provided it is assumed that a=b=1.
coef = (a, b) or (1, 1)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
FUNCTIONS OF THE TWO-PARAMETER CLASS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The two-parameter class contains several functions which performs operations
with respect to the system in (1); namely, its inherent behaviors, qualities,
and representation.
s_max()
This function returns the maximum/minimum extrema of the generalized
sine function - i.e.,
s_max = ((q / a) * (b * |x_0|^p / p + a * |y_0|^q / q))^(1 / q)
c_max()
This function returns the maximum/minimum extrema of the generalized
cosine function - i.e.,
c_max = ((p / b) * (b * |x_0|^p / p + a * |y_0|^q / q))^(1 / p)
sum_constant()
This function returns the total constant equivalent of the generalized
ellipse - i.e.,
c = a * |x_0|^p / p + b * |y_0|^q / q
frequency()
This function returns the frequency of the parameterized functions
described by the system (1). The frequency is a function of all given
parameters of the system.
f = (p^(1 - 1/p) / q^(1/q)) * (|x_0|^p / p + |y_0|^q / q)^(1 - 1/p - 1/q)
period()
This function returns the period of the parameterized functions
described by the system (1). The period changes depending on the choice
of initial conditions, except in the case of Holder equality systems
when the period is invariant, such as the degenerate case of the
classical trigonometric functions when the period is 2pi. The period
functions uses the Beta function and the inherent symmetry of the
behaviors of the system between the four quadrants to return the value
of the period
P = 4 * Beta(1/p,1/q) / (q * f)
phase()
This function returns the phase shift of the parameterized functions
described by the system (1) for some set of initial conditions.
func()
This function defines the differential system (1).
para_func( tspan = None, max_step = None, solver = None )
    This function solves the differential system (1) using solve_ivp and
returns the two parametrized functions x(t) and y(t) which can be
thought of as the generalized cosine and generalized sine functions,
respectively. This function may take one argument for the tspan value
necessary in solve_ivp. If no value is given, the default value shall be
one full period of the system as calculated by period().
default:
tspan = USER_DEFINED or period()
max_step = USER_DEFINED or tspan / 100
solver = USER_DEFINED or 'RK45'
================================================================================
================================================================================
"""
class twopara:
    """Periodic power-law generalization of the sine/cosine system

        y' =  a*sign(x)*|x|^(p-1),  x(0) = x_0          (1)
        x' = -b*sign(y)*|y|^(q-1),  y(0) = y_0

    for p, q > 1 and a, b > 0.

    Arguments:
        p, q -- exponents of system (1)
        i_c  -- tuple (x_0, y_0) of initial conditions
        coef -- optional tuple (a, b); defaults to (1, 1)
    """
    #The indexed ordering of all values is intended to preserve that of
    #traditional spatial coordinates, i.e.
    #
    #   self.i_c = [self.i_c[0], self.i_c[1]] = (x_0, y_0)
    #
    #In the solve_ivp state vector (see para_func, which passes the initial
    #state as [i_c[1], i_c[0]]):
    #   y[0] -> generalized sine,   which traditionally corresponds to y
    #   y[1] -> generalized cosine, which traditionally corresponds to x
    def __init__(self, p, q, i_c, coef = None):
        self.p = p
        self.q = q
        self.i_c = i_c
        # Identity test instead of 'coef or [1, 1]' so an explicitly passed
        # (even if falsy/invalid) coef is not silently replaced.
        self.coef = [1, 1] if coef is None else coef
    #This function returns the maximum value of the generalized sine function
    def s_max(self):
        """Max/min extremum of the generalized sine:
        ((q/b) * (a*|x_0|^p/p + b*|y_0|^q/q))^(1/q)."""
        return ((self.q / self.coef[1]) * (self.coef[0] * absolute(self.i_c[0])**self.p / self.p + self.coef[1] * absolute(self.i_c[1])**self.q / self.q))**(1 / self.q)
    #This function returns the maximum value of the generalized cosine function
    def c_max(self):
        """Max/min extremum of the generalized cosine:
        ((p/a) * (a*|x_0|^p/p + b*|y_0|^q/q))^(1/p)."""
        return ((self.p / self.coef[0]) * (self.coef[0] * absolute(self.i_c[0])**self.p / self.p + self.coef[1] * absolute(self.i_c[1])**self.q / self.q))**(1 / self.p)
    def sum_constant(self):
        """Conserved constant of the generalized ellipse:
        a*|x_0|^p/p + b*|y_0|^q/q."""
        return self.coef[0] * absolute(self.i_c[0])**self.p / self.p + self.coef[1] * absolute(self.i_c[1])**self.q / self.q
    #This function returns the frequency of the system
    def frequency(self):
        """Frequency of the parameterized solutions; depends on all
        parameters of the system, including the initial conditions."""
        return (self.coef[0] * (self.p / self.coef[0])**(1 - 1/self.p) / (self.q / self.coef[1])**(1/self.q)) * ((self.coef[0] / self.p) * absolute(self.i_c[0])**self.p + (self.coef[1] / self.q) * absolute(self.i_c[1])**self.q)**(1 - 1/self.p - 1/self.q)
    #This function returns the period of system using the Euler Beta function
    def period(self):
        """Period via the Euler Beta function: 4*B(1/p, 1/q) / (q*f)."""
        return 4 * beta(1/self.p, 1/self.q) / (self.q * self.frequency())
    #This function returns the phase shift using the incomplete Beta function
    def phase(self):
        """Phase shift for the given initial conditions via the regularized
        incomplete Beta function; 0 when x_0 == 0."""
        if self.i_c[0] == 0:
            return 0.0
        else:
            UPPER_BOUND = self.i_c[1] / ((self.coef[0] / self.p) * absolute(self.i_c[0])**self.p + (self.coef[1] / self.q) * absolute(self.i_c[1])**self.q)**(1 - 1/self.p)
            # Bug fix: scipy.special.betainc's signature is betainc(a, b, x)
            # -- the integration limit is the LAST argument. It was
            # previously passed first, i.e. as the shape parameter a.
            return betainc(1/self.p, 1/self.q, UPPER_BOUND) / (self.q * self.frequency())
    #This function returns the expressions of the derivatives dy[i] of each
    #respective function y[i] as defined by y[j]
    def func(self, t, y):
        """Right-hand side of (1) in solve_ivp state order [sine, cosine].

        NOTE(review): the coefficients self.coef are NOT applied here even
        though frequency()/period() account for them; for coef != (1, 1)
        confirm whether the RHS should carry the a/b factors.
        """
        return [ sign(y[1]) * absolute(y[1])**(self.p - 1), -sign(y[0]) * absolute(y[0])**(self.q - 1) ]
    #This function returns the solution to the system using solve_ivp. Because
    #we adhere to the spatial coordinate ordering, self.i_c = (x_0, y_0), we
    #must reverse this order in solve_ivp because it indexes by the given
    #ordering of dy. It returns the solution set such that it respects the
    #traditional Cartesian ordering, i.e. - t, x(t), y(t)
    def para_func(self, tspan = None, max_step = None, solver = None):
        """Solve (1) and return (t, x(t), y(t)).

        Defaults: tspan = period(), max_step = tspan / 100, solver = 'RK45'.
        The state is ordered [y, x] internally, so the initial state is
        [i_c[1], i_c[0]] and the outputs are swapped back on return.
        """
        tspan = tspan or self.period()
        max_step = max_step or tspan / 100
        solver = solver or 'RK45'
        SOLUTION = solve_ivp(self.func, [0, tspan], [self.i_c[1], self.i_c[0]], max_step = max_step, method = solver)
        return SOLUTION.t, SOLUTION.y[1], SOLUTION.y[0]
"""
================================================================================
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PARAPLOT CLASS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
================================================================================
The paraplot class plots and organizes the various output from the twopara
class.
paraplot( twopara )
twopara:
        The twopara argument is an object from the twopara class which carries
all of the associated values for displaying the appropriate plots.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
FUNCTIONS OF THE PARAPLOT CLASS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
gen_ellipse()
This function returns a plot of the generalized ellipse with respect to
the system described by (1). The generalized ellipse is the phase
portrait representation of the system for some set of parameters.
a * |x|^p / p + b * |y|^q / q = a * |x_0|^p / p + b * |y_0|^q / q
para_plot( single_plot = None )
    This function returns a plot of the generalized trigonometric
functions with respect to solving (1). The default is to plot both
parameterized functions on the same graph, but the user may select only
one plot or the other by passing the argument 's' for the generalized
sine or 'c' for the generalized cosine using the single_plot parameter
single_plot = s or c or None for BOTH
================================================================================
================================================================================
"""
class paraplot:
    """Plotting helper for a twopara system: draws the phase portrait
    (generalized ellipse) and the parameterized time-series solutions."""
    def __init__(self, twopara_object):
        # twopara_object supplies p, q, coef, i_c and the solver/extrema API.
        self.twopara_object = twopara_object
    #This function plots the generalized ellipse which defines the phase
    #portrait for the specific set of initial conditions, which are plotted as
    #a phase shifted line along the circumference.
    def gen_ellipse(self, grid_step = None):
        """Contour-plot the level set a|x|^p/p + b|y|^q/q = const and mark
        the initial-condition point. grid_step optionally overrides the
        (x, y) sampling steps; defaults to 1/1000 of each extremum."""
        grid_step = grid_step or [self.twopara_object.c_max() / 1000, self.twopara_object.s_max() / 1000]
        # Sample slightly beyond the extrema (factor 1.15) so the contour
        # is not clipped at the plot edge.
        x = arange(-1.15*self.twopara_object.c_max(), 1.15*self.twopara_object.c_max(), grid_step[0])
        y = arange(-1.15*self.twopara_object.s_max(), 1.15*self.twopara_object.s_max(), grid_step[1])
        X, Y = meshgrid(x, y)
        Z = self.twopara_object.coef[0] * absolute(X)**self.twopara_object.p / self.twopara_object.p + self.twopara_object.coef[1] * absolute(Y)**self.twopara_object.q / self.twopara_object.q
        # The single contour level is the conserved constant of the system.
        level = self.twopara_object.coef[0] * absolute(self.twopara_object.i_c[0])**self.twopara_object.p / self.twopara_object.p + self.twopara_object.coef[1] * absolute(self.twopara_object.i_c[1])**self.twopara_object.q / self.twopara_object.q
        plt.contour(X, Y, Z, colors='black', levels = [level])
        plt.plot([0,self.twopara_object.i_c[0]], [0,self.twopara_object.i_c[1]], 'k:', self.twopara_object.i_c[0], self.twopara_object.i_c[1], 'ko')
        plt.title('p = ' + str(self.twopara_object.p) + ', q = ' + str(self.twopara_object.q) + '\ncoefficients a = ' + str(self.twopara_object.coef[0]) + ' and b = ' + str(self.twopara_object.coef[1]) + ', initial conditions = ' + '(' + str(self.twopara_object.i_c[0]) + ',' + str(self.twopara_object.i_c[1]) + ')')
        plt.text(1.1*self.twopara_object.i_c[0], 1.1*self.twopara_object.i_c[1], '(' + str(self.twopara_object.i_c[0]) + ',' + str(self.twopara_object.i_c[1]) + ')')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.show()
    #This function plots the generalized sine and cosine values of the specified
    #system. Default plots both. Changing single_plot to 's' or 'c' plots each
    #individually.
    def para_plot(self, tspan = None, max_step = None, single_plot = None):
        """Plot the solved time series. single_plot='s' draws only the
        generalized sine, 'c' only the cosine; anything else draws both.
        tspan/max_step are forwarded to twopara.para_func."""
        t, c, s = self.twopara_object.para_func(tspan, max_step)
        if single_plot == 's':
            plt.plot(t, s, 'k')
            plt.title('Generalized Sine function')
            plt.ylabel('y')
        elif single_plot == 'c':
            plt.plot(t, c, 'k--')
            plt.title('Generalized Cosine function')
            plt.ylabel('x')
        else:
            plt.plot(t, s, 'k', t, c, 'k--')
            plt.title('Generalized Sine and Cosine functions')
            plt.legend(['gen sine', 'gen cosine'], loc=3)
        plt.xlabel('t')
        plt.show()
|
import queue
def reverseChild(root):
    """Mirror the tree in place by swapping left/right at every node.

    Returns the same root object (or None for an empty tree).
    """
    if root is None:
        return None
    root.left, root.right = root.right, root.left
    reverseChild(root.left)
    reverseChild(root.right)
    return root
class Node(object):
    """Binary-tree node holding a value and two child links."""
    def __init__(self, value):
        self.val = value
        self.left = None
        self.right = None


class createTreeFromLevelOrder(object):
    """Build a binary tree from a level-order (array/heap) listing.

    Element i's children live at indices 2*i + 1 and 2*i + 2.
    """
    def __init__(self, level_order):
        self.level_order = level_order

    def findLeftChild(self, index):
        """Return a fresh leaf Node for index's left child, or None."""
        if((index*2 + 1)>= len(self.level_order)):
            return None
        return Node(self.level_order[(index*2) + 1])

    def findRightChild(self, index):
        """Return a fresh leaf Node for index's right child, or None."""
        if((index*2 + 2)>= len(self.level_order)):
            return None
        return Node(self.level_order[(index*2) + 2])

    def createTree(self, index):
        """Recursively build and return the subtree rooted at `index`.

        Bug fix: the original attached leaf nodes from findLeftChild/
        findRightChild and discarded the result of a stray
        self.createTree(index + 1) call, so inputs with more than three
        elements lost every level below the second. Recursing on the heap
        child indices builds the complete tree.
        """
        if(index>= len(self.level_order)):
            return None
        root = Node(self.level_order[index])
        root.left = self.createTree(index*2 + 1)
        root.right = self.createTree(index*2 + 2)
        return root
def levelOrder(root):
    """Breadth-first traversal; returns node values in level order.

    Fixes over the original: queue.Queue(maxsize=20) made put() block
    forever once more than 20 nodes were pending, and a None root crashed
    on node.val. The queue is now unbounded and an empty tree yields [].
    """
    result = []
    if root is None:
        return result
    q = queue.Queue()  # unbounded: put() never blocks
    q.put(root)
    while(not q.empty()):
        node = q.get()
        result.append(node.val)
        if node.left is not None:
            q.put(node.left)
        if node.right is not None:
            q.put(node.right)
    return result
if __name__ == "__main__":
arr = ['a', 'b', 'c', 'd', 'e', 'f']
tree = createTreeFromLevelOrder(arr)
root = tree.createTree(0)
#root_reverse = reverseChild(root)
result = levelOrder(root)
print(result)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.