text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
# @Time : 2019.9.18
# @Author : Xie Junming
# @Licence : bio-totem
from PIL import Image
import numpy as np
from skimage import io
from imutils import paths
import os
from tqdm import tqdm
import re
import cv2
import concurrent.futures
import time
step = 512
patch_size = 512
scale_range = 0.6
stain_channels = ['h', 's', 'v']
aug_num = 2
read_base_dir = '/cptjack/totem/Colon Pathology/openslide_test/ICIAR2018_BACH_Challenge/Train/Photos/'
save_base_dir = "./data/dataset_step_%(step)s_patch_%(patch)s_scale_%(scale)s" \
"_channels_%(channels)s_aug_%(aug)s" % {'step': step, 'patch': patch_size, 'scale': scale_range,
'channels': ''.join(stain_channels), 'aug': aug_num}
class data_preprocessing:
    """Cut large pathology photos into patches and save them as a dataset,
    with optional random HSV color augmentation.

    cut_data spawns a process pool, so call it from inside an
    ``if __name__ == '__main__':`` guard (a single-process alternative is
    left commented out in cut_data).
    """

    def __init__(self,
                 step=512,
                 patch_size=512,
                 scale_range=0,
                 stain_channels=('h', 's', 'v'),
                 aug_num=2):
        """
        :param step: stride in pixels between consecutive patches, default 512
        :param patch_size: width/height in pixels of each saved patch, default 512
        :param scale_range: each augmented channel is multiplied by a random
            factor drawn from roughly [1 - scale_range, 1 + scale_range];
            0 disables color augmentation (the default)
        :param stain_channels: which HSV channels to perturb, default all three
        :param aug_num: number of augmented copies saved per patch
        """
        self.step = step
        self.patch_size = patch_size
        self.scale_range = scale_range
        # Copy to a list so a shared/default sequence is never mutated by callers.
        self.stain_channels = list(stain_channels)
        self.aug_num = aug_num

    def get_scale(self):
        """Draw a random multiplicative factor (centered on 1.08), rejecting
        values within 0.012 of 1 so every augmentation visibly changes the image."""
        while True:
            scale = np.random.uniform(low=1.08 - self.scale_range,
                                      high=1.08 + self.scale_range)
            if abs(scale - 1) > 0.012:
                return scale

    def hsv_aug(self, img):
        """Randomly rescale the configured HSV channels of an RGB image.

        :param img: H x W x 3 uint8 RGB array
        :return: augmented RGB uint8 array, or 0 when scale_range == 0
            (callers rely on the falsy return to skip saving)
        """
        if self.scale_range == 0:
            return 0
        # Work in float so scaling cannot wrap around uint8 before clipping.
        hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
        # BUGFIX: the original if/elif chain augmented only the FIRST channel
        # present in stain_channels; each listed channel is independent.
        for idx, channel in enumerate('hsv'):
            if channel in self.stain_channels:
                hsv_img[:, :, idx] *= self.get_scale()
        hsv_img = np.clip(hsv_img, 0, 255).astype(np.uint8)
        return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)

    def get_patch(self, file, result_dir, test_data=False):
        """Slice one large image into patches and save them to *result_dir*.

        A patch cut at grid position (x, y) from test.png is saved as
        test_x_y_.png; its i-th color-augmented copy as test_x_y_i.png.

        :param file: path of the large source image
        :param result_dir: directory the patches are written into
        :param test_data: when True, skip color augmentation
        """
        f = re.split(r'/|\\', file)[-1]
        f_name = f.split('.')[-2]
        img = np.asarray(Image.open(file))
        h_count = img.shape[0] // self.step
        w_count = img.shape[1] // self.step
        for y in range(h_count):
            for x in range(w_count):
                x0 = x * self.step
                y0 = y * self.step
                patch = img[y0:y0 + self.patch_size, x0:x0 + self.patch_size, :]
                # Heuristic background filter, kept exactly as the original
                # (NOTE: operates on uint8, so the subtraction wraps around —
                # behavior preserved deliberately; confirm before "fixing").
                rgb_s = (abs(patch[:, :, 0] - 107) >= 93) & \
                        (abs(patch[:, :, 1] - 107) >= 93) & \
                        (abs(patch[:, :, 2] - 107) >= 93)
                if np.sum(rgb_s) >= (self.patch_size * self.patch_size) * 0.6:
                    continue
                # Drop truncated patches at the right/bottom borders.
                if patch.shape != (self.patch_size, self.patch_size, 3):
                    continue
                io.imsave(result_dir + '/' + f_name + '_' + str(x) + '_' + str(y) + '_.png', patch)
                if test_data:
                    continue
                for i in range(self.aug_num):
                    save_path = result_dir + '/' + f_name + '_' + str(x) + '_' + str(y) + '_' + str(i) + '.png'
                    patch_save = self.hsv_aug(patch)
                    # hsv_aug returns 0 when augmentation is disabled.
                    if np.sum(patch_save):
                        io.imsave(save_path, patch_save)

    def cut_data(self, img_list, save_dir, test_data=False):
        """Cut every image in *img_list* into patches, in parallel.

        :param img_list: list of large-image file paths
        :param save_dir: output directory (created, with parents, if missing)
        :param test_data: when True, only cut — no color augmentation
        """
        if not os.path.exists(save_dir):
            # makedirs (not mkdir) so missing parent directories don't crash.
            os.makedirs(save_dir)
        print('\nsaving in %s' % save_dir)
        start_time = time.asctime(time.localtime(time.time()))
        # Heavy I/O (read/save images): fan out across worker processes.
        with concurrent.futures.ProcessPoolExecutor(30) as executor:
            for img in img_list:
                executor.submit(self.get_patch, img, save_dir, test_data)
        # Single-process alternative:
        # for i in tqdm(img_list):
        #     self.get_patch(i, save_dir, test_data=test_data)
        print('\nstart at %s' % start_time)
        print('\nend at %s' % time.asctime(time.localtime(time.time())))
if __name__=='__main__':
    """
    Build the train/test directory structure, split the image paths using the
    held-out test list, and run patch extraction.
    """
    def read_file(filename='../ICIAR_visualization/classify/class_0'):
        # Return the basename of every line in *filename*.
        with open(filename) as f:
            return [re.split(r'/|\\', line.rstrip('\n'))[-1] for line in f]

    train_dir = os.path.sep.join([save_base_dir, 'train'])
    test_dir = os.path.sep.join([save_base_dir, 'test'])
    CLASSES = ["Normal", "Benign", "Insitu", "Invasive", ]
    read_dir, train_class_dir, test_class_dir = [], [], []
    for v in CLASSES:
        read_dir.append(read_base_dir + v)
        train_class_dir.append(os.path.sep.join([train_dir, v]))
        test_class_dir.append(os.path.sep.join([test_dir, v]))
        if not os.path.exists(os.path.sep.join([train_dir, v])):
            os.makedirs(os.path.sep.join([train_dir, v]))
        if not os.path.exists(os.path.sep.join([test_dir, v])):
            os.makedirs(os.path.sep.join([test_dir, v]))
    train_paths = {}
    for i, v in enumerate(read_dir):
        train_paths[i] = list(paths.list_images(v))
    # Basenames belonging to the held-out test set.
    files = [
        "/cptjack/totem/xjunming/ICIAR/ICIAR_visualization/classify/test"
    ]
    val_type = []
    for f in files:
        val_type.extend(read_file(f))
    val_type = [i for i in val_type if i != '']
    # BUGFIX: the original removed items from train_paths[t] while enumerating
    # it, which silently skips the element following every match. Build both
    # partitions first, then reassign.
    test_paths = {}
    for t in train_paths:
        test_paths[t] = [v for v in train_paths[t]
                         if re.split(r'/|\\', v)[-1] in val_type]
        train_paths[t] = [v for v in train_paths[t]
                          if re.split(r'/|\\', v)[-1] not in val_type]
    print("\n start cutting pics")
    p = data_preprocessing(
        step=step,
        patch_size=patch_size,
        scale_range=scale_range,
        stain_channels=stain_channels,
        aug_num=aug_num)
    for i in tqdm(train_paths):
        p.cut_data(train_paths[i], save_dir=train_class_dir[i])
    for i in tqdm(test_paths):
        p.cut_data(test_paths[i], save_dir=test_class_dir[i], test_data=True)
|
#!/usr/bin/env python
"""Script to import 'sys' module and investigate some of its properties"""
__author__ = 'Saul Moore (sm5911@imperial.ac.uk)'
__version__ = '0.0.1'
import sys
# Echo how the script was invoked (Python 2 print statements).
print "This is the name of the script: ", sys.argv[0] # Prints the name of the module
print "Number of arguments: ", len(sys.argv) # Shows the number of arguments
print "The arguments are: ", str(sys.argv) # Prints arguments
def main(argv):
    """Print a boilerplate message and exit with status 0.

    Arguments gathered in the ``if __name__ == "__main__":`` section are fed
    to this entry point.
    """
    print('This is boilerplate')
    # BUGFIX: the original called sys.exit(status) with 'status' undefined
    # (NameError); exit with an explicit success code instead.
    sys.exit(0)
|
import os
from json import loads
def getEnv(key):
    """Look up *key* in a local env.json file, falling back to the OS environment.

    :param key: name of the configuration entry
    :return: the value as a string
    :raises Exception: when the key is in neither env.json nor os.environ
    """
    try:
        path = os.path.abspath('env.json')
        # Context manager guarantees the file handle is closed.
        with open(path, 'r') as arq:
            j = loads(arq.read())
        return str(j[key])
    # Narrowed from a bare except: missing/unreadable file, invalid JSON,
    # or a missing key all fall back to the environment.
    except (OSError, ValueError, KeyError):
        try:
            return os.environ[key]
        except KeyError:
            raise Exception(f'Environment variable {key} not found')
|
from flask import request
from flask_restx import Namespace, Resource, fields, reqparse, marshal
from src.api.users.views import extract_token
from src.api.users.crud import get_user_by_session_token
from src.api.reviews.crud import (
get_all_reviews,
get_review_by_id,
get_reviews_by_place,
get_reviews_by_user,
get_reviews_composite,
add_review,
update_review,
delete_review,
)
# Namespace grouping all /reviews endpoints.
reviews_namespace = Namespace("reviews")
# Response-marshalling model registered on the namespace (used by marshal_with).
review = reviews_namespace.model(
    "Review",
    {
        "id": fields.Integer(readOnly=True),
        "user_id": fields.Integer(required=True),
        "place_id": fields.Integer(required=True),
        "rating": fields.Integer(required=True),
        "text": fields.String(required=True),
        "created_date": fields.DateTime,
    },
)
# Plain field map for manual marshal() calls.
review_fields = {
    "id": fields.Integer,
    "user_id": fields.Integer,
    "place_id": fields.Integer,
    "rating": fields.Integer,
    "text": fields.String,
    "created_date": fields.DateTime,
}
class ReviewsList(Resource):
    """Collection endpoint: query reviews and create new ones."""

    @reviews_namespace.marshal_with(review, as_list=True)
    def get(self):
        """Return query result on reviews based on place/user id"""
        parser = reqparse.RequestParser()
        parser.add_argument("user", type=int, required=False)
        parser.add_argument("place", type=int, required=False)
        args = parser.parse_args()
        user_id = args.get("user")
        place_id = args.get("place")
        if user_id is None and place_id is None:
            reviews = get_all_reviews()
        elif user_id is None:
            reviews = get_reviews_by_place(place_id)
        elif place_id is None:
            reviews = get_reviews_by_user(user_id)
        else:
            reviews = get_reviews_composite(user_id=user_id, place_id=place_id)
        # Marshal an empty list rather than None when the query found nothing
        # (replaces the obscure tuple-index idiom; removed the debug print).
        return reviews if reviews is not None else []

    @reviews_namespace.response(200, "Review updated successfully!")
    @reviews_namespace.response(400, "Request body malformed.")
    @reviews_namespace.response(400, "Invalid rating value.")
    def post(self):
        """Creates a new review for the authenticated user."""
        # Extract and validate the session token.
        was_successful, session_token = extract_token(request)
        response_object = {}
        if not was_successful:
            # On failure, extract_token's second element is the error message.
            response_object["message"] = session_token
            return response_object, 400
        user = get_user_by_session_token(session_token)
        if user is None:
            response_object["message"] = "Invalid Token."
            return response_object, 400
        # Validate the request body.
        user_id = user.id
        post_data = request.get_json()
        place_id = post_data.get("place_id")
        rating = post_data.get("rating")
        text = post_data.get("text")
        if None in [user_id, place_id, rating, text]:
            response_object["message"] = "Request body malformed."
            return response_object, 400
        # Rating must be an int (bool deliberately rejected) between 0 and 5.
        if type(rating) is not int or not (0 <= rating <= 5):
            response_object["message"] = "Request body malformed."
            return response_object, 400
        add_review(user_id, place_id, rating, text)
        response_object["message"] = "Review posted successfully!"
        return response_object, 201
class Reviews(Resource):
    """Single-review endpoint: fetch, update, delete by id."""

    @reviews_namespace.marshal_with(review)
    @reviews_namespace.response(200, "Success")
    @reviews_namespace.response(400, "Cannot edit other user's review")
    @reviews_namespace.response(404, "Review <review_id> does not exist")
    def get(self, review_id):
        """Returns a single review."""
        review = get_review_by_id(review_id)
        if review is None:
            reviews_namespace.abort(404, f"Review {review_id} does not exist")
        return review, 200

    @reviews_namespace.response(200, "Review updated successfully!")
    @reviews_namespace.response(404, "Review <review_id> does not exist.")
    def put(self, review_id):
        """Updates the star rating / text of a review (owner only)."""
        was_successful, session_token = extract_token(request)
        response_object = {}
        if not was_successful:
            response_object["message"] = session_token
            return response_object, 400
        user = get_user_by_session_token(session_token)
        if user is None:
            response_object["message"] = "Invalid token."
            return response_object, 400
        # Fetch once; the original re-fetched the same review a second time.
        review = get_review_by_id(review_id)
        if review is None:
            reviews_namespace.abort(404, f"Review {review_id} does not exist")
        elif user.id != review.user_id:
            # Only the author may edit their review.
            reviews_namespace.abort(400, "Cannot edit other user's review")
        post_data = request.get_json()
        rating = post_data.get("rating")
        text = post_data.get("text")
        new_review = update_review(review, rating, text)
        response_object["message"] = f"Review {review.id} was updated!"
        # Debug print removed; marshal manually since this handler returns a
        # different shape than the class-level model decorators assume.
        return marshal(new_review, review_fields), 200

    @reviews_namespace.response(200, "<review_id> was removed successfully!")
    @reviews_namespace.response(404, "Review <review_id> does not exist.")
    def delete(self, review_id):
        """Deletes a review (owner only)."""
        was_successful, session_token = extract_token(request)
        response_object = {}
        if not was_successful:
            response_object["message"] = session_token
            return response_object, 400
        user = get_user_by_session_token(session_token)
        if user is None:
            response_object["message"] = "Invalid token."
            return response_object, 400
        # Fetch once; the original fetched before auth and again after.
        review = get_review_by_id(review_id)
        if review is None:
            reviews_namespace.abort(404, f"Review {review_id} does not exist")
        elif user.id != review.user_id:
            reviews_namespace.abort(400, "Cannot delete other user's review")
        delete_review(review)
        response_object["message"] = f"Review {review.id} was deleted."
        return response_object, 200
# Mount the collection at /reviews and the detail resource at /reviews/<id>.
reviews_namespace.add_resource(ReviewsList, "")
reviews_namespace.add_resource(Reviews, "/<int:review_id>")
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
import uuid
import os
def image_file_path(instance, filename):
    """Build a unique storage path for a newly uploaded image.

    The original extension is kept; the base name is replaced with a UUID
    so uploads can never collide.
    """
    extension = filename.split('.')[-1]
    unique_name = '{}.{}'.format(uuid.uuid4(), extension)
    return os.path.join('uploads/images/', unique_name)
class UserManager(BaseUserManager):
    """Manager for the custom email-based User model."""

    def create_user(self, email, password=None, **extra_fields):
        """Creates and saves a new User.

        :raises ValueError: when no email is supplied
        """
        if not email:
            raise ValueError('Users must have an email address')
        # normalize_email lowercases the domain part (Django convention),
        # preventing duplicate accounts that differ only in domain case.
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Creates and saves a new super user (staff + superuser flags set)."""
        user = self.create_user(email, password, **extra_fields)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class Image(models.Model):
    """Model for image uploads; files are stored under uploads/images/."""
    # null=True lets a row exist before any file is attached.
    image = models.ImageField(null=True, upload_to=image_file_path)
class Book(models.Model):
    """A book with author, title, description and an optional cover image."""
    author = models.CharField(max_length=100)
    title = models.CharField(max_length=100)
    description = models.TextField()
    # SET_NULL keeps the book if its image row is deleted.
    image = models.ForeignKey(Image, on_delete=models.SET_NULL, null=True, blank=True)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    email = models.EmailField(max_length=255, unique=True)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # Books associated with this user (reverse accessor: Book.uploader).
    books = models.ManyToManyField(Book, related_name="uploader")

    def __str__(self):
        # Display name: "First Last".
        return "{} {}".format(self.first_name, self.last_name)

    objects = UserManager()
    # Authenticate with the email field rather than a separate username.
    USERNAME_FIELD = 'email'
|
from flask import Flask, render_template, request, session
from flask_session import Session
from werkzeug.wrappers import Request, Response
# Flask app configured for server-side (filesystem) sessions.
app=Flask(__name__, template_folder='template')
app.config["SESSION_PERMANENT"]=False
app.config["SESSION_TYPE"]="filesystem"
Session(app)
# NOTE(review): notes is module-global, so it is shared by every user/session
# despite flask_session being configured — confirm this is intended.
notes=[]
@app.route("/", methods=["GET", "POST"])
def index():
    """Render the notes page; on POST, record the submitted note first."""
    if request.method == "POST":
        submitted = request.form.get("note")
        notes.append(submitted)
    return render_template("index.html", notes=notes)
|
# Project Euler 48: last ten digits of 1^1 + 2^2 + ... + 1000^1000.
# 'total' replaces the original name, which shadowed the built-in sum();
# the Python 2 print statement is replaced with the portable call form.
total = 0
for x in range(1, 1001):
    total += x ** x
digits = str(total)
print(digits[-10:])
|
#!/usr/bin/env python3
# encoding: utf-8
"""
@version: 0.1
@author: lyrichu
@license: Apache Licence
@contact: 919987476@qq.com
@site: http://www.github.com/Lyrichu
@file: test_GA.py
@time: 2018/06/06 16:45
@description:
test for GA
"""
from time import time
import sys
sys.path.append("..")
from sopt.GA.GA import GA
from sopt.util.functions import *
from sopt.util.ga_config import *
from sopt.util.constraints import *
class TestGA:
    """Smoke test: run sopt's GA on the Rosenbrock benchmark function."""

    def __init__(self):
        # Objective function and its metadata from sopt.util.functions.
        self.func = Rosenbrock
        self.func_type = Rosenbrock_func_type
        self.variables_num = Rosenbrock_variables_num
        self.lower_bound = Rosenbrock_lower_bound
        self.upper_bound = Rosenbrock_upper_bound
        # Core GA hyper-parameters.
        self.cross_rate = 0.8
        self.mutation_rate = 0.1
        self.generations = 300
        self.population_size = 200
        self.binary_code_length = 20
        # Exponents controlling rate decay across generations.
        self.cross_rate_exp = 1
        self.mutation_rate_exp = 1
        self.code_type = code_type.real
        self.cross_code = False
        # Selection / crossover / mutation strategy choices.
        self.select_method = select_method.proportion
        self.rank_select_probs = None
        self.tournament_num = 2
        self.cross_method = cross_method.uniform
        self.arithmetic_cross_alpha = 0.1
        self.arithmetic_cross_exp = 1
        self.mutation_method = mutation_method.uniform
        self.none_uniform_mutation_rate = 1
        # Constraint handling via the penalty method.
        self.complex_constraints = [constraints1,constraints2,constraints3]
        self.complex_constraints_method = complex_constraints_method.penalty
        self.complex_constraints_C = 1e8
        self.M = 1e8
        # Every attribute set above is forwarded to GA as a keyword argument,
        # so renaming/adding attributes here changes the GA constructor call.
        self.GA = GA(**self.__dict__)

    def test(self):
        """Run the GA once, report wall-clock time, save the plot and results."""
        start_time = time()
        self.GA.run()
        print("GA costs %.4f seconds!" % (time()-start_time))
        self.GA.save_plot()
        self.GA.show_result()
if __name__ == '__main__':
    # Run the Rosenbrock GA benchmark when executed as a script.
    TestGA().test()
|
import turtle
def drawCurve(turtle, l, order):
    """Recursively draw one Koch-style curve segment.

    :param turtle: drawing object exposing forward/left/right
    :param l: segment length; each recursion level divides it by 3
    :param order: recursion depth; at 0 a straight segment is drawn
    """
    if order == 0:
        # BUGFIX: draw the computed segment length instead of a fixed 5 px,
        # so the curve's overall extent matches the l passed by the caller.
        turtle.forward(l)
        return
    l /= 3
    drawCurve(turtle, l, order - 1)
    turtle.left(60)
    drawCurve(turtle, l, order - 1)
    turtle.right(120)
    drawCurve(turtle, l, order - 1)
    turtle.left(60)
    drawCurve(turtle, l, order - 1)
if __name__=='__main__':
    # Draw a 4th-order curve of width 300 across an 800x400 window,
    # starting from the lower-left corner, pen lifted while positioning.
    turtle.setup(800, 400)
    turtle.up()
    turtle.goto(-400,-250)
    turtle.down()
    drawCurve(turtle, 300, 4)
    turtle.exitonclick()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from ..login.models import User
# Create your models here.
class LocationManager(models.Manager):
    """Custom manager with validation/creation helpers for Location (Python 2)."""

    def validate_and_create(self, data, id):
        """Validate form *data* and create a Location captained by user *id*.

        Returns (True, location) on success, or (False, [error, ...]) when
        any field fails validation.
        """
        print data, "\n woo we have data"
        errors = []
        # Field-by-field length/format checks; all failures are collected.
        if len(data['name']) < 2:
            print "name too short"
            errors.append('name is too short')
        if len(data['street_address']) < 2:
            print "street_address too short"
            errors.append('street address is too short')
        if len(data['city']) < 2:
            print "city too short"
            errors.append('city name is too short')
        if not len(data['state']) == 2:
            errors.append('state abbreviation must be two characters long')
        try:
            zipcode = int(data['zip_code'])
        except:
            # NOTE(review): bare except — only ValueError is expected here.
            errors.append('zip code must be numbers')
        if errors:
            return (False, errors)
        else:
            current_user = User.objects.get(id=id)
            #actually do our database stuff
            new_obj = Location.objects.create(
                name=data['name'],
                street_address=data['street_address'],
                city=data['city'],
                state_abbrev=data['state'],
                zip_code=zipcode,
                captain=current_user
            )
            return (True, new_obj)

    def custom_delete_function(self, id):
        # all validations go here
        #if logged in user has permissions to do so:
        #   delete
        # else:
        #   return errors
        # don't touch the db unless all validations
        pass

    def add_stack(self, data):
        """Attach the Stack named in *data* to the Location named in *data*."""
        print data
        try:
            location = self.get(id=data['location'])
            stack = Stack.objects.get(id=data['stack'])
            location.stacks_offered.add(stack)
            location.save()
            print 'all is well'
        except:
            # NOTE(review): bare except swallows DoesNotExist and everything
            # else — confirm this best-effort behavior is intended.
            print 'error happened'
# class Employee(models.Model):
# user = models.OneToOneField(User)
# is_active = models.BooleanField()
# hire_date = models.DateTimeField()
# location = models.ForeignKey(Location, null=True)
# created_at = models.DateTimeField(auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True)
#
# class Category(models.Model):
# name = models.CharField(max_length=255)
# created_at = models.DateTimeField(auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True)
class Stack(models.Model):
    """A technology stack (language + framework) a location can offer."""
    language = models.CharField(max_length=255)
    main_framework = models.CharField(max_length=255)
    is_first_stack = models.BooleanField()
    # category = models.ForeignKey(Category, related_name='stacks_in_category')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Location(models.Model):
    """A physical campus/location, with an optional captain and offered stacks."""
    name = models.CharField(max_length=255)
    street_address = models.CharField(max_length=255)
    city = models.CharField(max_length=255)
    state_abbrev = models.CharField(max_length=2)
    zip_code = models.IntegerField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # One captain per location; nullable until one is assigned.
    captain = models.OneToOneField(User, null=True)
    stacks_offered = models.ManyToManyField(Stack, related_name='available_locations')
    # Route Location.objects through the validating manager above.
    objects = LocationManager()
|
import pygame
from random import randrange
import os
import pandas as pd
import fonts
def spawn_food(snake):
    """Pick a random, grid-aligned food position not occupied by the snake.

    Coordinates are multiples of the snake's velocity inside the playfield.
    """
    while True:
        food_x = randrange(0, 510 - snake.width, snake.velocity)
        food_y = randrange(0, 510 - snake.height, snake.velocity)
        if [food_x, food_y] not in snake.rectangles:
            return food_x, food_y
def draw_food(x, y, window, snake_obj):
    """Draw the food as a red rectangle sized like one snake segment."""
    pygame.draw.rect(window, (255, 0, 0), (x, y, snake_obj.width, snake_obj.height))
def end_game(window, score, game_over, new_game, quit_text):
    """Show the game-over screen and wait for the player's choice.

    :param window: pygame surface (the layout assumes a 510x510 window)
    :param score: score as a string, rendered near the top
    :param game_over, new_game, quit_text: pre-rendered text surfaces
    :return: True to start a new game, False to quit or on window close
    """
    window.fill((0, 0, 0))
    score_text = fonts.font.render("Score: " + score, True, (0, 0, 255))
    # Stack the three text surfaces vertically, centered as one group.
    g_o_height = game_over.get_height()
    n_g_height = new_game.get_height()
    q_height = quit_text.get_height()
    all_height = g_o_height + n_g_height + q_height
    window.blit(game_over, ((510 - game_over.get_width()) / 2, (510 - all_height) / 2))
    window.blit(new_game, ((510 - new_game.get_width()) / 2, ((510 - all_height) / 2) + g_o_height))
    window.blit(quit_text, ((510 - quit_text.get_width()) / 2, ((510 - all_height) / 2) + g_o_height + n_g_height))
    window.blit(score_text, ((510 - score_text.get_width()) / 2, 60))
    pygame.display.update()
    loop = True
    while loop:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return False
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                # Click landed on the "new game" band.
                if ((510 - all_height) / 2) + g_o_height < pos[1] < 510 - ((510 - all_height) / 2) - q_height:
                    ret_val = True
                    loop = False
                # Click landed on the "quit" band.
                elif 510 - ((510 - all_height) / 2) - q_height < pos[1] < 510 - ((510 - all_height) / 2):
                    ret_val = False
                    loop = False
    return ret_val
def evaluate_current_key(current_key, keys_in):
    """Return the snake's new direction based on pressed arrow keys.

    A direct reversal (e.g. left while moving right) is ignored; only the
    first pressed key in left/right/up/down order is considered.
    """
    opposite = {"left": "right", "right": "left", "up": "down", "down": "up"}
    candidates = (
        (pygame.K_LEFT, "left"),
        (pygame.K_RIGHT, "right"),
        (pygame.K_UP, "up"),
        (pygame.K_DOWN, "down"),
    )
    for key_const, direction in candidates:
        if keys_in[key_const]:
            if current_key != opposite[direction]:
                current_key = direction
            break
    return current_key
def read_scores_file(score, window):
    """Record *score* in scores.csv if it ranks in the top 8, then show scores.

    :param score: the player's score as a string
    :param window: pygame surface forwarded to show_score_screen
    :return: the return value of show_score_screen
    """
    high_score = False
    path_to_csv = "." + os.path.sep + "scores.csv"
    try:
        scores_data = pd.read_csv(path_to_csv)
    except (NameError, FileNotFoundError):
        # First run: create an empty scores file so the read below succeeds.
        blank_scores = pd.DataFrame(columns=["Name", "Score"])
        blank_scores.to_csv(path_or_buf=path_to_csv, index=False)
        scores_data = pd.read_csv(path_to_csv)
    scores_data = scores_data.sort_values(["Score"], ascending=False)
    print(scores_data)
    if len(scores_data) > 8:
        # BUGFIX: sort_values keeps the original index labels, so the
        # label-based lookups ["Score"][7] / drop(8) did not address the
        # 8th-ranked row; use positional indexing instead.
        if int(score) > scores_data["Score"].iloc[7]:
            print("New high score")
            scores_data = scores_data.iloc[:8]
            high_score = True
    else:
        print("New high score")
        high_score = True
    if high_score:
        new_score = pd.DataFrame([["game", int(score)]], columns=["Name", "Score"])
        # BUGFIX: DataFrame.append was removed in pandas 2.0; concat replaces it.
        scores_data = pd.concat([scores_data, new_score], ignore_index=True)
        scores_data = scores_data.sort_values(["Score"], ascending=False)
        print(scores_data)
        scores_data.to_csv(path_to_csv, columns=["Name", "Score"], index=False)
    return show_score_screen(window, scores_data, score)
def show_score_screen(window, scores_data, score):
    """Render the player's score and up to eight high-score rows.

    :param scores_data: DataFrame with 'Name' and 'Score' columns
    :return: True once the screen has been drawn
    """
    window.fill((0, 0, 0))
    score_text = fonts.font.render("Score: " + score, True, (0, 0, 255))
    window.blit(score_text, ((510 - score_text.get_width()) / 2, 60))
    for i in range(1, 9):
        if i > len(scores_data):
            break
        # NOTE(review): [i - 1] is label-based indexing; after the caller's
        # sort_values the labels may be shuffled — confirm .iloc was intended.
        string_to_display = str(i) + ".) " + str(scores_data["Name"][i - 1]) + " " + str(
            scores_data["Score"][i - 1])
        text = fonts.small_font.render(string_to_display, True, (255, 165, 0))
        window.blit(text, (
            (510 - text.get_width()) / 2, 60 + (score_text.get_height() + (text.get_height() * (i - 1))) + (i * 20)))
    # Set score_displayed variable
    return True
|
# Print how many days the given month has in the given year.
ano = int(input("Digite o ano: "))
mes = int(input("Digite o mês em numero: "))
# BUGFIX: the original leap test (ano % 4 == 0 and ano % 100 != 0) misses
# years divisible by 400 (e.g. 2000). Full Gregorian rule applied here; the
# two identical month branches are also merged.
bissexto = (ano % 4 == 0 and ano % 100 != 0) or (ano % 400 == 0)
if mes in (1, 3, 5, 7, 8, 10, 12):
    print("Esse mês tem 31 dias")
elif mes in (4, 6, 9, 11):
    print("Esse mês tem 30 dias")
elif bissexto:
    print("Esse mês tem 29 dias")
else:
    print("Esse mês tem 28 dias")
|
"""User model"""
from sqlalchemy import Column, Integer, BigInteger, ForeignKey, DateTime, Float, VARCHAR
from models.db import Model
from models.base_object import BaseObject
class Task(BaseObject, Model):
    """ORM row describing one trial of the tree-sampling experiment.

    Each trial records which of four trees supplied up to five initial
    samples, the sizes of those samples, and six future sizes per tree.
    All getters return string representations for templating/serialization.
    """
    id = Column(Integer, primary_key=True)
    # Experiment bookkeeping.
    UserNo = Column(Integer)
    TrialNo = Column(Integer)
    BlockNo = Column(Integer)
    Horizon = Column(Integer)
    ItemNo = Column(Integer)
    InitialSampleNb = Column(Integer)
    UnusedTree = Column(Integer)
    # Which tree each of the five initial samples came from.
    InitialSample1Tree = Column(Integer)
    InitialSample2Tree = Column(Integer)
    InitialSample3Tree = Column(Integer)
    InitialSample4Tree = Column(Integer)
    InitialSample5Tree = Column(Integer)
    # Size of each of the five initial samples.
    InitialSample1Size = Column(Integer)
    InitialSample2Size = Column(Integer)
    InitialSample3Size = Column(Integer)
    InitialSample4Size = Column(Integer)
    InitialSample5Size = Column(Integer)
    # Six future sizes for each of the four trees.
    Tree1FutureSize1 = Column(Integer)
    Tree1FutureSize2 = Column(Integer)
    Tree1FutureSize3 = Column(Integer)
    Tree1FutureSize4 = Column(Integer)
    Tree1FutureSize5 = Column(Integer)
    Tree1FutureSize6 = Column(Integer)
    Tree2FutureSize1 = Column(Integer)
    Tree2FutureSize2 = Column(Integer)
    Tree2FutureSize3 = Column(Integer)
    Tree2FutureSize4 = Column(Integer)
    Tree2FutureSize5 = Column(Integer)
    Tree2FutureSize6 = Column(Integer)
    Tree3FutureSize1 = Column(Integer)
    Tree3FutureSize2 = Column(Integer)
    Tree3FutureSize3 = Column(Integer)
    Tree3FutureSize4 = Column(Integer)
    Tree3FutureSize5 = Column(Integer)
    Tree3FutureSize6 = Column(Integer)
    Tree4FutureSize1 = Column(Integer)
    Tree4FutureSize2 = Column(Integer)
    Tree4FutureSize3 = Column(Integer)
    Tree4FutureSize4 = Column(Integer)
    Tree4FutureSize5 = Column(Integer)
    Tree4FutureSize6 = Column(Integer)

    def get_id(self):
        return str(self.id)

    def get_user_no(self):
        return str(self.UserNo)

    def get_trial_no(self):
        return str(self.TrialNo)

    def get_block_no(self):
        return str(self.BlockNo)

    def get_horizon(self):
        return str(self.Horizon)

    def get_item_no(self):
        return str(self.ItemNo)

    def get_initial_sample_nb(self):
        return str(self.InitialSampleNb)

    def get_unused_tree(self):
        return str(self.UnusedTree)

    def get_sample_1_tree(self):
        # BUGFIX: previously returned "sample1,sample2" concatenated, unlike
        # every other get_sample_N_tree getter; now returns only sample 1.
        return str(self.InitialSample1Tree)

    def get_sample_2_tree(self):
        return str(self.InitialSample2Tree)

    def get_sample_3_tree(self):
        return str(self.InitialSample3Tree)

    def get_sample_4_tree(self):
        return str(self.InitialSample4Tree)

    def get_sample_5_tree(self):
        return str(self.InitialSample5Tree)

    def get_sample_1_size(self):
        return str(self.InitialSample1Size)

    def get_sample_2_size(self):
        return str(self.InitialSample2Size)

    def get_sample_3_size(self):
        return str(self.InitialSample3Size)

    def get_sample_4_size(self):
        return str(self.InitialSample4Size)

    def get_sample_5_size(self):
        return str(self.InitialSample5Size)

    def get_tree1_future_size_1(self):
        return str(self.Tree1FutureSize1)

    def get_tree1_future_size_2(self):
        return str(self.Tree1FutureSize2)

    def get_tree1_future_size_3(self):
        return str(self.Tree1FutureSize3)

    def get_tree1_future_size_4(self):
        return str(self.Tree1FutureSize4)

    def get_tree1_future_size_5(self):
        return str(self.Tree1FutureSize5)

    def get_tree1_future_size_6(self):
        return str(self.Tree1FutureSize6)

    def get_tree2_future_size_1(self):
        return str(self.Tree2FutureSize1)

    def get_tree2_future_size_2(self):
        return str(self.Tree2FutureSize2)

    def get_tree2_future_size_3(self):
        return str(self.Tree2FutureSize3)

    def get_tree2_future_size_4(self):
        return str(self.Tree2FutureSize4)

    def get_tree2_future_size_5(self):
        return str(self.Tree2FutureSize5)

    def get_tree2_future_size_6(self):
        return str(self.Tree2FutureSize6)

    def get_tree3_future_size_1(self):
        return str(self.Tree3FutureSize1)

    def get_tree3_future_size_2(self):
        return str(self.Tree3FutureSize2)

    def get_tree3_future_size_3(self):
        return str(self.Tree3FutureSize3)

    def get_tree3_future_size_4(self):
        return str(self.Tree3FutureSize4)

    def get_tree3_future_size_5(self):
        return str(self.Tree3FutureSize5)

    def get_tree3_future_size_6(self):
        return str(self.Tree3FutureSize6)

    def get_tree4_future_size_1(self):
        return str(self.Tree4FutureSize1)

    def get_tree4_future_size_2(self):
        return str(self.Tree4FutureSize2)

    def get_tree4_future_size_3(self):
        return str(self.Tree4FutureSize3)

    def get_tree4_future_size_4(self):
        return str(self.Tree4FutureSize4)

    def get_tree4_future_size_5(self):
        return str(self.Tree4FutureSize5)

    def get_tree4_future_size_6(self):
        return str(self.Tree4FutureSize6)

    def errors(self):
        # BUGFIX: super() previously referenced the undefined name 'Trial'
        # (a NameError at runtime); this class is Task.
        errors = super(Task, self).errors()
        return errors
#coding=utf-8
#__author__ = 'cclin'
#write by cclin 2021.03.24
import sys
import os,os.path
import re
import codecs
import xml.dom.minidom as minidom
from xml.etree import ElementTree as ET
# ==Workaround: after reading an XML file, minidom's default writexml() with
# ==newl='\n' re-writes extra blank lines into the document; this replacement
# ==keeps text-only elements on a single line instead.
def fixed_writexml(self, writer, indent="", addindent="", newl=""):
    """Serialize this element to *writer*.

    indent    -- current indentation
    addindent -- indentation added per nesting level
    newl      -- newline string
    """
    writer.write(indent+"<" + self.tagName)
    attrs = self._get_attributes()
    a_names = attrs.keys()
    # NOTE(review): list.sort() on keys() only works on Python 2; Python 3
    # would need sorted(attrs.keys()) — this script targets Python 2.
    a_names.sort()
    for a_name in a_names:
        writer.write(" %s=\"" % a_name)
        minidom._write_data(writer, attrs[a_name].value)
        writer.write("\"")
    if self.childNodes:
        # An element whose single child is text is written inline: <t>text</t>
        if len(self.childNodes) == 1 \
            and self.childNodes[0].nodeType == minidom.Node.TEXT_NODE:
            writer.write(">")
            self.childNodes[0].writexml(writer, "", "", "")
            writer.write("</%s>%s" % (self.tagName, newl))
            return
        writer.write(">%s"%(newl))
        for node in self.childNodes:
            if node.nodeType is not minidom.Node.TEXT_NODE:
                node.writexml(writer,indent+addindent,addindent,newl)
        writer.write("%s</%s>%s" % (indent,self.tagName,newl))
    else:
        writer.write("/>%s"%(newl))
# Monkey-patch the fixed writer onto every minidom Element.
minidom.Element.writexml = fixed_writexml
'''
# 1.创建DOM树对象
dom=minidom.Document()
# 2.创建根节点。每次都要用DOM对象来创建任何节点。
root_node=dom.createElement('root')
# 3.用DOM对象添加根节点
dom.appendChild(root_node)
# 用DOM对象创建元素子节点
book_node=dom.createElement('book')
# 用父节点对象添加元素子节点
root_node.appendChild(book_node)
# 设置该节点的属性
book_node.setAttribute('price','199')
name_node=dom.createElement('name')
root_node.appendChild(name_node)
# 也用DOM创建文本节点,把文本节点(文字内容)看成子节点
name_text=dom.createTextNode('计算机程序设计语言 第1版')
# 用添加了文本的节点对象(看成文本节点的父节点)添加文本节点
name_node.appendChild(name_text)
<?xml version="1.0" encoding="utf8"?>
<root>
<book price="99">
<name>计算机程序设计语言 第1版</name>
</book>
</root>
# 其他属性与方法:
# 获取根节点
root=dom.documentElement
# 节点名称
print(root.nodeName)
# 节点类型:'ELEMENT_NODE',元素节点; 'TEXT_NODE',文本节点; 'ATTRIBUTE_NODE',属性节点
print(root.nodeType)
# 获取某个节点下所有子节点,是个列表
print(root.childNodes)
# 根据标签名获取元素节点,是个列表
book=root.getElementsByTagName('book')
# 获取节点属性
print(book[0].getAttribute('price'))
# 获取某节点的父节点
print(author.parentNode.nodeName)
'''
def walk_dirs(dir, list, topdown=True):
    """Append every sub-directory path under *dir* to *list*.

    Paths use forward slashes and end with a trailing '/'.
    (Parameter names kept for interface compatibility, though they shadow
    the dir/list builtins.)
    """
    for root, dirs, files in os.walk(dir, topdown):
        for name in dirs:
            path = os.path.join(root, name) + r"/"
            path = path.replace("\\", "/")
            #pa=path.lower()
            list.append(path)
            # BUGFIX: the bare Python 2 print statement broke this module on
            # Python 3; the parenthesized call works on both.
            print(path)
def walk_files(dir, list, aext, topdown=True):
    """Collect files under *dir* whose extension equals *aext* into *list*.

    Paths use forward slashes; anything inside .svn directories and paths
    already present in *list* are skipped.
    """
    for root, dirs, files in os.walk(dir, topdown):
        for name in files:
            extension = os.path.splitext(name)[1]
            full_path = os.path.join(root, name).replace("\\", "/")
            if extension != aext:
                continue
            if ".svn" in full_path or full_path in list:
                continue
            list.append(full_path)
def forceDirectory(file):
    """Ensure the parent directory of *file* exists, creating it if needed."""
    parent = os.path.dirname(file)
    if not os.path.exists(parent):
        os.makedirs(parent)
def nappendArrayToFile(rlist, rfile):
    """Append each string in *rlist* to *rfile*, one per line."""
    with open(rfile, "a") as out:
        for entry in rlist:
            out.write(entry + "\n")
def nwriteArrayToFile(rlist, rfile):
    """Overwrite *rfile* with the lines in *rlist*, creating parent dirs first."""
    forceDirectory(rfile)
    with open(rfile, "w") as out:
        for entry in rlist:
            out.write(entry + "\n")
def deleteEmptyLine(rfile):
    """Remove blank (whitespace-only) lines from *rfile* in place.

    BUGFIX: the original left the read handle open while re-opening the same
    file for writing, and never closed/flushed the write handle; context
    managers guarantee both are closed in order.
    """
    with open(rfile, "r") as f:
        newlist = [line for line in f if line.strip() != ""]
    with open(rfile, "w") as wf:
        wf.writelines(newlist)
def getCurrentPath():
    """Return the directory containing this script, with a trailing slash."""
    return os.path.dirname(os.path.realpath(__file__)) + r"/"
def writeArrayToFile(rlist, rfile):
    """Append every entry of *rlist* to *rfile*, newline-terminated."""
    with open(rfile, "a") as handle:
        for entry in rlist:
            handle.write(entry + "\n")
def writeLineToFile(line, rfile):
    """Append a single newline-terminated line to *rfile*."""
    with open(rfile, "a") as handle:
        handle.write(line + "\n")
def deleteFile(rfile):
    """Remove *rfile* from disk when it is present; silently do nothing otherwise."""
    if not os.path.exists(rfile):
        return
    os.remove(rfile)
def deleteEmptyLine(rfile):
    """Remove blank (whitespace-only) lines from *rfile* in place.

    NOTE: this is a verbatim duplicate definition earlier in the file;
    being later, this one wins at import time.
    BUGFIX: the original left the read handle open while re-opening the same
    file for writing, and never closed/flushed the write handle; context
    managers guarantee both are closed in order.
    """
    with open(rfile, "r") as f:
        newlist = [line for line in f if line.strip() != ""]
    with open(rfile, "w") as wf:
        wf.writelines(newlist)
def isPlatformRealWin32(projfile):
    """Return True when *projfile* looks like a real Win32 project path.

    A path qualifies when it mentions a known win32 location, unless it
    contains a packaging/template placeholder marker.
    """
    win32_markers = ["2d/libcocos2d.", "proj.win32/"]
    template_markers = ["__PACKAGE_NAME__", "-template-"]
    matched = any(projfile.find(marker) >= 0 for marker in win32_markers)
    if any(projfile.find(marker) >= 0 for marker in template_markers):
        matched = False
    return matched
def getRootDir(projfile):
    # Rebuild the path prefix of projfile up to (but excluding) the first
    # component that starts with "cocos2d"; that prefix is taken as the
    # engine root. Depends on the module-global `projtype` set by the
    # caller (listVSProjs / copyProjects).
    dirtrees=projfile.split("/");
    rootdir="";
    for dir in dirtrees:
        if dir.startswith("cocos2d"):
            break;
        if rootdir!="":
            rootdir=rootdir+"/";
        rootdir=rootdir+dir;
    # For "project"-type trees the engine sits two directory levels up.
    if projtype=="project":
        rootdir=rootdir+"\\..\\..";
    #print rootdir;
    forceDirectory(rootdir);
    #print rootdir;
    return rootdir;
def getEngineDir(projectfiles):
    # Return the engine root derived from the first project path that
    # mentions "cocos2d" (dragonbones renderer paths are skipped);
    # prints an error and returns "" when none is found.
    for line in projectfiles:
        if line.find("/dragonbones/renderer/")>-1:
            continue;
        if line.find("cocos2d")>-1:
            rootdir=getRootDir(line);
            return rootdir;
    print "error get Enginedir";
    return "";
def SolutionRelList(fileName):
    """Return True when *fileName* is one of the known solution project names."""
    known = (
        "libbox2d.vcxproj",
        "libcocos2d.vcxproj",
        "libluacocos2d.vcxproj",
        "libJSBinding.vcxproj",
        "libsimulator.vcxproj",
        "libSpine.vcxproj",
        "skydream.vcxproj",
        "quick_libs.vcxproj",
    )
    target = fileName.strip()
    for candidate in known:
        if target == candidate.strip():
            return True
    return False
def changeOutDir(projfile,rootdir):
    # Rewrite the OutDir/IntDir elements of a .vcxproj so all build
    # output lands under a shared _out tree. "project"-type projects use
    # a solution-relative path; everything else is anchored at rootdir.
    # Depends on the module-global `projtype`.
    #print projfile;
    arr=projfile.split("/");
    #print arr[len(arr)-1];
    rootdir=rootdir.replace("/","\\");
    if projtype=="project":
        outdir="$(SolutionDir)..\\..\\..\\_out\\$(SolutionName)\\$(Configuration).$(Platform)\\";
        intdir="$(SolutionDir)..\\..\\..\\_out\\$(SolutionName)\\_int\\$(ProjectName)_$(Configuration).$(Platform)\\";
        logdir="$(SolutionDir)..\\..\\..\\_out\\$(SolutionName)\\_log\\$(ProjectName)_$(Configuration).$(Platform)\\";#vs log 2013
    else:
        outdir=rootdir+"\\_out\\$(SolutionName)\\$(Configuration).$(Platform)\\";
        intdir=rootdir+"\\_out\\$(SolutionName)\\_int\\$(ProjectName)_$(Configuration).$(Platform)\\";
        logdir=rootdir+"\\_out\\$(SolutionName)\\_log\\$(ProjectName)_$(Configuration).$(Platform)\\";#vs log 2013
    print projtype,arr[len(arr)-1],outdir;
    proj_dom=minidom.parse(projfile)
    items=proj_dom.getElementsByTagName("OutDir")
    for item in items:
        item.childNodes[0].nodeValue=outdir;
    items=proj_dom.getElementsByTagName("IntDir")
    for item in items:
        item.childNodes[0].nodeValue=intdir;
    # The vs2013 log-path ("Path") rewrite is intentionally disabled:
    '''
    items=proj_dom.getElementsByTagName("Path")
    for item in items:
        item.childNodes[0].nodeValue=logdir;
    '''
    prettyxml=proj_dom.toprettyxml()
    f=open(projfile,"w")
    f.write(prettyxml)
    f.close()
def listVSProjs(path):
    # Find every real win32 .vcxproj under `path`, record the list in
    # vsprojs.txt next to this script, then rewrite each project's
    # output directories via changeOutDir.
    #path=r"F:\_ccWork\cocos\skydream\client";
    projfiles=[];
    global projtype;
    realwinewproj=[];
    projtypes=[];
    walk_files(path,projfiles,".vcxproj");
    for line in projfiles:
        if isPlatformRealWin32(line):
            realwinewproj.append(line);
            projtypes.append(getDirType(line))
    nwriteArrayToFile(realwinewproj,getCurrentPath()+"./vsprojs.txt");
    projtype=getDirType(realwinewproj[0]);
    rootdir=getEngineDir(realwinewproj);
    #convert out & int dirs
    i=0;
    for line in realwinewproj:
        #print line;
        projtype=projtypes[i];  # per-project type drives changeOutDir
        changeOutDir(line,path);
        i=i+1;
def getDirType(projfile):
    """Classify *projfile* as "project", "samples", "template" or "engine".

    Relies on the module-global `projroot` for the project-tree test.
    """
    if projfile.endswith("Naruto.vcxproj"):
        return "project"
    if projfile.find(projroot) > -1 and projfile.find("cjoy_proj") == -1:
        return "project"
    if projfile.find("samples") >= 0:
        return "samples"
    if projfile.find("template") >= 0 or projfile.find("templates") >= 0:
        return "template"
    return "engine"
def listSolution(path):
    # Find all .sln files under `path`, record them in vsslns.txt next to
    # this script, echo each one and return the list.
    slnfiles=[];
    walk_files(path,slnfiles,".sln");
    nwriteArrayToFile(slnfiles,getCurrentPath()+"./vsslns.txt");
    for line in slnfiles:
        print line;
    return slnfiles;
def copyProjects(path):
    # Entry point of the solution conversion: configures the module
    # globals (engine version, project root, toolset, shared build dir,
    # hard-coded game/engine trees), then clones every solution found
    # under the game tree into a "_cjoy" variant with its projects copied
    # into a local cjoy_proj/ directory.
    slnfiles=[];
    projfiles=[];
    global c2dx_ver
    c2dx_ver="cocos2d-x-2.2.6";
    global projroot
    projroot="dx2-project";
    global toolset
    toolset="v142"
    #toolset="v120_xp"
    global buildir
    buildir="$(SolutionDir)..\\..\\..\\_out\\$(SolutionName)\\"
    global gpath
    gpath="D:\\Cocos\\TheSilence\\"+projroot+"\\Naruto";
    walk_files(gpath,slnfiles,".sln");
    global c2dx
    c2dx="D:\\Cocos\\TheSilence\\"+c2dx_ver;
    walk_files(c2dx,projfiles,".vcxproj");
    global prj
    prj="D:\\Cocos\\TheSilence\\"+projroot+"\\";
    walk_files(path,projfiles,".vcxproj");
    '''
    sprj=os.path.split(os.path.realpath(slnfiles[0]))[0]+r"/";
    forceDirectory(sprj+"cjoy_proj"+r"/");
    parseSolution(sprj,slnfiles[0],projfiles);
    '''
    for line in slnfiles:
        print "copyProjects",line;
        if line.find("cjoy.sln")>-1:
            continue;  # skip solutions that were already converted
        path=os.path.split(os.path.realpath(line))[0]+r"/";
        forceDirectory(path+"cjoy_proj"+r"/");
        parseSolution(path,line,projfiles);
def trim(s):
    """Strip leading and trailing spaces from *s*, recursively.

    Bug fix: the original compared the last character with '' instead of
    ' ' (a dead comparison, since s is non-empty at that point), so
    trailing spaces were never removed.
    """
    if len(s)==0:
        return ''
    if s[:1]==' ':
        return trim(s[1:])
    elif s[-1:]==' ':
        return trim(s[:-1])
    else:
        return s
def trimyin(s):
    """Strip one surrounding quote (single or double) or space from *s*,
    then delegate the rest of the trimming to trim()."""
    if not s:
        return ''
    first = s[:1]
    last = s[-1:]
    if first == '"' or first == "'" or first == ' ':
        return trim(s[1:])
    if last == '"' or last == "'" or last == ' ':
        return trim(s[:-1])
    return s
def convertPath(slnpath,projfile,projfiles):
    # Locate the real project file matching the bare name `projfile`,
    # copy it (and its .filters companion) into <slnpath>/cjoy_proj/, and
    # return (copied file, path used as the reference for relative paths).
    realpath="";
    newfile="";
    for line in projfiles:
        arr=line.split("/");
        if projfile==arr[len(arr)-1]:#TODO: duplicate file names not handled
            realpath=line;
            break;
    dt=getDirType(realpath);
    print "dt",dt
    if realpath!="": #and getDirType(realpath)!="project":
        # Copy the project file verbatim into cjoy_proj/.
        rf=open(realpath,"r")
        cnts=[]
        for line in rf:
            cnts.append(line);
        newfile=slnpath+"cjoy_proj\\"+projfile;
        print realpath,newfile;
        wf=open(newfile,"w")
        wf.truncate();
        wf.writelines(cnts)
        wf.close();
        # Copy the .filters companion; "project"-type projects keep their
        # filters next to the solution rather than next to the project.
        fp=realpath+".filters";
        if dt=="project":
            fp=slnpath+projfile+".filters";
        if os.path.exists(fp):
            rff=open(fp,"r")
            cntfs=[]
            for line in rff:
                cntfs.append(line);
            newfilef=slnpath+"cjoy_proj\\"+projfile+".filters";
            print "-----------------",fp,newfilef;
            wff=open(newfilef,"w")
            wff.truncate();
            wff.writelines(cntfs)
            wff.close();
    if dt=="project":
        return newfile,slnpath+projfile;
    else:
        return newfile,realpath;
def parseSolution(path,slnfile,projfiles):
    # Read a .sln, redirect every Project entry to the copied file in
    # .\cjoy_proj\, convert each copied project's internal paths, and
    # write the result as <name>_cjoy.sln beside the original.
    rf=open(slnfile,"r")
    cnts=[];
    spath=os.path.split(os.path.realpath(slnfile))[0]+r"/";
    ar=os.path.splitext(os.path.realpath(slnfile))[0].split("\\");
    fs=ar[len(ar)-1];
    #print "a",fs
    for line in rf:
        if line.startswith("Project"):
            # Project line layout: <header>,"<relative path>",<guid>
            arr=line.split(",");
            brr=trim(arr[1])[1:-1].split("\\");
            fn=brr[len(brr)-1];
            fn1=fn.split(".")[0];
            #print "b",trim(arr[1]),fn,fn1;
            cnts.append(arr[0]+',".\\cjoy_proj\\'+fn+'",'+arr[2]);
            newfile,realpath=convertPath(path,fn,projfiles);
            print "p",newfile,realpath
            if newfile!="":
                convertCodeRelative(newfile,realpath);
        else:
            cnts.append(line);
    wf=open(spath+ar[len(ar)-1]+"_cjoy.sln","w");
    wf.truncate();
    wf.writelines(cnts);
def pathRelative(path1,path2):
    """Return *path2* expressed relative to *path1*."""
    return os.path.relpath(path2, path1)
def setFilters(filters1,filters2,inc,incnew):
    """Rewrite the Include attribute from *inc* to *incnew* on every
    matching element of *filters1* (*filters2* is currently unused)."""
    for node in filters1:
        if node.getAttribute('Include') == inc:
            #print len(node.childNodes),inc,incnew;
            node.setAttribute('Include', incnew)
def convertCodeRelative(newfile,realpath):
    # Rewrite the copied project `newfile` so every include directory,
    # source/header reference and .filters entry that was relative to
    # `realpath` becomes relative to the new cjoy_proj location; also
    # force the shared output dirs and make sure a PlatformToolset node
    # with the target toolset exists per configuration group.
    real_dom=minidom.parse(realpath)
    print newfile;
    new_dom=minidom.parse(newfile)
    real_dom_filter=minidom.parse(realpath+".filters")
    new_dom_filter=minidom.parse(newfile+".filters")
    ritems=real_dom.getElementsByTagName("AdditionalIncludeDirectories")
    nitems=new_dom.getElementsByTagName("AdditionalIncludeDirectories")
    npath=os.path.split(os.path.realpath(newfile))[0]+r"/";
    rpath=os.path.split(os.path.realpath(realpath))[0]+r"/";
    #print npath,"aaa",rpath;
    dt=getDirType(newfile);
    print dt,"abcde",newfile
    # --- AdditionalIncludeDirectories: re-root each ';'-separated entry.
    i=0;
    for it in ritems:
        val=ritems[i].childNodes[0].nodeValue;
        #print val;
        vals=val.split(";");
        nvals="";
        for line in vals:
            #print 'line',line;
            codepath=line;
            if line[0:]!="$" and line.startswith(".."):
                # Plain relative entry: re-anchor from rpath to npath.
                pa=pathRelative(npath,rpath+line);
                codepath=pa;
                #print 'bbb',pa;
            elif line.find("$(ProjectDir)")>-1:
                # $(ProjectDir)-anchored entry: substitute the engine tree
                # for "project"-type targets, otherwise resolve and
                # re-relativise against the new project location.
                pa=os.path.split(os.path.realpath(line))[0]+r"/";
                if dt=="project":
                    pa=rrpath=trimyin(trimyin(os.path.normpath(line.replace("$(ProjectDir)..\\..\\..\\","$(ProjectDir)..\\..\\..\\..\\..\\..\\"+c2dx_ver+"\\"))));
                    print 'aaa',pa
                else:
                    rrpath=trimyin(trimyin(os.path.normpath(line.replace("$(ProjectDir)",rpath))));
                    #print 'ccc',rrpath,npath;
                    pa=pathRelative(npath,rrpath);
                codepath="$(ProjectDir)"+pa;
            nvals=nvals+codepath+";";
            print "codepath,",codepath;
        nitems[i].childNodes[0].nodeValue=nvals;
        i=i+1;
    # --- ClCompile entries (sources) plus the matching .filters entries.
    rcompiles=real_dom.getElementsByTagName("ClCompile")
    ncompiles=new_dom.getElementsByTagName("ClCompile")
    rcompiles_filters=real_dom_filter.getElementsByTagName("ClCompile")
    ncompiles_filters=new_dom_filter.getElementsByTagName("ClCompile")
    i=0;
    for it in rcompiles:#TODO;
        #print rcompiles[i].parentNode.nodeName,rcompiles[i].getAttribute('Include')
        if rcompiles[i].parentNode.nodeName!="ItemGroup":
            #print "continue"
            i=i+1;
            continue;
        val=rcompiles[i].getAttribute('Include');
        pa=rpath+val;
        nval=pathRelative(npath,pa);
        '''
        if newfile.find("Naruto.vcxproj")>-1:
            print 'ClCompile',val;
            print nval;'''
        ncompiles[i].setAttribute('Include',nval);
        setFilters(ncompiles_filters,rcompiles_filters,val,nval);
        i=i+1;
    # --- ClInclude entries (headers) plus the matching .filters entries.
    # NOTE(review): unlike the ClCompile loop above, this loop does not
    # advance i before `continue`, so a non-ItemGroup node would desync
    # the index — confirm whether that case can occur here.
    rincludes=real_dom.getElementsByTagName("ClInclude")
    nincludes=new_dom.getElementsByTagName("ClInclude")
    rincludes_filters=real_dom_filter.getElementsByTagName("ClInclude")
    nincludes_filters=new_dom_filter.getElementsByTagName("ClInclude")
    i=0;
    for it in rincludes:#TODO;
        if rincludes[i].parentNode.nodeName!="ItemGroup":
            #print "continue"
            continue;
        val=rincludes[i].getAttribute('Include');
        #print 'ClInclude',val;
        pa=rpath+val;
        nval=pathRelative(npath,pa);
        nincludes[i].setAttribute('Include',nval);
        setFilters(nincludes_filters,rincludes_filters,val,nval);
        i=i+1;
    # --- Shared output / intermediate / log directories.
    outdir=buildir+"$(Configuration).$(Platform)\\";
    intdir=buildir+"_int\\$(ProjectName)_$(Configuration).$(Platform)\\";
    logdir=buildir+"_log\\$(ProjectName)_$(Configuration).$(Platform)";
    items=new_dom.getElementsByTagName("OutDir")
    for item in items:
        item.childNodes[0].nodeValue=outdir;
    items=new_dom.getElementsByTagName("IntDir")
    for item in items:
        item.childNodes[0].nodeValue=intdir;
    items=new_dom.getElementsByTagName("Path")
    for item in items:
        item.childNodes[0].nodeValue=logdir;
    # --- Ensure each parent of a PlatformToolset node carries the target
    # toolset; append a new node where it is missing.
    items=new_dom.getElementsByTagName("PlatformToolset");
    parnodes=[];
    for item in items:
        if not item.parentNode in parnodes:
            parnodes.append(item.parentNode);
    for par in parnodes:
        for item in items:
            if item.parentNode!=par:
                continue;
            is142=False;
            #print item.childNodes[0].nodeValue;
            if item.childNodes[0].nodeValue==toolset:
                #print it.nodeType;
                is142=True;
                break;
        if not is142:
            version_node=new_dom.createElement('PlatformToolset')
            par.appendChild(version_node)
            name_text=new_dom.createTextNode(toolset);
            version_node.appendChild(name_text);
    prettyxml=new_dom.toprettyxml()
    f=open(newfile,"w")
    f.write(prettyxml)
    f.close()
    prettyxml=new_dom_filter.toprettyxml()
    f=open(newfile+".filters","w")
    f.write(prettyxml)
    f.close()
def listUsers(path):
    # Record every .user file below `path` in vsusers.txt next to this
    # script.
    slnfiles=[];
    walk_files(path,slnfiles,".user");
    nwriteArrayToFile(slnfiles,getCurrentPath()+"./vsusers.txt");
if __name__ == '__main__':
    # Python-2 entry point: force the utf-8 default encoding so the
    # Chinese console banner prints, set the Windows console title, then
    # convert the solutions found under the script directory.
    reload(sys)
    sys.setdefaultencoding( "utf-8" )
    ss=u"请关注全国留守儿童,请关注全国城乡差距,请关注全国教育现状......"
    print ss
    cmd = "TITLE "+ss
    os.system(cmd.encode('gb2312'))  # Windows cmd expects GBK for the title
    print "author cclin 2015.04/modify 2021.3.16"
    print "support:e-mail=12092020@qq.com"
    print "copyright 2015~2030 for anyone to use"
    #print trimyin(trimyin('"abc"'));
    #getRootDir("E:/_work/cocos2d-x-3.7/cocos/audio");
    path = getCurrentPath();
    #listVSProjs(path);
    #pathRelative(r"D:\Cocos\"+projroot+"\Naruto2.2.5\proj.win32","D:\Cocos\cocos2d-x\cocos2dx\proj.win32");
    copyProjects(path);
    #listSolution(path);
#listUsers(path); |
def getInv(N):
    """Precompute factorials and modular inverses modulo the module-level
    prime Q.

    Returns (fact, inv): fact[i] = i! mod Q, and inv[i] is the modular
    inverse of i mod Q (with inv[0] and inv[1] both set to 1), for
    0 <= i <= N.
    """
    fact = [1] * (N + 1)
    inv = [0] * (N + 1)
    inv[0] = 1
    inv[1] = 1
    for i in range(2, N + 1):
        # Standard linear-time inverse recurrence for prime modulus Q.
        inv[i] = (-(Q // i) * inv[Q % i]) % Q
        fact[i] = fact[i - 1] * i % Q
    return fact, inv
# Read K and N from stdin, then for each i in [2, 2K] print (c - ans) mod Q,
# where c = C(N+K-1, N) (stars-and-bars count) computed from the
# precomputed factorials/inverses, and `ans` accumulates weighted binomial
# terms over j. (Competitive-programming task; the original problem
# statement is not available here, so the combinatorial meaning of the
# inner sums is not asserted.)
K, N = map( int, input().split())
Q = 998244353  # common NTT-friendly prime modulus
fuct, invs = getInv(N+K)
# c = C(N+K-1, N) mod Q
c = fuct[N+K-1]*invs[N]*invs[K-1]%Q
for i in range(2,2*K+1):
    ans = 0
    if i%2 == 1:
        for j in range(1, i//2+1):
            ans += pow(2,j-1,Q)*fuct[N+K-2-j]*invs[K-j]*invs[N-2]
            ans %= Q
    else:
        for j in range(1, i//2):
            ans += pow(2,j-1,Q)*fuct[N+K-2-j]*invs[K-j]*invs[N-2]
            ans %= Q
        # Even i: the middle term j = i//2 uses a different factor
        # (no invs[N-2]) — presumably deliberate; confirm vs. the task.
        k = i//2
        ans += pow(2,k-1,Q)*fuct[N+K-k-2]*invs[K-k]
        ans %= Q
    print((c-ans)%Q)
|
def _collect(path, seen):
    """Add unique "ACRONYM|PHRASE" entries from *path* to the *seen* set."""
    with open(path, 'r', encoding="utf8") as fh:
        for line in fh:
            parts = line.split('|')
            if len(parts) < 2:  # robustness: skip malformed lines
                continue
            seen.add((parts[0] + '|' + parts[1]).strip('\n'))

# Merge acronym|phrase pairs from both datasets, de-duplicated (a set
# replaces the O(n) list-membership test of the original).
acronym_set = set()
_collect('datasets/output/AnonymizedClinicalAbbreviationsAndAcronymsDataSet.txt', acronym_set)
# Bug fix: the original scanned ./datasets/acronyms.txt a second time but
# dropped the `full_wordphrase = line.split('|')[1]` re-assignment, so
# that pass appended bogus "acronym|stale-phrase" pairs (copy/paste
# defect). A single correct pass over the file replaces both loops.
_collect('./datasets/acronyms.txt', acronym_set)
print(len(acronym_set))
acronym_list = sorted(acronym_set)
# Write the sorted lookup table; `with` ensures the handle is closed
# (the original never closed it).
with open('datasets/output/final_acronyms_lookup_table.txt', 'a+') as f:
    for entry in acronym_list:
        f.write(entry + '\n')
from viola.core.event_loop import EventLoop
from viola.wsgi.server import WSGIServer
from viola.core.scheduler import Scheduler
# from wsgi_flask_test import app
from wsgi_bottle_test import app
# import os
if __name__ == '__main__':
    # Wire the viola event loop + scheduler to a WSGI server serving the
    # bottle test app, then block in the event loop.
    event_loop = EventLoop.instance(Scheduler.instance())
    server = WSGIServer(event_loop)
    server.set_wsgi_app(app)
    server.bind(host="10.211.55.25", port=2333)
    # NOTE(review): 9128 is presumably the accept backlog — confirm
    # against WSGIServer.listen's signature.
    server.listen(9128)
    # server.start(os.cpu_count())
    server.start(1)  # single worker process
    event_loop.start()
|
from plays import *
import sys
# Playbook definitions: each maps a validation pass ("validatePlays") plus
# ordered groups of build plays ("playGroups"); the play callables come
# from the star-import of plays.py.
playbook_config_newVrf_fp = { "validatePlays": [ play_validate_newVrf_fp ], "playGroups": [ [ { "play": play_configBuild_newVrf_fp, "printHostName": True } ] ] }
playbook_config_newOspfL3Out_dsFw_fp = { "validatePlays": [ play_validate_newOspfL3Out_dsFw_fp ], "playGroups": [ [ { "play": play_configBuild_newInterfaceLoopback_fp, "printHostName": True }, { "play": play_configBuild_newOspf_fp, "printHostName": False }, { "play": play_configBuild_newInterfaceVlanXferFw_fp, "printHostName": False } ] ] }
# NOTE(review): "playbood" below looks like a typo for "playbook", but the
# name is referenced consistently, so it is left unchanged.
playbood_config_newNetwork_fp = { "validatePlays": [ play_validate_newNetwork_fp ], "playGroups": [ [ {"play": play_configBuild_newNetwork_fp, "printHostName": True } ] ] }
def getPlaybook(playbookName):
    """Look up a playbook definition by name; returns None when unknown."""
    playbooks = {
        "config_newVrf_fp": playbook_config_newVrf_fp,
        "config_newOspfL3Out_dsFw_fp": playbook_config_newOspfL3Out_dsFw_fp,
        "config_newNetwork_fp": playbood_config_newNetwork_fp,
    }
    return playbooks.get(playbookName, None)
def runPlaybook(playbook,inputPlaybook, GroupVar):
    """Run the validation plays, then render every build play per host.

    Exits the process when any validation role's task function returns
    -1; otherwise returns the list of rendered task outputs, ordered by
    play group, then host, then play, then role.
    """
    for validatePlay in playbook['validatePlays']:
        for role in validatePlay['roles']:
            if role['role']['task']['function'](role, inputPlaybook) == -1:
                sys.exit()
    rendered = []
    for playGroup in playbook['playGroups']:
        for hostName in inputPlaybook['hostslist']:
            for play in playGroup:
                for role in play['play']['roles']:
                    task = role['role']['task']['function']
                    rendered.append(task(play['printHostName'], hostName, inputPlaybook, GroupVar))
    return rendered
# !/usr/bin/env python
# tasks: fit SB, fit kT, estimate Mass, csb, w, ErrorCenterX
# Obs.: don't forget to activate the ciao enviroment!
from astropy.io.fits import getdata
from astropy.table import Table
import astropy.io.ascii as at
import matplotlib.pyplot as plt
import matplotlib
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM
from ciao_contrib.runtool import *
import pycrates # import crates ciao routines
import sys, os
import logging
import time
import subprocess
import numpy as np
from scipy.interpolate import interp1d
from numpy.random import poisson
import fit
import preAnalysis
#--- cosmologia
h = 0.7
cosmo = FlatLambdaCDM(H0=h*100, Om0=0.3)
#--- constants
Msol = 1.98847e33
DEG2RAD=np.pi/180.0
kpc_cm = 3.086e21
# Funções básicas
def AngularDistance(z):
    # Angular-diameter distance to redshift z, in Mpc, computed as
    # D_L / (1+z)^2 from the module-level FlatLambdaCDM cosmology.
    DA = float( (cosmo.luminosity_distance(z)/(1+z)**2)/u.Mpc ) # in Mpc
    return DA
#--- Convertion function: kpc to physical
def kpcToPhy(radius,z,ARCSEC2PHYSICAL=0.492):
    # Convert a radius in kpc at redshift z into Chandra "physical"
    # pixels; ARCSEC2PHYSICAL is the ACIS pixel scale (0.492 arcsec/px).
    DA = AngularDistance(z)
    radius = radius/1000 # Mpc
    res = ( (radius/DA)/DEG2RAD )*3600/ARCSEC2PHYSICAL
    return res
#--- Critical universe density
def rhoc(z):
    # Critical density of the universe at redshift z, in g/cm^3.
    rho_c = float(cosmo.critical_density(z)/(u.g/u.cm**3)) # in g/cm**3
    return rho_c
#--- Função da evolução do redshift
def E(z):
    # Dimensionless Hubble parameter E(z) = H(z)/H0.
    res = cosmo.H(z)/cosmo.H(0)
    return res
def writeStringToFile(fileName, toBeWritten):
    """Append *toBeWritten* (plus a newline) to *fileName*.

    Mode "a" already creates the file when it is missing, so the
    original exists-check/duplicate branches were redundant; a context
    manager additionally guarantees the handle is closed.
    """
    with open(fileName, 'a') as f:
        f.write( '{toBeWritten}\n'.format(toBeWritten=toBeWritten) )
def saveFinalOutput(fileName,values):
    # Append a result row to fileName, writing a CSV header first when
    # the file does not exist yet.
    # NOTE(review): values.split(',') produces a *list*, and
    # writeStringToFile interpolates it with str.format, so rows end up
    # as "['a', ' b', ...]" rather than the raw CSV string — probably
    # `values` itself was meant to be written. Confirm before relying on
    # this output format.
    values_str = values.split(',')
    if not os.path.isfile(fileName):
        header = '# Name, Xra, Xdec, kT, r500, M500, Mg500'
        writeStringToFile(fileName,header)
        writeStringToFile(fileName,values_str)
    else:
        writeStringToFile(fileName,values_str)
def checkOutput(fileName,check):
    """Insert or update a "name: value" entry in *fileName*.

    *check* is a "name: value" string. When a line with the same name
    exists, that whole line is replaced; otherwise the entry is appended.

    Bug fixes vs. the original:
    - it stored the split *list* and passed it to str.replace(), raising
      TypeError whenever the name was found;
    - the append branch referenced `new_nameValue`, which was only bound
      inside the found branch, raising NameError when the name was new.
    """
    checkName, checkValue = check.split(': ')
    with open(fileName,'r') as fh:
        text = fh.read()
    ## find the last line carrying the same name, if any
    old_line = None
    for line in text.split('\n'):
        nameValue = line.split(': ')
        if len(nameValue)>1 and nameValue[0]==checkName:
            old_line = line
    if old_line is not None:
        ## Switch value: replace the whole old line with the new entry
        with open(fileName, 'w') as f:
            f.write(text.replace(old_line, check))
    else:
        with open(fileName, 'a') as f:
            f.write( '{toBeWritten}\n'.format(toBeWritten=check) )
def saveOutput(names,values,out='output.txt'):
    '''Save an ouptut name value into a section in the output file
    '''
    # Creates the csv/ascii table on the first call; afterwards either
    # appends a row or (re)assigns whole columns — see the note below.
    if not os.path.isfile(out):
        new_data = Table()
        for col,val in zip(names,values):
            new_data[col] = [val]
    else:
        new_data = Table.read(out,format='ascii',delimiter=',')
        old_cols = new_data.colnames
        ## if columns does not exists
        # NOTE(review): despite the name, `notCommonCols` keeps the
        # columns that DO already exist (`element in old_cols`); when any
        # requested column is present a new row is appended and filled,
        # otherwise whole columns are assigned. Verify this matches the
        # intended semantics.
        notCommonCols = [element for element in names if element in old_cols]
        if len(notCommonCols)>0:
            new_data.add_row(new_data[-1])
            for col,val in zip(names,values):
                new_data[col][-1] = val
        else:
            for col,val in zip(names,values):
                new_data[col] = val
    ## save
    new_data.write(out,format='ascii',delimiter=',',overwrite=True)
# def saveOutput(name,value,section=None,out='output.txt'):
# '''Save an ouptut name value into a section in the output file
# '''
# toBeWritten = '{item}: {value}\n'.format(item=name,value=value)
# checkOutput(out,toBeWritten)
def saveBeta(pars,out,model='modBeta'):
    """Append the fitted SB-profile parameters (rounded to 5 decimals) to
    *out*, writing a descriptive header block first when the file does
    not exist yet."""
    pars_str = ' '.join(str(round(p, 5)) for p in pars)
    if not os.path.isfile(out):
        header_lines = [
            '#This file is the ouput of the beta sb profile fit \n',
            '#The first line is the best fit \n',
        ]
        if model=='Beta':
            header_lines.append('#rc beta n0 chisq\n')
        if model=='modBeta':
            header_lines.append('#rc rs alpha beta epsilon gamma n0 bkg chisq\n')
        with open(out, 'w') as f:
            f.writelines(header_lines)
    writeStringToFile(out, pars_str)
def getBeta(out):
    """Return the last row of the parameter file *out* as a plain list,
    i.e. the most recently saved best fit (see saveBeta)."""
    table = np.loadtxt(out)
    last_row = table[-1]
    return [last_row[i] for i in range(len(last_row))]
def getObsid(obsid):
    """Parse a comma-separated obsid string into a list of ints."""
    return [int(token) for token in obsid.split(',')]
def checkDirs(dirList):
    """Return the indices of the entries in *dirList* that exist on disk,
    as a numpy int array.

    Bug fix: the original called `np.appsaveBetaend` (a typo for
    `np.append`), raising AttributeError on the first existing path.
    """
    idx = np.empty(0, dtype=int)
    for i in range(len(dirList)):
        if os.path.exists(dirList[i]):
            idx = np.append(idx, i)
    return idx
def checkObsid(obsid):
    '''It checks the obsid variable type.
    It returns in two differnt types, list and string.
    '''
    # Normalise obsid into a (comma-separated string, list) pair; accepts
    # a string, a list of strings, or a single int.
    # Bug fix: for a one-element list the original returned
    # str(['123']) — including brackets and quotes — instead of the
    # element itself.
    if isinstance(obsid,str):
        res_lis = getObsid(obsid)
        return obsid,res_lis
    elif isinstance(obsid,list):
        if len(obsid) > 1:
            res_str = ','.join(obsid)
        else:
            res_str = str(obsid[0]) if obsid else ''
        return res_str,obsid
    elif isinstance(obsid,int):
        res_str = str(obsid)
        res_lis = [res_str]
        return res_str,res_lis
    else:
        logging.error('Chandra obsid={} format is not valid! Please try the following formats: int, str or list'.format(obsid))
        pass
def checkImg(img):
    """Abort the program when the image file *img* is missing; otherwise
    return None."""
    if os.path.isfile(img):
        return
    logging.critical('Image file was not found:{}.'.format(img))
    exit()
def getDir(name,path):
    """Ensure that the directory *path*/*name* exists and return its path
    relative to the current working directory."""
    target = os.path.join(path, name)
    if not os.path.exists(target):
        os.makedirs(target)
    return os.path.relpath(target)
def anel(x0,y0,r0,rphy,step,region):
    """Write concentric annulus regions (CIAO syntax) covering radii
    [r0, rphy) in increments of *step* to the file *region*."""
    radii = np.arange(r0, rphy, step)
    lines = []
    for inner, outer in zip(radii[:-1], radii[1:]):
        lines.append('annulus(%.3f,%.3f,%.2f,%.2f)\n' % (x0, y0, inner, outer))
    with open(region, 'w') as fout:
        fout.writelines(lines)
def abertura(x0,y0,rphy,region):
    """Write a single circular aperture region (CIAO syntax) to *region*."""
    with open(region, 'w') as fout:
        fout.write('circle(%.2f,%.2f,%.2f)\n' % (x0, y0, rphy))
def makePlotBeta(infile,betapars,name,rbkg=0,model='modBeta',outdir='./'):
    '''Given a radial profile file and the model parameters it plots the electronic profile
    '''
    # Read the radial SB profile with pycrates, evaluate the chosen beta
    # model on the same radii (converted to arcsec) and hand both curves
    # to doPlotModel; returns (x, y, dy, ym).
    dirname = os.path.dirname(infile)
    rprof = pycrates.read_file(infile)
    r = pycrates.copy_colvals(rprof,"R")
    y = pycrates.copy_colvals(rprof,"SUR_BRI")
    dy = pycrates.copy_colvals(rprof,"SUR_BRI_ERR")
    # 0.492 arcsec/pixel; use the annulus mid-radius
    x = 0.492*0.5*(r[:,0] + r[:,1])
    if model=='Beta':
        # Beta Model
        ym = betapars[2] * (1 + (x/(betapars[0]*0.492))**2)**(0.5-3*betapars[1])+betapars[3]
        Label = r'$\beta$-model'
    if model=='modBeta':
        # Beta Model modified Maughan et al. 2008
        rc,rs,alpha,beta,epsilon,gamma,n0,bkg,chisqr = betapars
        ym = fit.S_bkg(x,(rc*0.492),(rs*0.492),alpha,beta,epsilon,gamma,n0,bkg)
        # ym = (np.max(y)/np.max(ym))*ym
        Label=r'adapted-$\beta$-model'
    doPlotModel(x,y,ym,y_obs_err=dy,name=name,rbkg=rbkg,label=Label,outdir=outdir)
    return x,y,dy,ym
def doPlotModel(r,y_obs,y_model,y_obs_err=None,name='',rbkg=0,label=r'adapted-$\beta$-model',outdir='./'):
    # Two-panel figure: SB profile with the fitted model on top, relative
    # residuals (data-model)/model below; saved as <outdir>/<name>_sb.png.
    heights = [6,1]
    gs_kw = dict(height_ratios=heights)
    f, (ax1,ax3)=plt.subplots(figsize=(12,10),ncols=1, nrows=2, sharex=True,gridspec_kw=gs_kw )
    # f, (ax1,ax3)=plt.subplots(ncols=1, nrows=2, sharex=True,gridspec_kw=gs_kw )
    # f.suptitle('Perfil radial de brilho superficial - A2142')
    f.suptitle('radial profile - {}'.format(name))
    #data and fit_opt
    ax1.errorbar(r, y_obs, yerr=y_obs_err, fmt='.', capsize=5, mec='dimgrey', mfc='dimgrey', \
        ms=6, elinewidth=1, ecolor='dimgrey' )
    ax1.plot(r, y_model, label=label, color='indianred')
    # Mark the background radius (arcsec)
    ax1.axvline(rbkg,linestyle='--',color='k')
    # ax1.set_xlim(r.min(),100*r.max())
    # ax1.set_ylim(y_obs.min()/10,y_obs.min()*10)
    ax1.set_yscale('log')
    ax1.set_ylabel(r'Surface Brightness (counts / $pixel^{2}$)')
    ax1.legend(loc='best')
    resid = (y_obs-y_model)/y_model
    ax3.plot(r,resid, linestyle='',marker='.', color='indianred')
    ax3.axhline(y=0, linestyle='--',marker='', color='dimgrey')
    # ax3.set_xlim(5,10*r.max())
    ax3.set_xscale('log')
    ax3.set_ylim(-0.3,0.3)
    ax3.set_title('Residue', pad=-10., fontsize=8)
    ax3.set_xlabel('Radius (arcsec)')
    ax3.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    ax3.get_xaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
    f.subplots_adjust(hspace=0)
    plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
    nome_fig= os.path.join(outdir,name+'_sb.png')
    plt.savefig(nome_fig)
def scaleRelation(Yx,redshift):
    # M500 (in 1e14 Msun) from the Yx scaling relation of Maughan et al.
    # 2012; r500 (kpc) follows from the 500 x critical-density spherical
    # overdensity definition. Uses module globals h, Msol, kpc_cm.
    AYM, BYM, CYM = 5.77/(h**(0.5)), 0.57, 3*1e14
    M500 = E(redshift)**(-2/5)*AYM*(Yx/CYM)**(BYM) # in 1e14 solar masses
    rho_c = rhoc(redshift)
    r500 = ((1e14*M500*Msol)/(4*np.pi/3)/rho_c/500)**(1/3)/kpc_cm
    M500, r500 = round(float(M500),4),round(float(r500),4)
    return M500, r500
def getCenter(img_mask,pos,unitsInput='deg',units='physical',outdir='./'):
    '''Given an image and a position, it returns the position in the other coordinate system [units]
    '''
    # Uses CIAO's dmcoords tool to convert between celestial (deg), image
    # and physical (sky-pixel) coordinate systems.
    dmcoords.punlearn()
    X,Y=pos
    if unitsInput=='deg':
        a = dmcoords(img_mask, asol="non", option="cel", ra=X, dec=Y, celfmt='deg', verbose=1)
    else:
        a = dmcoords(img_mask, asol="non", option="sky", x=X, y=Y, celfmt='deg', verbose=1)
    Xra, Xdec = round(float(dmcoords.ra),6), round(float(dmcoords.dec),6)
    xphy, yphy = float(dmcoords.x), float(dmcoords.y)
    xc, yc = float(dmcoords.logicalx), float(dmcoords.logicaly) # in image coordinates
    if units=='deg':
        out = Xra, Xdec
    if units=='image':
        out = round(xc), round(yc)
    if units=='physical':
        out = round(xphy), round(yphy)
    return out
def computeCsb(r500vec,betapars,model='modBeta'):
    # Surface-brightness concentration: model flux inside 0.15 * max(r)
    # divided by the total model flux over the radius grid `r500vec`.
    if model=='Beta':
        # Plain beta model; note the amplitude is fixed to 1 here, so the
        # fitted n0 cancels in the flux ratio.
        rc,b,n0,bkg,chisqr = betapars
        res = fit.SBeta(r500vec,rc,b,1)
    if model=='modBeta':
        # Beta Model modified Maughan et al. 2008
        rc,rs,a,b,e,g,n0,bkg,chisqr = betapars
        res = fit.S(r500vec,rc,rs,a,b,e,g)
    mask = r500vec<=0.15*np.max(r500vec)
    SB500 = np.sum(res)
    SBcore = np.sum(res[mask])
    csb = SBcore/SB500
    return csb
def center(img,x0,y0,rphy):
    # Centroid of the image inside a circle of radius rphy at (x0, y0):
    # cut the aperture, smooth with a Gaussian (aconvolve), then read the
    # physical-coordinate centroid from CIAO's dmstat.
    dirname = os.path.dirname(img)
    region = os.path.join(dirname,"aper.reg")
    toto = os.path.join(dirname,"toto.fits")
    totog = os.path.join(dirname,"totog.fits")
    # Extract the image within the circular aperture
    abertura(x0,y0,rphy,region)
    dmcopy(img+"[sky=region(%s)]"%(region),toto,clob=True)
    aconvolve(toto,totog,'lib:gaus(2,5,1,10,10)',method="fft",clobber=True)
    dmstat.punlearn()
    bla = dmstat(totog, centroid=True)
    pos = (dmstat.out_cntrd_phy)
    pos = pos.split(',')
    # Parse the "x,y" centroid string
    x, y = float(pos[0]), float(pos[1])
    return x,y
def centroid_shift(img,x0,y0,r500):
    # Centroid-shift morphology estimator: the centroid is recomputed in
    # apertures growing from 0.15*r500 to r500 (step 0.05*r500); w is the
    # sample std of the centroid offsets in units of r500, in 1e-3 units.
    centroid = []
    ri, rfim, dr = 0.15*r500,r500,0.05*r500
    rbin = np.arange(ri,rfim+dr,dr)
    # rbin = np.flip(rbin,axis=0)
    for i in range(len(rbin)):
        xr, yr = center(img,x0,y0,rbin[i])
        # print("rbin:",rbin[i])
        # xr, yr = centroX(img,x0,y0,rbin[i])
        centroid.append([xr,yr])
    centroid = np.array(centroid)
    offset = ((centroid[:,0]-x0)**2+(centroid[:,1]-y0)**2)**(1/2)
    # Sample standard deviation (ddof=1) normalised by r500
    w = offset.std(ddof=1)/r500
    return w*1e3
def noise(infits,outimage,mask=None):
    """ Module to add poisson noise to the image data
    Input: Fits image
    Output: Fits image - An image with possion randon noise
    """
    # Each output pixel is drawn from a Poisson distribution whose mean
    # is the input pixel value; where the optional mask image is zero,
    # the output is forced back to the (zero) mask value.
    ctimg = pycrates.read_file(infits)
    img = ctimg.get_image()
    pix = img.values
    noiseimg = poisson(pix)
    if mask is not None:  # idiom fix: identity test, not `!= None`
        mask_crate = pycrates.read_file(mask)
        msk_values = mask_crate.get_image().values
        msk = msk_values == 0
        noiseimg[msk] = msk_values[msk]
    img.values = noiseimg
    pycrates.write_file(ctimg,outimage,clobber=True)
def getNoiseImages(img,N=20,mask=None,pointSource=None,outdir='./noise/'):
    ''' it produces N images with a poissonian noise'''
    # Existing realisations on disk are reused; when a point-source
    # region file is given, a masked copy (sources excluded via dmcopy)
    # is produced alongside each realisation.
    for i in range(1,N+1):
        img_noise = os.path.join(outdir,"sb_%03i.img"%(i))
        if not os.path.isfile(img_noise):
            noise(img,img_noise,mask=mask)
    if pointSource is not None:
        for i in range(1,N+1):
            img_mask = os.path.join(outdir,"sb_%03i_mask.img"%(i))
            img_noise = os.path.join(outdir,"sb_%03i.img"%(i))
            if not os.path.isfile(img_mask):
                dmcopy(img_noise+"[exclude sky=region(%s)]"%(pointSource),img_mask,clobber=True)
## --------------------------------------------------------
######################## Main taks ########################
## --------------------------------------------------------
def fitSB(rprof_file,model='modBeta',name='Abell',outdir='./',par0=None):
    '''It fits a SB density profile. There are 3 model.
    model=['Beta','doubleBeta','modBeta']
    To Do : doubleBeta
    '''
    # fitDir = getdata(outdir,'fit')
    if model=='Beta':
        betapars = fit.fitBeta(rprof_file)
    if model=='modBeta':
        betapars = fit.fitBetaM(rprof_file,par0=par0)
        # When the fitted core radius exceeds the scale radius, re-seed
        # and refit with the two radii rearranged.
        # NOTE(review): the message text contradicts the condition —
        # betapars[0] > betapars[1] means rc is *greater* than rs.
        if betapars[0]>betapars[1]:
            print('rc is less than rs')
            rc, rs, alpha, beta, epsilon, gamma, n0, bkg, chisqr = betapars
            betapars = fit.fitBetaM(rprof_file,par0=[rs, 10*rc, alpha, beta, epsilon, gamma, n0, bkg])
    return betapars
def fitTemperatureX(obsid_lis,z,center,radius=500,name='source',outdir='./',dataDownPath='./',core=True):
    # Fit the X-ray temperature inside `radius` (kpc): extract a spectrum
    # per obsid (annulus excluding the cool core when core=True), combine
    # them, and fit kT with an initial guess of 5 keV.
    nObs = len(obsid_lis)
    ## Check spec dir
    specroot = getDir('spec',outdir)
    Xra, Xdec = center
    ## Find Arcsec to physical units
    rphy = kpcToPhy(radius,z) ## 500 kpc in physical units (default)
    # rphy = kpcToPhy(500,z) ## 500 kpc in physical units (default)
    # core=False
    ## Input files
    evt_mask_lis = [os.path.join(dataDownPath,'{}'.format(obsid),'repro',"{}_evt_gti_mask.fits".format(obsid)) for obsid in obsid_lis]
    # blk_evt_lis = [os.path.join(outdir,"img","{}_blank.evt".format(obsid)) for obsid in obsid_lis]
    blk_evt_lis = [os.path.join(outdir,'img',"{}_blank.evt".format(obsid)) for obsid in obsid_lis]
    ## Output files
    phafile = os.path.join(specroot,'%s_src.pi'%(name))
    spec_out = os.path.join(specroot,'spec.txt') ## output fit
    core_vec = [os.path.join(specroot,"%s_core.reg"%(obsid)) for obsid in obsid_lis] ## region files
    for i in range(nObs):
        # Convert the celestial centre to physical coordinates per obsid.
        dmcoords(evt_mask_lis[i], asol="non", option="cel", ra=Xra, dec=Xdec, verbose=1)
        xobs, yobs = float(dmcoords.x), float(dmcoords.y)
        # Extraction annulus: [0.15*rphy, rphy] when excising the core,
        # [0.05*rphy, rphy] otherwise.
        if core:
            anel(xobs,yobs,0.15*rphy,rphy+1,0.85*rphy,core_vec[i])
        else:
            anel(xobs,yobs,0.05*rphy,rphy+1,0.95*rphy,core_vec[i])
        # abertura(xobs,yobs,rphy+1,core_vec[i])
        fit.kT_prep(obsid_lis[i],evt_mask_lis[i],blk_evt_lis[i],core_vec[i],specroot)
    spec_lis = ','.join( os.path.join(specroot,'%s.pi'%(obsid)) for obsid in obsid_lis )
    combine_spectra(src_spectra=spec_lis,outroot=os.path.join(specroot,"%s"%(name)),bscale_method='asca',clobber=True)
    dmhedit(infile=phafile, filelist="", operation='add', key='ANCRFILE', value='%s_src.arf'%(name))
    dmhedit(infile=phafile, filelist="", operation='add', key='RESPFILE', value='%s_src.rmf'%(name))
    norm, kT, ksqr = fit.fit_kT(phafile,5.,z,spec_out)
    if kT>20:
        print('Temperature Fit Error!')
        ksqr=20  # flag the failure through the chi-square value
    return norm, kT, ksqr
def massX(obsid_lis,z,center,radial_profile,kT_0=5,r0=500,rbkg=1000,model='modBeta',name='Abell',outdir='./',dataDownPath='./'):
    """ Given a ...
    it estimates the M500
    """
    # Iterates the Yx-M500 scaling relation: fit the SB profile once,
    # then alternate temperature fits and gas-mass integrals until r500
    # converges to better than 1% (or 20 iterations).
    ## Check fit dir
    # outDir = getDir(outdir,'output')
    outDir = outdir
    currentPath = os.getcwd()
    dirCheck = os.path.join(currentPath,'check')
    sb_plot_dir = getDir('sb',dirCheck)
    ## output sb parameters
    out = os.path.join(outDir,'{}.txt'.format(model))
    DA = AngularDistance(z) # Mpc
    ARCSEC2kpc = ( (1/3600)*DEG2RAD )*1000*DA # kpc/arcsec
    phy2cm = (ARCSEC2kpc*0.492)*kpc_cm # (kpc/arcsec)/(physical/arcsec)
    ## Convert radius to physical units
    r0phy = kpcToPhy(r0,z) ## kpc to phsyical units
    r1000phy = kpcToPhy(1000,z) ## 1000 kpc in physical units
    ## Fit SB
    ## cut at the background radius
    rprof = radial_profile.split('.fits')[0]+'_cut.fits'
    dmcopy(radial_profile+'[rmid<=%.2f]'%(rbkg),rprof,clobber=True)
    if model=='Beta':
        betapars = fitSB(rprof,model=model,name=name,outdir=outDir)
        saveBeta(betapars,out,model=model)
    if model=='modBeta':
        # Seed the modified-beta fit from a plain beta fit.
        rc0,beta0,n0,bkg0,chisqr0 = fit.fitBeta(radial_profile)
        rs0,alpha0,epsilon0,gamma0 = 2*r1000phy,0.1,2.,3.
        par0 = [rc0,rs0,alpha0,beta0,epsilon0,gamma0,n0,1e-5]
        # betapars = fitSB(rprof,model=model,name=name,outdir=outDir,par0=par0)
        betapars = fitSB(radial_profile,model=model,name=name,outdir=outDir,par0=par0)
        chisqr = betapars[-1]
        saveBeta(betapars,out,model=model)
    ## Make a plot
    makePlotBeta(radial_profile,betapars,name,rbkg=0.492*rbkg,model=model,outdir=sb_plot_dir)
    conv = 100; count = 1
    while (count<20):
        print("step %i"%(count))
        r500,r500phy = r0, r0phy
        norm, kT, ksqr = fitTemperatureX(obsid_lis,z,center,radius=r500,name=name,outdir=outDir,dataDownPath=dataDownPath)
        if ksqr>5:
            # Poor fit: fall back to a fixed 1 Mpc aperture.
            norm, kT, ksqr = fitTemperatureX(obsid_lis,z,center,radius=1000.,name=name,outdir=outDir,dataDownPath=dataDownPath)
        #--- n0 from the xspec normalisation vs the model emission integral
        EI_xspec = 1e14*norm*4*np.pi*(DA*1e3*kpc_cm*(1+z))**2
        EI_model = fit.EI(r500phy,betapars,phy2cm,model=model)
        n0 = ( EI_xspec / EI_model )**(1/2) ## 1/cm^3
        #--- Gas mass within R500
        Mg500 = fit.Mgas(r500phy,betapars,n0,phy2cm,model=model)
        #--- Yx = Mg * kT, then the scaling-relation mass
        Yx = 1e13*Mg500*kT
        M500, r500 = scaleRelation(Yx,z)
        conv = round(100*np.abs(r500-r0)/r0,2)
        r0, r0phy = r500, kpcToPhy(r500,z)
        count += 1
        print(25*'--')
        print('%s'%(name))
        print("n0:",n0,"cm^-3")
        print("Mg500:",Mg500,"10^13 solar masses")
        print("M500:",M500,"10^14 solar masses")
        print("r500:",r500,"kpc")
        print("kT:",round(kT,2),"keV")
        print("The convergence is:",conv,"%")
        print(25*'--')
        if conv<1.0:
            break
    output = os.path.join(outdir,'log.txt')
    cols = ['kT','R500','Mg500','M500','n0']
    values = [kT,r500,Mg500,M500,n0]
    saveOutput(cols,values,out=output)
    ## Switch n0
    # NOTE(review): the repacked order below disagrees with the
    # '#rc rs alpha beta epsilon gamma n0 bkg chisq' header written by
    # saveBeta (n0 lands in gamma's slot), and the 'Beta' branch unpacks
    # 4 values from a 5-element list and uses an undefined `b` — both
    # look like defects; confirm against fit.EI/fit.Mgas expectations.
    if model=='modBeta':
        rc,rs,a,b,e,_,g,bkg,chisqr = betapars
        betapars = [rc,rs,a,b,e,n0,g,bkg,chisqr]
    if model=='Beta':
        rc,_,bkg,chisqr = betapars
        betapars = [rc,b,n0,bkg,chisqr]
    saveBeta(betapars,out,model=model)
    return kT, r500, Mg500, M500, betapars
def csb(betapars,r500,z,outdir='./'):
    """Compute the surface-brightness concentration (csb) inside R500.

    Parameters
    ----------
    betapars : sequence
        Fitted surface-brightness model parameters (modified beta model).
    r500 : float
        R500 radius in kpc.
    z : float
        Cluster redshift.
    outdir : str, optional
        Directory whose 'log.txt' receives the result.

    Returns
    -------
    float
        The concentration value, also printed and logged.
    """
    # R500 converted from kpc to physical (detector) units.
    radius_phy = kpcToPhy(r500, z)
    # Radial grid from 2 physical units out to R500, unit step.
    radii = np.arange(2, radius_phy, 1)
    concentration = computeCsb(radii, betapars, model='modBeta')
    saveOutput(['csb'], [concentration], out=os.path.join(outdir, 'log.txt'))
    print('csb:', concentration)
    return concentration
def centroidShift(img,center_peak,r500,rmax,z,outdir='./'):
    """Estimate the centroid-shift parameter <w> from Monte-Carlo noise images.

    Noise realisations of the input image are generated, the centroid shift
    is measured on each within min(R500, rmax), and the mean and scatter
    over realisations (in units of 1e-3) are printed and logged.

    Returns
    -------
    (wvalue, werr) : tuple of float
        Mean centroid shift and its standard deviation.
    """
    # Radii converted to physical (detector) units.
    r500phy = kpcToPhy(r500, z)
    r30kpc = kpcToPhy(30, z)
    noiseroot = getDir('noise', outdir)
    # X-ray peak position in physical coordinates.
    xpeak, ypeak = getCenter(img, center_peak, unitsInput='deg', units='physical')
    # Exclude the central 30 kpc region around the peak.
    core = os.path.join(noiseroot, 'core.reg')
    abertura(xpeak, ypeak, r30kpc, core)
    # Generate the Monte-Carlo noise images.
    N = 100
    getNoiseImages(img, N=N, outdir=noiseroot)
    # Outer aperture: the smaller of R500 and the maximum usable radius.
    rt = np.min([r500phy, rmax])
    shifts = np.array([
        centroid_shift(os.path.join(noiseroot, "sb_%03i.img" % (i)), xpeak, ypeak, rt)
        for i in range(1, N + 1)
    ])
    wvalue = np.mean(shifts)
    werr = np.std(shifts)
    print("<w>, w_err : ( %.3f +/- %.3f )1e-3"%(wvalue, werr))
    saveOutput(['w', 'werr'], [wvalue, werr], out=os.path.join(outdir, 'log.txt'))
    return wvalue,werr
def errorCenterX(img,center,psreg,z,radius=500,outdir='./'):
    '''Estimate the error in the X-ray center and X-ray peak.

    Monte-Carlo noise realisations of the image are generated (with point
    sources masked), the centroid and X-ray peak are re-measured on each,
    and the positional scatter is converted from pixels to kpc.

    Parameters
    ----------
    img : str
        Path to the image file.
    center : sequence
        Initial (ra, dec) center in degrees.
    psreg : str
        Point-source region file used when building noise images.
    z : float
        Cluster redshift.
    radius : float, optional
        Search radius in kpc (default 500).
    outdir : str, optional
        Output directory for 'log.txt' and 'center_peak.txt'.

    Returns
    -------
    Xra, Xdec, std_cen, Xra_peak, Xdec_peak
        Centroid (deg), its 1-sigma error (kpc) and peak position (deg).
    '''
    rphy = kpcToPhy(radius,z) ## kpc to physical units
    r10kpc = kpcToPhy(10,z)
    DA = AngularDistance(z) # in kpc
    noiseroot = getDir('noise',outdir)
    ## Get the new center
    xcen, ycen = getCenter(img,center,unitsInput='deg',units='physical') # initial center
    xcen, ycen = preAnalysis.findCentroX(img,xcen,ycen,rphy) ## centroid within the given radius
    xpeak,ypeak= preAnalysis.findXrayPeak(img,xcen,ycen,rphy) ## X-ray peak within the given radius
    ## Measure positions on N masked noise realisations
    N=20
    getNoiseImages(img,N=N,pointSource=psreg,outdir=noiseroot)
    position = []
    position2 = []
    for i in range(1,N+1):
        img_mask = os.path.join(noiseroot,"sb_%03i_mask.img"%(i))
        res = preAnalysis.findCentroX(img_mask,xcen,ycen,rphy)
        res2 = preAnalysis.findXrayPeak(img_mask,xcen,ycen,rphy,rSigma=r10kpc)
        position.append(res)
        position2.append(res2)
    # FIX: the original converted `position` to an array twice; do it once.
    position = np.array(position)
    position2 = np.array(position2)
    # Pixel scale 0.492 arcsec/pixel -> kpc at redshift z.
    ARCSEC_kpc = 0.492*( (1/3600)*DEG2RAD )*1000*DA # kpc/arcsec
    std_cen = ARCSEC_kpc*(np.std(position[:,0])**2+np.std(position[:,1])**2)**(1/2)
    std_peak = ARCSEC_kpc*(np.std(position2[:,0])**2+np.std(position2[:,1])**2)**(1/2)
    img_mask = os.path.splitext(img)[0]+'_mask'+os.path.splitext(img)[1]
    Xra, Xdec = getCenter(img_mask,[xcen,ycen],unitsInput='physical',units='deg')
    Xra_peak, Xdec_peak = getCenter(img_mask,[xpeak,ypeak],unitsInput='physical',units='deg')
    ## Save output
    output = os.path.join(outdir,'log.txt')
    # NOTE(review): the header lists peak columns but `position` holds only
    # (xcen, ycen); confirm whether the peak positions should be saved too.
    hdr = 'xcen,ycen,xpeak,ypeak'
    np.savetxt(os.path.join(outdir,'center_peak.txt'),position,header=hdr,fmt='%4f')
    saveOutput(['errorCenter'],[std_cen],out=output)
    print("X-ray center:", Xra,Xdec, " +/- ",std_cen,' [kpc]')
    return Xra, Xdec, std_cen, Xra_peak, Xdec_peak
if __name__ == '__main__':
    # This module is a library; running it directly only identifies it.
    print('Analysis.py')
    print('author: Johnny H. Esteves')
# def doPlotBetaM(infile,pars,rbkg=100,name='RM'):
# dirname = os.path.dirname(infile)
# rprof = read_file(infile)
# # make_figure(infile+"[cols r,CEL_BRI]","histogram")
# r = copy_colvals(rprof,"R")
# y = copy_colvals(rprof,"CEL_BRI")
# dy = copy_colvals(rprof,"CEL_BRI_ERR")
# bgy = copy_colvals(rprof,"BG_CEL_BRI")
# # bdy = copy_colvals(rprof,"BG_CEL_BRI_ERR")
# x = 0.492*0.5*(r[:,0] + r[:,1])
# # Beta Model Modified
# rc,rs,alpha,beta,epsilon,n0,gamma,bkg,chisqr = pars
# ym = fit.S_bkg(x,(rc*0.492),(rs*0.492),alpha,beta,epsilon,gamma,n0,bkg)
# # ym = (np.max(y)/np.max(ym))*ym
# add_curve(x,ym,["symbol.style","none"])
# xr = np.append(x, x[::-1])
# yr = np.append(y+dy, (y-dy)[::-1])
# add_region(xr,yr,["fill.style","solid","fill.color","olive","depth",90])
# ## We take the second minimum and maximum value
# limits(Y_AXIS,0.9*np.min(y),1.1*np.max(yr))
# # limits(X_AXIS,np.min(x),np.max(x)+1)
# log_scale()
# bx = [0.1, 1000, 1000, 0.1]
# by = [0.90*np.mean(bgy), 0.90*np.mean(bgy), 1.10*np.mean(bgy), 1.10*np.mean(bgy)]
# add_region(bx,by,["fill.style","solid","fill.color","red","edge.style","noline","depth",80])
# add_curve(x,bgy,["symbol.style","square","symbol.size",2])
# add_vline(rbkg*0.492)
# set_plot_xlabel("r (arcsec)")
# set_plot_ylabel("Surface brightness (count arcsec^{-2})")
# set_plot_title(name+r" \chi^2_r = %.2f"%(chisqr))
# set_plot(["title.size",20])
# opts = { "clobber": True, "fittopage": True }
# opts['pagesize'] = 'letter'
# print_window(os.path.join(dirname,"%s.pdf"%(name)),opts)
# clear_plot()
|
#https://leetcode-cn.com/contest/weekly-contest-218/problems/concatenation-of-consecutive-binary-numbers/
#We only need the value whose final length fits within len(modBinBase); then take the XOR.
class Solution:
    modBinBase = '111011100110101100101000000111'

    def concatenatedBinary(self, n: int) -> int:
        """Return, modulo 1e9+7, the decimal value of the binary string
        formed by concatenating the binary representations of 1..n.

        Appending i to the running value is a left shift by i.bit_length()
        bits followed by OR-ing i in; reducing mod 1e9+7 at each step keeps
        numbers small.  This replaces the original O(n log n)-character
        string build and removes its leftover debug print of the whole
        concatenated string.
        """
        MOD = 1000000007
        result = 0
        for i in range(1, n + 1):
            result = ((result << i.bit_length()) | i) % MOD
        return result
# Manual check of the solution.
# Expected: n = 3 -> 27, n = 12 -> 505379714.
solution = Solution()
n = 12
answer = solution.concatenatedBinary(n)
print(answer)
|
# Convert a weight entered in pounds to kilograms (1 lb ~= 0.454 kg).
weight_lb = float(input("Enter number of pounds: "))
weight_kg = weight_lb * 0.454
print("Number of Kilograms: ", weight_kg)
from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
# Create your models here.
# (code, label) choices for Project.languages; the single-letter code is
# what gets stored in the database.
LANGUAGES = (
    ('J', 'JavaScript'),
    ('H', 'HTML5'),
    ('C', 'CSS3'),
    ('P', 'Python'),
    ('S', 'SQL'),
    ('M', 'MongoDB')
)
class Project(models.Model):
    """A portfolio project with a name, overview and primary language."""
    project_name = models.CharField(max_length=300)
    project_overview = models.CharField(max_length=2000)
    # Holds one single-letter code from LANGUAGES.
    languages = models.CharField(
        max_length=6,
        choices=LANGUAGES
    )
    def __str__(self):
        return self.project_name
    def get_absolute_url(self):
        # Canonical URL of this project's detail page.
        return reverse('projects_detail', kwargs={'pk': self.id})
class Developer(models.Model):
    """A developer profile linked one-to-one with an auth User and
    many-to-many with the projects they worked on."""
    name = models.CharField(max_length=200)
    description = models.CharField(max_length=200)
    projects = models.ManyToManyField(Project)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse("detail", kwargs={"developer_id": self.id})
|
# ticker ticks every 1/rate seconds, default 1/1000 s, and provides time for the world
import time
import threading
class Ticker(threading.Thread):
    """Background thread that advances world time.

    Ticks every 1/rate seconds (default 1/1000 s) and forwards each tick
    to every registered entity that implements a ``tick(world)`` method.
    """

    def __init__(self, rate=1000, max_ticks=5000, world=None):
        threading.Thread.__init__(self)
        self.rate = rate              # ticks per second
        self.max_ticks = max_ticks    # hard cap so the thread cannot run forever
        self.current_tick = 0
        self.entities = []            # objects with a tick(world) method
        self.running = False
        self.world = world

    def register_entity(self, entity):
        # Only accept objects that can actually receive ticks.
        if hasattr(entity, "tick"):
            print(entity)
            self.entities.append(entity)

    def _do_tick(self):
        # Advance the clock one step and notify every registered entity.
        self.current_tick = self.current_tick + 1
        print("ticker - tick %d" % self.current_tick)
        for entity in self.entities:
            entity.tick(self.world)
        # BUG FIX: `1 / self.rate` was integer division under Python 2 and
        # always slept 0 seconds; force float division so `rate` matters.
        time.sleep(1.0 / self.rate)

    def run(self):
        """Tick until stopped or max_ticks is reached."""
        self.running = True
        while self.running and self.current_tick < self.max_ticks:
            self._do_tick()

    def run_some(self, some_ticks):
        """Tick at most `some_ticks` times, then stop."""
        self.running = True
        local_ticks = 0
        while self.running and self.current_tick < self.max_ticks:
            self._do_tick()
            local_ticks = local_ticks + 1
            if local_ticks >= some_ticks:
                self.running = False

    def stop(self):
        self.running = False
|
# Since I joined the BOX dollar-cost-averaging plan managed by teacher
# Xiaolai, I want to know the long-term return of steady periodic investing.
# Below are my notes; updated over time.
x = 3470 # initial capital, in US dollars
y = 57.38 # amount invested per period
y_2 = 186.8 # for testing
mo = int() # target amount of money
num = int() # number of investment periods; 52 periods per year
# money() only implements part of the goal; the functionality of
# week_money() below still needs to be merged in.
def money(mo, num):
    """Annualized return rate needed to reach `mo` after `num` weekly
    contributions of `y` on top of the initial capital `x`."""
    gth = (mo - x - num*y) / (x + num*y) # gth is short for growth: total return rate
    gth_y = (365*gth) / (7*num)
    return round(gth_y, 4) # gth_y is the annualized return rate
print(money(100000, 156)) # a 235.66% annualized return reaches $100k within 3 years
# week_money() is an upgraded version of money(); it models the investor's
# growing off-market earning power.
# num is the total number of periods (156 periods = 3 years); every 18 weeks
# the per-period contribution increases by $14.38.
def week_money(mo, num, w):
    """Annualized return with a contribution that steps up every 18 weeks.

    NOTE(review): the `return` sits inside the while body, so the loop
    exits on its first pass -- confirm whether it should iterate.
    """
    while num > 0: # num is the total number of investment periods
        num -= 18
        w += 1 # w grows by 1 every 18 weeks, from 0; marks the contribution step-up
        z = y + w*14.38 # contribution amount per period
        gth = (mo - x - 18*w*y) / (x + 18*w*y)
        gth_y = (365*gth) / (18*7*w)
        print(round(z, 2))
        return round(gth_y, 4) # return rate reflecting off-market earning power
week_money(100000, 156, 0) # a 219.95% annualized return reaches $100k within 3 years
# Features to add later:
# 1. Look up historical annualized returns of BTC, EOS and XIN; weight them
#    to get BOX's historical annualized return, i.e. a given gth_y.
# 2. Given gth_y, the initial capital, the target amount and the per-period
#    contribution, compute how long reaching the target takes.
|
import cv2
from matplotlib import pyplot
# Load an image with OpenCV (BGR channel order) and display it both in an
# OpenCV window and with matplotlib (which expects RGB, hence the
# conversion).  FIX: use the public `cv2.*` API instead of reaching into
# the internal `cv2.cv2` submodule.
original_image = cv2.imread("pexels.jpeg")
cv2.imshow("original image", original_image)
rgb_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
pyplot.imshow(rgb_image)
pyplot.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
import scrapy
import re
class MalaysiaSomdomSpider(scrapy.Spider):
    """Scrape Malay-language text from the somdom.com forum thread t6411.

    Each page's text nodes are stripped down to ASCII letters, '.' and '-'
    and appended, comma-separated, to a local text file.
    """
    name = 'malaysia_somdom'
    # NOTE(review): scrapy expects bare domains here (e.g. 'www.somdom.com');
    # including a path may interfere with offsite filtering -- confirm.
    allowed_domains = ['www.somdom.com/malay/t6411']
    start_urls = ['http://www.somdom.com/malay/t6411/',
                  'http://www.somdom.com/malay/t6411-2',
                  'http://www.somdom.com/malay/t6411-3',
                  'http://www.somdom.com/malay/t6411-4',
                  ]
    def parse(self, response):
        # All text nodes inside the reading pane.
        content = response.xpath('//*[@id="J_read_main"]//div/text()').extract()
        # Collapse every run of non-(letter/dot/hyphen) characters to a space.
        fil = re.compile(u'[^a-zA-Z.-]+', re.UNICODE)
        # NOTE(review): hard-coded Windows path; output is appended on every page.
        with open('C:\\Users\\Administrator\\Desktop\\word2.txt', 'a', encoding='utf8')as f:
            for word in content:
                word = word.strip()
                if word:
                    malaysia_word = fil.sub(' ', word)
                    print(malaysia_word)
                    f.write(malaysia_word + ',')
import os

# Print the absolute path of the current working directory.
current_dir = os.path.abspath(os.curdir)
print(current_dir)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-23 17:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the 'User' table with
    username, password and a single-letter role code."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=40)),
                # Role code: A=Admin, R=Reviewer, U=User.
                ('role', models.CharField(choices=[('A', 'Admin'), ('R', 'Reviewer'), ('U', 'User')], max_length=2)),
            ],
        ),
    ]
|
from reporter_app import db
from flask_security import UserMixin, RoleMixin
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship, backref
from sqlalchemy import Boolean, DateTime, Column, Integer, String, ForeignKey, UnicodeText, UniqueConstraint
from sqlalchemy.sql import func
import datetime
class RolesUsers(db.Model):
    """Association table linking users to roles (many-to-many)."""
    __tablename__ = 'roles_users'
    __table_args__ = (UniqueConstraint('user_id', 'role_id'),)
    user_id = Column('user_id', Integer(), ForeignKey('user.id', ondelete='CASCADE'), primary_key=True)
    role_id = Column('role_id', Integer(), ForeignKey('role.id', ondelete='CASCADE'), primary_key=True)
    # Row bookkeeping: creation time set by the DB, update time by the ORM.
    create_datetime = Column(DateTime(), nullable=False, server_default=func.now())
    update_datetime = Column(
        DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=datetime.datetime.utcnow,
    )
class Role(db.Model, RoleMixin):
    """A named permission role (integrates with Flask-Security via RoleMixin)."""
    __tablename__ = 'role'
    id = Column(Integer(), primary_key=True)
    name = Column(String(80), unique=True, nullable=False)
    description = Column(String(255))
    create_datetime = Column(DateTime(), nullable=False, server_default=func.now())
    update_datetime = Column(
        DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=datetime.datetime.utcnow,
    )
class User(db.Model, UserMixin):
    """Application account (integrates with Flask-Security via UserMixin)."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    email = Column(String(255), unique=True, nullable=False)
    username = Column(String(255), unique=True, nullable=True)
    password = Column(String(255), nullable=False)
    first_name = Column(String(128), nullable=False)
    surname = Column(String(128), nullable=False)
    # Login tracking columns used by Flask-Security's trackable feature.
    last_login_at = Column(DateTime())
    current_login_at = Column(DateTime())
    last_login_ip = Column(String(100))
    current_login_ip = Column(String(100))
    login_count = Column(Integer)
    active = Column(Boolean())
    # Unique identifier required by Flask-Security.
    fs_uniquifier = Column(String(255), unique=True, nullable=False)
    confirmed_at = Column(DateTime())
    roles = relationship('Role', secondary='roles_users',
                         backref=backref('users', lazy='dynamic'))
    create_datetime = Column(DateTime(), nullable=False, server_default=func.now())
    update_datetime = Column(
        DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=datetime.datetime.utcnow,
    )
    def has_role(self, role):
        # NOTE(review): this overrides RoleMixin.has_role, which also
        # accepts role *names*; here `role` must compare equal to a Role
        # instance -- confirm callers pass Role objects.
        return role in self.roles
class Co2(db.Model):
    """CO2 reading joined by timestamp to an electricity-use record.

    NOTE(review): the FK targets table 'elec_use', the name SQLAlchemy
    derives from the ElecUse class (its own explicit name assignment is a
    typo and is ignored) -- keep these in sync.
    """
    __tablename__ = 'co2'
    date_time = Column(DateTime(), ForeignKey('elec_use.date_time', ondelete='CASCADE'), primary_key=True)
    co2 = Column(db.Float)
    usage = relationship('ElecUse', backref=backref('co2', lazy='dynamic'))
class ElecUse(db.Model):
    """Electricity usage time series (one reading per timestamp)."""
    # BUG FIX: was `_tablename__` (single leading underscore), so the
    # explicit name 'electricity_use' was silently ignored and SQLAlchemy
    # derived 'elec_use' from the class name.  Co2.date_time's foreign key
    # targets 'elec_use.date_time', so that derived name is the one the
    # schema depends on -- make it explicit instead of renaming the table.
    __tablename__ = 'elec_use'
    date_time = Column(DateTime(), primary_key=True)
    electricity_use = Column(db.Float)
class RealPowerReadings(db.Model):
    """Per-device power readings keyed by (timestamp, device name)."""
    __tablename__ = 'real_power_readings'
    __table_args__ = (UniqueConstraint('date_time', 'device_name'),)
    date_time = Column(DateTime(), primary_key=True)
    device_name = Column(String(255), primary_key=True)
    power = Column(db.Float)
    # True when the device generates power rather than consuming it.
    power_generator = Column(Boolean())
    create_datetime = Column(DateTime(), nullable=False, server_default=func.now())
    update_datetime = Column(
        DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=datetime.datetime.utcnow,
    )
class RealSiteReadings(db.Model):
    """Site-wide temperature and power readings per timestamp."""
    __tablename__ = 'real_site_readings'
    date_time = Column(DateTime(), primary_key=True)
    temperature = Column(db.Float)
    power = Column(db.Float)
    create_datetime = Column(DateTime(), nullable=False, server_default=func.now())
    update_datetime = Column(
        DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=datetime.datetime.utcnow,
    )
class Trading(db.Model):
    """Market bids placed per (timestamp, settlement period).

    Explicit column names (with spaces) differ from the attribute names.
    """
    __tablename__='trading'
    __table_args__ = (UniqueConstraint('date_time', 'Period'),)
    date_time=Column("date_time" , DateTime(), primary_key=True)
    period=Column("Period",Integer(), primary_key=True,nullable=False)
    bid_units=Column("Bid Units Volume(kWh)",db.Float)
    bid_type=Column("Bid type",String(255))
    bid_price=Column("Bid Price",db.Float)
    bid_outcome=Column("Bid outcome",db.Float)
class ClearoutPrice(db.Model):
    """Market clearing price and traded volume per (timestamp, period)."""
    __tablename__='clearout_price'
    date_time=Column("date_time" , DateTime(), primary_key=True)
    period=Column("Period",Integer(), primary_key=True,nullable=False)
    closing_price=Column("Bid closing price",db.Float)
    volume=Column("Volume", db.Float)
class PredictedPrice(db.Model):
    """Forecast grid load and market price per timestamp/period."""
    __tablename__='predicted_price'
    date_time=Column("Date, time" , DateTime(),unique=True, primary_key=True)
    period=Column("Period",Integer(),nullable=False)
    predicted_load=Column("Predicted grid load(MWh)",db.Float,nullable=False)
    predicted_price=Column("Predicted market price",db.Float, nullable=False)
class ActualLoad(db.Model):
    """Actual on-site generation/consumption, imbalance and net profit."""
    __tablename__='actual_load'
    date_time=Column("Date, time" , DateTime(),unique=True, primary_key=True)
    period=Column("Period",Integer(),nullable=False)
    actual_generation=Column("Volume Generated onsite",db.Float)
    actual_usage=Column("Volume consumed onsite",db.Float)
    imbalance_vol=Column("Imbalance volume",db.Float)
    imbalance_price=Column("Imbalance Price",db.Float)
    net_profit=Column("Net profit",db.Float)
class ElecGen(db.Model):
    """Electricity generated on site, split into wind and solar."""
    # BUG FIX: was `_tablename__` (single leading underscore), which
    # SQLAlchemy ignores, so the table was being auto-named 'elec_gen'.
    # No visible foreign keys target that derived name, so restore the
    # intended explicit name.  An existing database will need a migration
    # (rename elec_gen -> electricity_gen).
    __tablename__ = 'electricity_gen'
    date_time = Column(DateTime(), primary_key=True)
    wind_gen = Column(db.Float)
    solar_gen = Column(db.Float)
|
import json
import matplotlib.pyplot as plt
import torch
import os
import numpy as np
# save the training parameters in a txt at the beginning of training
def save_params(par, model_dir, name):
    """Dump every public attribute of `par` to a JSON file at the start
    of training.

    Parameters
    ----------
    par : object
        Object whose non-dunder attributes are the training parameters;
        attribute values must be JSON-serializable.
    model_dir : str
        Output directory.  FIX: the path is now built with os.path.join,
        so a missing trailing separator no longer mangles the file name.
    name : str
        Suffix for the output file 'parameters_<name>.json'.
    """
    # getattr is the idiomatic spelling of par.__getattribute__(att).
    data = {att: getattr(par, att)
            for att in dir(par) if not att.startswith('__')}
    out_path = os.path.join(model_dir, "parameters_" + name + ".json")
    with open(out_path, 'w') as outfile:
        json.dump(data, outfile, indent=4)
def plot_loss(loss, epoch, iteration, step, loss_name, loss_dir, avg=False):
    """Plot a loss curve and save it as a PNG under `loss_dir`.

    Parameters
    ----------
    loss : sequence of float
        One loss value per iteration (expected length == `iteration`).
    epoch, iteration : int
        Used for the x-axis length and the output file name.
    step : int
        Unused; kept for backward compatibility with existing callers.
    loss_name : str
        Axis/title label and file-name prefix.
    loss_dir : str
        Output directory, created on demand.
    avg : bool, optional
        If True, overlay the mean of every k=10 consecutive values in red.
    """
    x = list(range(iteration))
    y = loss
    plt.plot(x, y, 'b')
    if avg:  # plot the average of every k elements
        k = 10
        if iteration >= k:
            arr = np.asarray(loss, dtype=np.float32)
            # NOTE(review): reshape(-1, k) requires len(loss) to be a
            # multiple of k -- confirm callers guarantee this.
            loss_mean = np.mean(arr.reshape(-1, k), axis=1)
            x_mean = list(range(k - 1, iteration, k))
            plt.plot(x_mean, loss_mean, 'r')
    plt.axis([0, iteration, 0, max(loss)])
    plt.ylabel(loss_name + ' Loss')
    plt.xlabel('Iter')
    plt.title(loss_name)
    # FIX: exist_ok avoids the check-then-create race of the original
    # exists()/makedirs() pair; os.path.join tolerates any dir spelling.
    os.makedirs(loss_dir, exist_ok=True)
    plt.savefig(os.path.join(
        loss_dir,
        loss_name + "_" + str(epoch) + "_" + str(iteration) + "_loss.png"))
    plt.clf()
def save_model(model, model_dir, model_name, train_iter):
    """Serialize `model` to `<model_dir>/<model_name>_<train_iter>.pt`.

    FIX: the path is built with os.path.join so `model_dir` no longer
    needs a trailing separator.
    """
    model_path = os.path.join(model_dir, model_name + "_" + str(train_iter) + ".pt")
    torch.save(model, model_path)
    print("Saved:", model_path)
def load_model(model_dir, model_name, test_iter, eval=True):
    """Load a model saved by `save_model` and move it to the GPU.

    Parameters
    ----------
    model_dir : str
        Directory prefix; expected to end with a path separator since the
        path is built by plain string concatenation.
    model_name : str
        Base name used when the checkpoint was saved.
    test_iter : int
        Iteration suffix of the checkpoint to load.
    eval : bool, optional
        If True put the model in eval() mode, otherwise train() mode.
        NOTE(review): this parameter shadows the builtin `eval`.

    Returns
    -------
    The deserialized model on CUDA (requires a CUDA-capable device:
    `.cuda()` is called unconditionally).
    """
    model_path = model_dir+model_name+"_"+str(test_iter)+".pt"
    model = torch.load(model_path)
    print("Loaded model:", model_path)
    model.cuda()
    if eval:
        model.eval()
    else:
        model.train()
    return model
#!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are
an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are
1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284.
The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
'''
from math import sqrt
def loop(n):
    """Sum of all amicable numbers found by scanning i < n.

    For each i, let a = d(i) and b = d(a); (i, a) is an amicable pair when
    d(a) == i and i != a.  Both members go into a set, so pairs seen from
    either side are not double-counted.
    """
    amicables = set()
    for i in range(1, n):
        a = get_sum_of_divisors(i)
        b = get_sum_of_divisors(a)
        if i == b and a != b:
            amicables.add(a)
            amicables.add(b)
    return sum(amicables)
def get_sum_of_divisors(n):
    """Return d(n): the sum of the proper divisors of n."""
    divisors = [1]
    for i in range(2, int(sqrt(n)+1)):
        if i*i == n:
            # Perfect square: count the square root only once.
            divisors.append(i)
        elif n % i == 0:
            # BUG FIX: floor division keeps divisors integral on Python 3
            # (plain `/` would produce floats there); identical on Python 2.
            divisors.append(n//i)
            divisors.append(i)
    return sum(divisors)
if __name__ == '__main__':
    # FIX: parenthesized print works under both Python 2 and 3; the bare
    # `print` statement was Python-2-only.
    print(loop(10000))
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
# Replace ' && ' -> ' and ' and ' || ' -> ' or '.  The lookaround
# assertions match the flanking spaces without consuming them, so
# consecutive operators (e.g. '&& &&') are all converted.
# def multiple_replace(dict, text): #this have error when they're sticked e.g. && ||
#     # Create a regular expression from the dictionary keys
#     regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
#     # For each match, look-up corresponding value in dictionary
#     return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
# dict = {
#    " || " : " or ",
#    " && " : " and "
# }
lines= int(input())
for _ in range(lines):
    text = input()
    text = re.sub(r"(?<= )&&(?= )", "and", text)
    text = re.sub(r"(?<= )\|\|(?= )", "or", text)
    print(text)
# print (multiple_replace(dict, input()))
# print (re.sub("(<!--.*?-->)", "", input())) #remove comment
|
from placement import Placement
from campaign import Campaign
from copyback import Copyback
from rebuild import Rebuild
from batch import Batch
from poisson import Poisson
from exponential import Exponential
from server import Server
from state import State
from disk import Disk
from heapq import *
#------------------------------------
# Simulations
#------------------------------------
class Simulate:
    """Monte-Carlo simulator of disk failures, fast rebuild and copyback
    in a multi-server erasure-coded storage system.

    Python 2 code (print statements); code kept unchanged, comments only.
    """
    def __init__(self,mission_time, plus_one, num_servers, num_disks_per_server, num_spares_per_server, k, m, fb, dp_type, failure_type, mtbf, failure_percent, rebuildIO, slaTime, copybackIO, diskCap, useRatio):
        #---------------------------
        # compressed time window
        #---------------------------
        self.mission_time = mission_time
        #---------------------------
        # system and placement
        #---------------------------
        self.sys = Campaign(plus_one, num_servers, num_disks_per_server, num_spares_per_server, k, m, fb, dp_type, diskCap, useRatio)
        self.place = Placement(self.sys)
        #--------------------------------------
        # fast rebuild + copyback phases
        #--------------------------------------
        self.rebuild = Rebuild(self.sys, rebuildIO)
        self.copyback = Copyback(copybackIO, slaTime)
        #--------------------------------------
        # failures distribution and mtbf
        #--------------------------------------
        self.mtbf = mtbf
        self.failure_type = failure_type
        self.failure_percent = failure_percent
    def reset(self):
        """Draw a fresh failure trace, refill the event queue and rebuild
        the system state for a new iteration."""
        #----------------------------------------------
        # failures arrive by using poisson distribution
        #----------------------------------------------
        # failure_type selects the arrival process:
        # 0 = Poisson, 1 = Exponential, 2 = Batch (correlated/cascading).
        if self.failure_type == 0:
            trace = Poisson(self.sys.num_disks, self.failure_percent, self.mtbf)
        if self.failure_type == 1:
            trace = Exponential(self.sys.num_disks, self.failure_percent, self.mtbf)
        if self.failure_type == 2:
            trace = Batch(self.sys.num_disks, self.failure_percent, self.mtbf, cascade_factor=10.0)
        self.trace_entry = trace.generate_failures()
        #------------------------------------------
        # put the disk failures in the event queue
        #------------------------------------------
        self.events_queue = []
        for disk_fail_time, diskId in self.trace_entry:
            heappush(self.events_queue, (disk_fail_time, Disk.EVENT_FAIL, diskId))
            print ">>>>> reset disk", diskId, Disk.EVENT_FAIL, "@",disk_fail_time
            # NOTE(review): mission_time ends as the *last* trace entry's
            # time, which is the latest failure only if the trace is
            # time-ordered -- confirm generate_failures() guarantees that.
            self.mission_time = disk_fail_time
        print " - system mission time - ", self.mission_time
        #------------------------------
        # initialize the system state
        #------------------------------
        self.state = State(self.sys, self.rebuild, self.copyback, self.events_queue)
    def get_next_wait_events(self):
        """Pop waiting repair events per server, at most one per available
        spare, batching simultaneous same-type events into one deviceset."""
        events = []
        #---------------------------------------------------------------------------------------
        if self.sys.dp_type == 0 or self.sys.dp_type == 1 or self.sys.dp_type == 2:
        #---------------------------------------------------------------------------------------
            for serverId in self.sys.servers:
                if self.state.servers[serverId].wait_queue:
                    avail_spares = self.state.servers[serverId].avail_spares
                    while avail_spares and self.state.servers[serverId].wait_queue:
                        print "\n@wait_queue in server [", serverId , "] avail spares:",self.state.servers[serverId].avail_spares
                        deviceset = []
                        next_event = heappop(self.state.servers[serverId].wait_queue)
                        #------------------------------------------
                        next_event_time = next_event[0]
                        next_event_type = next_event[1]
                        deviceset.append(next_event[2])
                        avail_spares -= 1
                        # Batch together same-time, same-type events while
                        # spares remain.
                        while self.state.servers[serverId].wait_queue and self.state.servers[serverId].wait_queue[0][0] == next_event_time and self.state.servers[serverId].wait_queue[0][1] == next_event_type and avail_spares > 0:
                            simultaneous_event = heappop(self.state.servers[serverId].wait_queue)
                            deviceset.append(simultaneous_event[2])
                            avail_spares -= 1
                        print ">>>>> pop server wait disk", deviceset, next_event_type, " - time - ", next_event_time
                        events.append((next_event_time, next_event_type, deviceset))
        return events
    def get_next_events(self):
        """Return the next batch of simultaneous events: pending per-server
        wait events take priority over the global failure queue.  Returns
        [(None, None, None)] when nothing remains."""
        #--------------------------------------------------------------
        wait_events = self.get_next_wait_events()
        if len(wait_events) > 0:
            return wait_events
        #--------------------------------------------------------------
        if self.events_queue:
            deviceset = []
            next_event = heappop(self.events_queue)
            #------------------------------------------
            next_event_time = next_event[0]
            next_event_type = next_event[1]
            deviceset.append(next_event[2])
            #----------------------------------------------
            # gather the simultaneous failure/repair events
            #----------------------------------------------
            while self.events_queue and self.events_queue[0][0]==next_event_time and self.events_queue[0][1]==next_event_type:
                simultaneous_event = heappop(self.events_queue)
                deviceset.append(simultaneous_event[2])
            print "\n\n>>>>> pop next event -", deviceset, next_event_type, next_event_time
            return [(next_event_time, next_event_type, deviceset)]
        else:
            return [(None, None, None)]
    def run_simulation(self, iterations_per_worker, traces_per_worker):
        """Run independent iterations and collect their results.

        NOTE(review): `traces_per_worker` is unused -- confirm whether it
        should bound the trace generation."""
        results = []
        for one_iter in range(iterations_per_worker):
            results.append(self.run_iteration(one_iter))
        return results
    def run_iteration(self, num_iter):
        """Single-iteration event loop: apply events until the queue
        drains, the mission time is exceeded, or data loss occurs.
        Returns (MTTDL, loss) on global data loss; None otherwise."""
        self.reset()
        curr_time = 0
        loss = 0
        loopflag = True
        eventDL = 0
        while loopflag:
            for each_event in self.get_next_events():
                (event_time, event_type, deviceset) = each_event
                #-----------------------------
                # if invalid event, then exit
                #-----------------------------
                if event_time == None:
                    loopflag = False
                    break
                #----------------------------------
                # update the system time and state
                #----------------------------------
                if curr_time < event_time:
                    curr_time = event_time
                #---------------------------
                # exceed mission-time, exit
                #---------------------------
                if curr_time > self.mission_time:
                    loopflag = False
                    loss = self.place.calculate_dataloss(self.state)
                    break
                #----------------------------------
                self.state.update_clock(event_type, curr_time)
                self.state.update_state(event_type, deviceset)
                self.state.update_event(event_type, deviceset)
                #-------------------------------------------------------
                # degraded rebuild or copyback event, continue
                #-------------------------------------------------------
                if event_type == Disk.EVENT_DEGRADEDREBUILD or event_type == Disk.EVENT_COPYBACK:
                    continue
                #------------------------------------------
                # check the PDL according to failure events
                #------------------------------------------
                if event_type == Disk.EVENT_FAIL:
                    eventDL = eventDL + 1
                    if self.place.check_global_dataloss(self.state, deviceset):
                        print "############### data loss ##############", eventDL, "deviceset", deviceset, curr_time, ">>> unrecoverables - ", self.state.MTTDL, "\n"
                        return (self.state.MTTDL, loss)
|
from rest_framework import routers
from rest_framework.urlpatterns import format_suffix_patterns
from site_manage.views import Assign
from django.urls import path
from . import views
# REST router without trailing slashes; /site maps to SiteViewSet routes.
router = routers.SimpleRouter(trailing_slash=False)
router.register(r'site', views.SiteViewSet)
urlpatterns = [
    path('assign', Assign.as_view())
]
# Append router-generated routes, each also accepting a format suffix.
urlpatterns += format_suffix_patterns(router.urls)
|
from bs4 import BeautifulSoup
import requests
import argparse
import requests.exceptions
from urllib.parse import urlsplit
from collections import deque
import re
'''
A script to scrape youtube links from a predefined website of choice.
'''
# Command-line interface: -w/--website is the crawl start URL.
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--website", required=True,
                help="URL to crawl")
args = vars(ap.parse_args())
# function to extract youtube links from web pages
# BFS queue of URLs, seeded with the start URL.
urls=deque()
urls.append(args['website'])
def process_urls(urls_to_process):
    """Breadth-first crawl of the queued URLs, collecting YouTube embed
    links into YoutubeLinks.txt and enqueuing same-domain pages.

    Parameters
    ----------
    urls_to_process : collections.deque
        Queue of absolute URLs to crawl; consumed from the left.
    """
    processed_urls = set()   # urls that have already been crawled
    youtube_links = set()    # all youtube embed links seen so far
    while len(urls_to_process) > 0:
        # Move the next url from the queue into the processed set.
        url = urls_to_process.popleft()
        processed_urls.add(url)
        # Base url used to recognise in-domain links and resolve
        # root-relative ones.
        parts = urlsplit(url)
        base_url = "{0.scheme}://{0.netloc}".format(parts)
        print("Processing %s" % url)
        try:
            response = requests.get(url)
        except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError):
            # ignore pages with errors
            continue
        # Extract all youtube embed addresses on this page.
        new_youtube_link = set(re.findall(
            r"https://www.youtube.com/embed/[a-z,A-Z,0-9]+", response.text, re.I))
        youtube_links.update(new_youtube_link)
        # BUG FIX: the file handle was opened on every loop pass but only
        # closed when links were found; open it only when needed and use
        # a context manager so it always closes.
        if new_youtube_link:
            with open('YoutubeLinks.txt', 'a') as f:
                f.write('\n'.join(new_youtube_link))
                f.write('\n')
        soup = BeautifulSoup(response.text)
        # Extract more urls from the current page ('<a>' tags).
        for anchor in soup.find_all("a"):
            link = anchor.attrs["href"] if "href" in anchor.attrs else ''
            # BUG FIX: the original compared against the literal string
            # 'base_url', so no link ever qualified; compare against the
            # actual base url and resolve root-relative links.
            if link.startswith(base_url):
                use_link = link
            elif link.startswith('/'):
                use_link = base_url + link
            else:
                # BUG FIX: cross-site/unusable links used to be enqueued
                # as empty strings and pointlessly re-fetched; skip them.
                continue
            if use_link not in urls_to_process and use_link not in processed_urls:
                urls_to_process.append(use_link)
    return
process_urls(urls)
|
from unittest import TestCase, main
from os import remove
from os.path import exists, join, basename
from shutil import move
from biom import load_table
from pandas.util.testing import assert_frame_equal
from functools import partial
import numpy.testing as npt
from qiita_core.util import qiita_test_checker
from qiita_core.testing import wait_for_processing_job
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
from json import dumps
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestAnalysis(TestCase):
    def setUp(self):
        """Build fixture paths and (re)load the BIOM plugin definitions."""
        self.analysis = qdb.analysis.Analysis(1)
        self.portal = qiita_config.portal
        _, self.fp = qdb.util.get_mountpoint("analysis")[0]
        self.get_fp = partial(join, self.fp)
        self.biom_fp = self.get_fp("1_analysis_dt-18S_r-1_c-3.biom")
        self._old_portal = qiita_config.portal
        self.table_fp = None
        # fullpaths for testing
        self.duplicated_samples_not_merged = self.get_fp(
            "not_merged_samples.txt")
        self.map_exp_fp = self.get_fp("1_analysis_mapping_exp.txt")
        # Re-register the BIOM plugin(s) so processing jobs can run.
        from glob import glob
        conf_files = glob(join(qiita_config.plugin_dir, "BIOM*.conf"))
        for i, fp in enumerate(conf_files):
            qdb.software.Software.from_file(fp, update=True)
    def tearDown(self):
        """Restore portal, artifact visibility and fixture files touched
        by the tests."""
        self.analysis.artifacts[0].visibility = 'private'
        qiita_config.portal = self.portal
        # Truncate the biom fixture back to empty.
        with open(self.biom_fp, 'w') as f:
            f.write("")
        fp = self.get_fp('testfile.txt')
        if exists(fp):
            remove(fp)
        # Put a moved processed-data table back where it belongs.
        if self.table_fp:
            mp = qdb.util.get_mountpoint("processed_data")[0][1]
            if exists(self.table_fp):
                move(self.table_fp,
                     join(mp, "2_study_1001_closed_reference_otu_table.biom"))
        qiita_config.portal = self._old_portal
    def _wait_for_jobs(self, analysis):
        """Block until every job of `analysis` finishes; print errors."""
        for j in analysis.jobs:
            wait_for_processing_job(j.id)
            if j.status == 'error':
                print(j.log.msg)
    def _create_analyses_with_samples(self, user='demo@microbio.me',
                                      merge=False):
        """Aux function to create an analysis with samples
        Parameters
        ----------
        user : str, optional
            The user email to attach to the analysis. Default: demo@microbio.me
        merge : bool, optional
            Merge duplicated ids or not
        Returns
        -------
        qiita_db.analysis.Analysis
        Notes
        -----
        Replicates the samples contained in Analysis(1) at the moment of
        creation of this function (September 15, 2016)
        """
        user = qdb.user.User(user)
        dflt_analysis = user.default_analysis
        dflt_analysis.add_samples(
            {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                 '1.SKM9.640192', '1.SKM4.640180']})
        new = qdb.analysis.Analysis.create(
            user, "newAnalysis", "A New Analysis", from_default=True,
            merge_duplicated_sample_ids=merge)
        self._wait_for_jobs(new)
        return new
    def test_lock_samples(self):
        """The default analysis stays editable; others raise on lock."""
        dflt = qdb.user.User('demo@microbio.me').default_analysis
        # The default analysis can have samples added/removed
        dflt._lock_samples()
        QE = qdb.exceptions
        with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
            qdb.analysis.Analysis(1)._lock_samples()
    def test_get_by_status(self):
        """get_by_status respects both artifact visibility and portal."""
        qiita_config.portal = 'QIITA'
        self.assertEqual(
            qdb.analysis.Analysis.get_by_status('public'), set([]))
        qiita_config.portal = 'EMP'
        self.assertEqual(
            qdb.analysis.Analysis.get_by_status('public'), set([]))
        # Publicizing the artifact exposes the analysis on QIITA only.
        qiita_config.portal = 'QIITA'
        self.analysis.artifacts[0].visibility = 'public'
        self.assertEqual(qdb.analysis.Analysis.get_by_status('public'),
                         {self.analysis})
        qiita_config.portal = 'EMP'
        self.assertEqual(
            qdb.analysis.Analysis.get_by_status('public'), set([]))
    def test_can_be_publicized(self):
        """An analysis is publicizable only once its artifacts are public."""
        analysis = qdb.analysis.Analysis(1)
        self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6]))
        a4 = qdb.artifact.Artifact(4)
        a4.visibility = 'public'
        self.assertEqual(analysis.can_be_publicized, (True, []))
        a4.visibility = 'private'
        self.assertEqual(analysis.can_be_publicized, (False, [4, 5, 6]))
    def test_add_artifact(self):
        """add_artifact attaches the artifact to the analysis."""
        obs = self._create_analyses_with_samples()
        exp = qdb.artifact.Artifact(4)
        obs.add_artifact(exp)
        self.assertIn(exp, obs.artifacts)
    def test_has_access_public(self):
        """Public analyses are visible on their portal only."""
        analysis = self._create_analyses_with_samples("admin@foo.bar")
        analysis.artifacts[0].visibility = 'public'
        qiita_config.portal = 'QIITA'
        self.assertTrue(
            analysis.has_access(qdb.user.User("demo@microbio.me")))
        qiita_config.portal = 'EMP'
        self.assertFalse(
            analysis.has_access(qdb.user.User("demo@microbio.me")))
def test_has_access_shared(self):
self.assertTrue(
self.analysis.has_access(qdb.user.User("shared@foo.bar")))
def test_has_access_private(self):
self.assertTrue(
self.analysis.has_access(qdb.user.User("test@foo.bar")))
def test_has_access_admin(self):
qiita_config.portal = 'QIITA'
self.assertTrue(
self.analysis.has_access(qdb.user.User("admin@foo.bar")))
qiita_config.portal = 'EMP'
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.analysis.Analysis(1).has_access(qdb.user.User("admin@foo.bar"))
def test_has_access_no_access(self):
self.assertFalse(
self.analysis.has_access(qdb.user.User("demo@microbio.me")))
def test_can_edit(self):
a = qdb.analysis.Analysis(1)
self.assertTrue(a.can_edit(qdb.user.User('test@foo.bar')))
self.assertTrue(a.can_edit(qdb.user.User('shared@foo.bar')))
self.assertTrue(a.can_edit(qdb.user.User('admin@foo.bar')))
self.assertFalse(a.can_edit(qdb.user.User('demo@microbio.me')))
    def test_create_nonqiita_portal(self):
        """Creating from a non-QIITA portal still associates both portals."""
        qiita_config.portal = "EMP"
        obs = qdb.analysis.Analysis.create(
            qdb.user.User("admin@foo.bar"), "newAnalysis", "A New Analysis")
        # make sure portal is associated
        self.assertCountEqual(obs._portals, ["QIITA", "EMP"])
    def test_create_from_default(self):
        """create(from_default=True) copies the user's default-analysis samples."""
        with qdb.sql_connection.TRN:
            sql = "SELECT NOW()"
            qdb.sql_connection.TRN.add(sql)
            time1 = qdb.sql_connection.TRN.execute_fetchlast()
        owner = qdb.user.User("test@foo.bar")
        obs = qdb.analysis.Analysis.create(
            owner, "newAnalysis", "A New Analysis", from_default=True)
        self.assertEqual(obs.owner, owner)
        self.assertEqual(obs.name, "newAnalysis")
        self.assertEqual(obs._portals, ["QIITA"])
        # timestamp must be later than the DB time taken before creation
        self.assertLess(time1, obs.timestamp)
        self.assertEqual(obs.description, "A New Analysis")
        self.assertCountEqual(obs.samples, [4])
        self.assertCountEqual(
            obs.samples[4], ['1.SKD8.640184', '1.SKB7.640196',
                             '1.SKM9.640192', '1.SKM4.640180'])
        self.assertEqual(obs.data_types, ['18S'])
        self.assertEqual(obs.shared_with, [])
        self.assertEqual(obs.mapping_file, None)
        self.assertEqual(obs.tgz, None)
        # from_default triggers processing jobs
        self.assertNotEqual(obs.jobs, [])
        self.assertEqual(obs.pmid, None)
    def test_exists(self):
        """exists() is portal-aware: analysis 1 only exists under QIITA."""
        qiita_config.portal = 'QIITA'
        self.assertTrue(qdb.analysis.Analysis.exists(1))
        self.assertFalse(qdb.analysis.Analysis.exists(1000))
        qiita_config.portal = 'EMP'
        self.assertFalse(qdb.analysis.Analysis.exists(1))
        self.assertFalse(qdb.analysis.Analysis.exists(1000))
    def test_delete(self):
        """delete() removes an analysis; unknown ids and artifact-bearing ones raise."""
        # successful delete
        new = qdb.analysis.Analysis.create(
            qdb.user.User('demo@microbio.me'), "newAnalysis",
            "A New Analysis")
        self.assertTrue(qdb.analysis.Analysis.exists(new.id))
        qdb.analysis.Analysis.delete(new.id)
        self.assertFalse(qdb.analysis.Analysis.exists(new.id))
        # not possible to delete the same id twice
        QE = qdb.exceptions
        with self.assertRaises(QE.QiitaDBUnknownIDError):
            qdb.analysis.Analysis.delete(new.id)
        # Analysis with artifacts cannot be deleted
        with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
            qdb.analysis.Analysis.delete(1)
    def test_retrieve_owner(self):
        """owner returns the User that created the analysis."""
        self.assertEqual(self.analysis.owner, qdb.user.User("test@foo.bar"))
    def test_retrieve_name(self):
        """name matches the fixture value."""
        self.assertEqual(self.analysis.name, "SomeAnalysis")
    def test_retrieve_description(self):
        """description matches the fixture value."""
        self.assertEqual(self.analysis.description, "A test analysis")
    def test_set_description(self):
        """description is writable and round-trips."""
        self.analysis.description = "New description"
        self.assertEqual(self.analysis.description, "New description")
    def test_retrieve_samples(self):
        """samples maps artifact ids to their sample-id lists."""
        exp = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                   '1.SKM9.640192', '1.SKM4.640180'],
               5: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                   '1.SKM9.640192', '1.SKM4.640180'],
               6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                   '1.SKM9.640192', '1.SKM4.640180']}
        self.assertCountEqual(self.analysis.samples, exp)
    def test_retrieve_portal(self):
        """_portals reflects the creating portal."""
        self.assertEqual(self.analysis._portals, ["QIITA"])
    def test_retrieve_data_types(self):
        """data_types lists the distinct types of the attached artifacts."""
        exp = ['18S', '16S']
        self.assertCountEqual(self.analysis.data_types, exp)
    def test_retrieve_shared_with(self):
        """shared_with lists the users the analysis was shared with."""
        self.assertEqual(self.analysis.shared_with,
                         [qdb.user.User("shared@foo.bar")])
    def test_retrieve_jobs(self):
        """The fixture analysis has no jobs."""
        self.assertEqual(self.analysis.jobs, [])
    def test_retrieve_pmid(self):
        """pmid matches the fixture value."""
        self.assertEqual(self.analysis.pmid, "121112")
    def test_set_pmid(self):
        """pmid starts unset on a fresh analysis and is writable."""
        new = self._create_analyses_with_samples("admin@foo.bar")
        self.assertIsNone(new.pmid)
        new.pmid = "11211221212213"
        self.assertEqual(new.pmid, "11211221212213")
    def test_retrieve_mapping_file(self):
        """mapping_file resolves to the fixture mapping file on disk."""
        exp = join(self.fp, "1_analysis_mapping.txt")
        obs = self.analysis.mapping_file
        self.assertIsNotNone(obs)
        self.assertEqual(
            qdb.util.get_filepath_information(obs)['fullpath'], exp)
        self.assertTrue(exists(exp))
    def test_retrieve_tgz(self):
        """tgz returns a file registered via _add_file."""
        # generating here as the tgz is only generated once the analysis runs
        # to completion (un)successfully
        analysis = self._create_analyses_with_samples("admin@foo.bar")
        fp = self.get_fp('test.tgz')
        with open(fp, 'w') as f:
            f.write('')
        analysis._add_file(fp, 'tgz')
        self.assertEqual(analysis.tgz, fp)
    def test_retrieve_tgz_none(self):
        """tgz is None when no tgz file was registered."""
        self.assertIsNone(self.analysis.tgz)
    def test_summary_data(self):
        """summary_data reports study/artifact/sample counts."""
        obs = self.analysis.summary_data()
        exp = {'studies': 1,
               'artifacts': 3,
               'samples': 5}
        self.assertEqual(obs, exp)
    def test_add_remove_samples(self):
        """Samples can be added, then removed per-artifact, per-sample, or both."""
        analysis = qdb.user.User('shared@foo.bar').default_analysis
        exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180', '1.SKB8.640193'],
               5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180', '1.SKB8.640193'],
               6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180', '1.SKB8.640193']}
        analysis.add_samples(exp)
        obs = analysis.samples
        self.assertCountEqual(list(obs.keys()), exp.keys())
        for k in obs:
            self.assertCountEqual(obs[k], exp[k])
        # removing (artifact, sample) pairs only affects that artifact
        analysis.remove_samples(artifacts=(qdb.artifact.Artifact(4), ),
                                samples=('1.SKB8.640193', ))
        exp = {4: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180'],
               5: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180', '1.SKB8.640193'],
               6: ['1.SKD8.640184', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180', '1.SKB8.640193']}
        obs = analysis.samples
        self.assertCountEqual(list(obs.keys()), exp.keys())
        for k in obs:
            self.assertCountEqual(obs[k], exp[k])
        # removing by sample id only affects every artifact
        analysis.remove_samples(samples=('1.SKD8.640184', ))
        exp = {4: ['1.SKB7.640196', '1.SKM9.640192', '1.SKM4.640180'],
               5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180'],
               6: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
                   '1.SKM4.640180']}
        self.assertCountEqual(analysis.samples, exp)
        # removing by artifact only drops all its samples
        analysis.remove_samples(
            artifacts=(qdb.artifact.Artifact(4), qdb.artifact.Artifact(5)))
        exp = {6: {'1.SKB7.640196', '1.SKB8.640193',
                   '1.SKM4.640180', '1.SKM9.640192'}}
        self.assertCountEqual(analysis.samples, exp)
    def test_share_unshare(self):
        """share/unshare toggles membership in shared_with."""
        analysis = self._create_analyses_with_samples()
        user = qdb.user.User("admin@foo.bar")
        self.assertEqual(analysis.shared_with, [])
        analysis.share(user)
        exp = [user]
        self.assertEqual(analysis.shared_with, exp)
        analysis.unshare(user)
        self.assertEqual(analysis.shared_with, [])
    def test_build_mapping_file(self):
        """_build_mapping_file writes the expected per-analysis mapping file."""
        analysis = self._create_analyses_with_samples()
        samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
        npt.assert_warns(qdb.exceptions.QiitaDBWarning,
                         analysis._build_mapping_file, samples)
        obs = qdb.util.get_filepath_information(
            analysis.mapping_file)['fullpath']
        exp = self.get_fp("%s_analysis_mapping.txt" % analysis.id)
        self.assertEqual(obs, exp)
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            obs, index='#SampleID')
        exp = qdb.metadata_template.util.load_template_to_dataframe(
            self.map_exp_fp, index='#SampleID')
        # assert_frame_equal assumes same order on the rows, thus sorting
        # frames by index
        obs.sort_index(inplace=True)
        exp.sort_index(inplace=True)
        # then sorting columns
        obs = obs.reindex(sorted(obs.columns), axis=1)
        exp = exp.reindex(sorted(exp.columns), axis=1)
        assert_frame_equal(obs, exp, check_like=True)
    def test_build_mapping_file_duplicated_samples_no_merge(self):
        """With merge disabled, duplicated sample ids stay as separate rows."""
        analysis = self._create_analyses_with_samples()
        samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
                   3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
        npt.assert_warns(qdb.exceptions.QiitaDBWarning,
                         analysis._build_mapping_file, samples, True)
        mapping_fp = qdb.util.get_filepath_information(
            analysis.mapping_file)['fullpath']
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            mapping_fp, index='#SampleID')
        exp = qdb.metadata_template.util.load_template_to_dataframe(
            self.duplicated_samples_not_merged, index='#SampleID')
        # assert_frame_equal assumes same order on the rows, thus sorting
        # frames by index
        obs.sort_index(inplace=True)
        exp.sort_index(inplace=True)
        # then sorting columns
        obs = obs.reindex(sorted(obs.columns), axis=1)
        exp = exp.reindex(sorted(exp.columns), axis=1)
        assert_frame_equal(obs, exp, check_like=True)
    def test_build_mapping_file_duplicated_samples_merge(self):
        """With merging (the default), duplicated sample ids collapse to one row."""
        analysis = self._create_analyses_with_samples()
        samples = {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
                   3: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']}
        npt.assert_warns(qdb.exceptions.QiitaDBWarning,
                         analysis._build_mapping_file, samples)
        mapping_fp = qdb.util.get_filepath_information(
            analysis.mapping_file)['fullpath']
        obs = qdb.metadata_template.util.load_template_to_dataframe(
            mapping_fp, index='#SampleID')
        exp = qdb.metadata_template.util.load_template_to_dataframe(
            self.map_exp_fp, index='#SampleID')
        # assert_frame_equal assumes same order on the rows, thus sorting
        # frames by index
        obs.sort_index(inplace=True)
        exp.sort_index(inplace=True)
        # then sorting columns
        obs = obs.reindex(sorted(obs.columns), axis=1)
        exp = exp.reindex(sorted(exp.columns), axis=1)
        assert_frame_equal(obs, exp, check_like=True)
    def test_build_biom_tables(self):
        """_build_biom_tables emits one biom per data-type/pipeline group."""
        analysis = self._create_analyses_with_samples()
        grouped_samples = {
            '18S || algorithm': [
                (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
        obs_bioms = analysis._build_biom_tables(grouped_samples)
        biom_fp = self.get_fp(
            "%s_analysis_18S_algorithm.biom" % analysis.id)
        obs = [(a, basename(b)) for a, b, _ in obs_bioms]
        self.assertEqual(obs, [('18S', basename(biom_fp))])
        table = load_table(obs_bioms[0][1])
        obs = set(table.ids(axis='sample'))
        exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
        self.assertEqual(obs, exp)
    def test_build_biom_tables_with_references(self):
        """Biom filenames encode the (sanitized) pipeline description."""
        analysis = self._create_analyses_with_samples()
        analysis_id = analysis.id
        grouped_samples = {
            ('18S || Pick closed-reference OTUs (reference: 1) | '
             'Split libraries FASTQ'): [
                (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
                (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])],
            ('18S || Pick closed-reference OTUs (reference: 1) | '
             'Trim (lenght: 150)'): [
                (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
                (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])],
            ('16S || Pick closed-reference OTUs (reference: 2) | '
             'Trim (lenght: 100)'): [
                (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
                (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
        obs_bioms = analysis._build_biom_tables(grouped_samples)
        obs = [(a, basename(b)) for a, b, _ in obs_bioms]
        exp = [
            ('16S', '%s_analysis_16S_PickclosedreferenceOTUsreference2'
                    'Trimlenght100.biom' % analysis_id),
            ('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1'
                    'SplitlibrariesFASTQ.biom' % analysis_id),
            ('18S', '%s_analysis_18S_PickclosedreferenceOTUsreference1'
                    'Trimlenght150.biom' % analysis_id)]
        self.assertCountEqual(obs, exp)
        exp = {'1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'}
        for dt, fp, _ in obs_bioms:
            table = load_table(fp)
            obs = set(table.ids(axis='sample'))
            self.assertEqual(obs, exp)
    def test_build_biom_tables_duplicated_samples_not_merge(self):
        """Without merging, sample ids keep their artifact-id prefix."""
        analysis = self._create_analyses_with_samples()
        grouped_samples = {
            '18S || algorithm': [
                (4, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']),
                (5, ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'])]}
        obs_bioms = analysis._build_biom_tables(grouped_samples, True)
        obs = [(a, basename(b)) for a, b, _ in obs_bioms]
        biom_fp = (
            "%s_analysis_18S_algorithm.biom" % analysis.id)
        self.assertEqual(obs, [('18S', biom_fp)])
        table = load_table(obs_bioms[0][1])
        obs = set(table.ids(axis='sample'))
        exp = {'4.1.SKD8.640184', '4.1.SKB7.640196', '4.1.SKB8.640193',
               '5.1.SKB8.640193', '5.1.SKB7.640196', '5.1.SKD8.640184'}
        self.assertCountEqual(obs, exp)
    def test_build_biom_tables_raise_error_due_to_sample_selection(self):
        """Unknown sample ids make _build_biom_tables fail loudly."""
        grouped_samples = {
            '18S || algorithm': [
                (4, ['sample_name_1', 'sample_name_2', 'sample_name_3'])]}
        with self.assertRaises(RuntimeError):
            self.analysis._build_biom_tables(grouped_samples)
    def test_build_files(self):
        """build_files produces biom + mapping files with matching sample ids."""
        analysis = self._create_analyses_with_samples()
        biom_tables = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, analysis.build_files, False)
        # testing that the generated files have the same sample ids
        biom_fp = biom_tables[0][1]
        biom_ids = load_table(biom_fp).ids(axis='sample')
        mapping_fp = qdb.util.get_filepath_information(
            analysis.mapping_file)['fullpath']
        mf_ids = qdb.metadata_template.util.load_template_to_dataframe(
            mapping_fp, index='#SampleID').index
        self.assertCountEqual(biom_ids, mf_ids)
        # now that the samples have been prefixed
        exp = ['1.SKM9.640192', '1.SKM4.640180', '1.SKD8.640184',
               '1.SKB8.640193', '1.SKB7.640196']
        self.assertCountEqual(biom_ids, exp)
    def test_build_files_post_processing_cmd(self):
        """A configured post_processing_cmd yields extra archive artifacts."""
        tmp = qdb.artifact.Artifact(4).processing_parameters.command
        cmd_id = tmp.id
        # set a known artifact's additional processing command
        # to a known value. Then test for it.
        # qiita_db/test/support_files/worker.py will work w/py2.7 & 3.6 envs.
        results = {}
        results['script_env'] = 'source deactivate; source activate qiita;'
        results['script_path'] = 'qiita_db/test/support_files/worker.py'
        # no additional parameters are needed for worker.py
        # fp_biom and fp_archive will be generated by build_files()
        results['script_params'] = {}
        # convert to json representation and store in PostgreSQL
        results = dumps(results)
        with qdb.sql_connection.TRN:
            sql = """UPDATE qiita.software_command
                     SET post_processing_cmd = %s
                     WHERE command_id = %s"""
            qdb.sql_connection.TRN.add(sql, [results, cmd_id])
            qdb.sql_connection.TRN.execute()
        # create a sample analysis and run build_files on it.
        analysis = self._create_analyses_with_samples()
        biom_files = analysis.build_files(False)
        # if build_files used additional processing commands, it will
        # return a couple of tuples, where the third element contains
        # output archive-artifact data.
        self.assertEqual(2, len(biom_files))
        aid = analysis.id
        exp = [('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries'
                'FASTQ.biom' % aid, None),
               ('%d_analysis_18S_PickclosedreferenceOTUsSplitlibraries'
                'FASTQ.biom' % aid, 'archive_%d.tre' % aid)]
        obs = [(basename(fp1),
                basename(fp2) if fp2 is not None else None)
               for _, fp1, fp2 in biom_files]
        self.assertEqual(obs, exp)
        # cleanup (assume command was NULL previously)
        with qdb.sql_connection.TRN:
            sql = """UPDATE qiita.software_command
                     SET post_processing_cmd = NULL
                     WHERE command_id = %s"""
            qdb.sql_connection.TRN.add(sql, [cmd_id])
            qdb.sql_connection.TRN.execute()
    def test_build_files_merge_duplicated_sample_ids(self):
        """With merge_duplicated_sample_ids, ids get per-artifact prefixes."""
        user = qdb.user.User("demo@microbio.me")
        dflt_analysis = user.default_analysis
        dflt_analysis.add_samples(
            {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                 '1.SKM9.640192', '1.SKM4.640180'],
             5: ['1.SKB8.640193', '1.SKB7.640196', '1.SKM9.640192',
                 '1.SKM4.640180', '1.SKD8.640184'],
             6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                 '1.SKM9.640192', '1.SKM4.640180']})
        new = qdb.analysis.Analysis.create(
            user, "newAnalysis", "A New Analysis", from_default=True,
            merge_duplicated_sample_ids=True)
        self._wait_for_jobs(new)
        biom_tables = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning, new.build_files, True)
        # testing that the generated files have the same sample ids
        biom_ids = []
        for _, fp, _ in biom_tables:
            biom_ids.extend(load_table(fp).ids(axis='sample'))
        mapping_fp = qdb.util.get_filepath_information(
            new.mapping_file)['fullpath']
        mf_ids = qdb.metadata_template.util.load_template_to_dataframe(
            mapping_fp, index='#SampleID').index
        self.assertCountEqual(biom_ids, mf_ids)
        # now that the samples have been prefixed
        exp = ['4.1.SKM9.640192', '4.1.SKM4.640180', '4.1.SKD8.640184',
               '4.1.SKB8.640193', '4.1.SKB7.640196',
               '5.1.SKM9.640192', '5.1.SKM4.640180', '5.1.SKD8.640184',
               '5.1.SKB8.640193', '5.1.SKB7.640196',
               '6.1.SKM9.640192', '6.1.SKM4.640180', '6.1.SKD8.640184',
               '6.1.SKB8.640193', '6.1.SKB7.640196']
        self.assertCountEqual(biom_ids, exp)
    def test_add_file(self):
        """_add_file is exercised indirectly through build_files."""
        # Tested indirectly through build_files
        pass
    def test_is_public_make_public(self):
        """make_public requires every artifact to be public first."""
        analysis = self._create_analyses_with_samples()
        self.assertFalse(analysis.is_public)
        # testing errors
        with self.assertRaises(ValueError):
            analysis.make_public()
        # testing successfully making public
        # 4 is the only artifact being used in _create_analyses_with_samples
        qdb.artifact.Artifact(4).visibility = 'public'
        analysis.make_public()
        self.assertTrue(analysis.is_public)
# Run this module's unittest suite when executed directly.
if __name__ == "__main__":
    main()
|
"""
This is the pseudocode of the framework
It will be rewritten in Python below.
These cases are good and need not to wait for the pedestrians according to the rule
#1.If (Pedestrians are detected but not overstepping the lane line)
#2.If (Pedestrians are waiting out of the lane line)
#3.If (Pedestrians are moving cross the other side of the lane line)
These cases are bad and need to stop and wait for the pedestrians according to the rule
#1.If (Pedestrians overstepped the lane line of the first side)
#2.If (Pedestrians are waiting on the lane line that has Double Yellow lines)
"""
import matplotlib.pyplot as plt
import cv2
import PROTOTYPE.laneline_detection.utils
import imageio
# people contains the rectangle's x and y positions, simple example [(1, 30),(30, 1)]
def is_courtesy(image, lines, people):
    """Draw the pedestrian marker and decide whether the car must wait.

    :param image: image array; modified in place with the marker and a label
    :param lines: detected lane-line segments passed to get_lines()
    :param people: pedestrian bounding box as two (x, y) corner points
    :return: True when the car must wait for the pedestrian, False otherwise
    """
    imshape = image.shape
    # Lane-line fits y = m*x + b for the left (ml, bl) and right (mr, br) lines.
    ml, bl, mr, br = PROTOTYPE.laneline_detection.utils.get_lines(lines, imshape)
    # Horizontal center of the pedestrian box. '//' keeps the coordinate an
    # int: cv2 drawing functions reject float coordinates, and '/' always
    # produces a float under Python 3.
    peoplex = (people[0][0] + people[1][0]) // 2
    peopley1 = people[0][1]
    peopley2 = people[1][1]
    cv2.line(image, (peoplex, peopley1), (peoplex, peopley2), (0, 255, 0), 5)
    '''
    lane line equation: y = mx + b
    if the line intersect with the line represents people
    then we need to wait according to the rule
    '''
    # considering if we need to wait. To add situations by if, please return True
    if mr * peoplex + br < max(peopley1, peopley2):
        # NOTE(review): crossing the right lane line only prints a marker and
        # falls through without forcing a wait — confirm this is intended.
        print (1)
    if ml * peoplex + bl < max(peopley1, peopley2):
        # Pedestrian intersects the left (first-side) lane line: must wait.
        print (2)
        cv2.putText(image, "Please Wait!", (0, imshape[0]), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
        return True
    # considering if we need not to wait
    print (3)
    cv2.putText(image, "It's OK!", (0, imshape[0]), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0), 2)
    return False
|
"""
textbook example: double ended queue.
use cyclic array structure, change size if necessary.
"""
class Empty(Exception):
    """Raised when an element is requested from an empty queue."""
    pass
class ArrayDoubleEndedQueue:
    """Double-ended queue backed by a circular array; both ends add/delete.

    The backing array doubles when full and halves (never below
    DEFAULT_CAPACITY) once the queue is at most one-quarter full, giving
    amortized O(1) operations at both ends.
    """
    DEFAULT_CAPACITY = 10

    def __init__(self):
        self._data = [None] * self.DEFAULT_CAPACITY  # circular storage
        self._front = 0                              # index of first element
        self._capacity = self.DEFAULT_CAPACITY       # always == len(self._data)
        self._num = 0                                # number of stored elements

    def _resize(self, new_capacity, offset=0):
        """Repack the elements into a fresh array of new_capacity slots.

        offset leaves that many empty slots before the first element
        (add_first passes offset=1 so the incoming value lands at index 0).
        Setting _capacity to the actual allocated size fixes the original
        bookkeeping mismatch in the downsize path, where the array could be
        clamped to DEFAULT_CAPACITY while _capacity was still halved.
        """
        fresh = [None] * new_capacity
        for i in range(self._num):
            fresh[i + offset] = self._data[(self._front + i) % self._capacity]
        self._data = fresh
        self._front = offset
        self._capacity = new_capacity

    def add_first(self, value):
        """Insert value at the front of the queue."""
        if self._num == self._capacity:
            self._resize(self._capacity * 2, offset=1)
        self._front = (self._front - 1) % self._capacity
        self._data[self._front] = value
        self._num += 1

    def add_last(self, value):
        """Append value at the back of the queue."""
        if self._num == self._capacity:
            self._resize(self._capacity * 2)
        self._data[(self._front + self._num) % self._capacity] = value
        self._num += 1

    def delete_first(self):
        """Remove and return the front element; raise Empty when empty."""
        if self._num == 0:
            raise Empty("double-ended-queue is already empty")
        value = self._data[self._front]
        self._data[self._front] = None  # release the reference
        self._front = (self._front + 1) % self._capacity
        self._num -= 1
        self._maybe_shrink()
        return value

    def delete_last(self):
        """Remove and return the back element; raise Empty when empty."""
        if self._num == 0:
            raise Empty("double-ended-queue is already empty")
        back = (self._front + self._num - 1) % self._capacity
        value = self._data[back]
        self._data[back] = None  # release the reference
        self._num -= 1
        self._maybe_shrink()
        return value

    def _maybe_shrink(self):
        """Halve the backing array once the queue is at most 1/4 full."""
        if self._capacity > self.DEFAULT_CAPACITY and self._num <= self._capacity // 4:
            self._down_size()

    def first(self):
        """Return (without removing) the front element; raise Empty when empty."""
        if self._num == 0:
            raise Empty("double-ended-queue is already empty")
        return self._data[self._front]

    def last(self):
        """Return (without removing) the back element; raise Empty when empty."""
        if self._num == 0:
            raise Empty("double-ended-queue is already empty")
        return self._data[(self._front + self._num - 1) % self._capacity]

    def is_empty(self):
        """True when the queue holds no elements."""
        return self._num == 0

    def __len__(self):
        return self._num

    def show(self):
        """Print the elements from front to back joined by ' <= '."""
        string = " <= ".join(str(self._data[i % self._capacity])
                             for i in range(self._front, self._front + self._num, 1))
        print(string)

    def _down_size(self):
        """Shrink the backing array, never below DEFAULT_CAPACITY."""
        self._resize(max(self._capacity // 2, self.DEFAULT_CAPACITY))
if __name__ == '__main__':
    # Smoke test: interleave front/back insertion, then drain from both ends.
    q = ArrayDoubleEndedQueue()
    for i in range(30):
        if i % 2 == 0:
            q.add_first(i)
        else:
            q.add_last(i)
    print("initial: insert 30 numbers")
    q.show()
    for _ in range(10):
        print(q.delete_first())
        print(q.delete_last())
    print("delete first 10 and last 10")
    q.show()
    print("length {}".format(len(q)))
    print("first value: {}".format(q.first()))
    print("last value: {}".format(q.last()))
    print("is empty: {}".format(q.is_empty()))
from django.shortcuts import render
from django.http import HttpResponse
from tour.models import *
from activity.models import Activity
from training.models import Training
from organizer.models import Organizer
from django.shortcuts import get_object_or_404
import smtplib
from Wactop.mail import *
import smtplib
# sendemail("kamil129@inbox.ru", "test2")
def home(request):
    """Render the landing page with counts of the active site content.

    For a logged-in non-admin user, 'pk' is their Organizer id; for
    anonymous users and superusers it falls back to 1.
    """
    if request.user.is_authenticated and not request.user.is_superuser:
        organizer_pk = Organizer.objects.get(user=request.user.id).id
    else:
        organizer_pk = 1
    context = {
        'tourcount': Tour.objects.filter(status=1).count(),
        'activitycount': Activity.objects.filter(status=1).count(),
        'trainingcount': Training.objects.filter(status=1).count(),
        'organizercount': Organizer.objects.filter(registered=False).count(),
        'pk': organizer_pk,
    }
    return render(request, 'home-page.html', context)
def test(request):
    """Render test2.html with the EN detail rows of tour #8."""
    tour_obj = Tour.objects.get(pk=8)
    detail_rows = TourDetailEN.objects.filter(tour=tour_obj)
    return render(request, 'test2.html', {'detail': detail_rows})
from property_price_model import create_app, db
from property_price_model.models import Sale
app = create_app()
@app.shell_context_processor
def make_shell_context():
    """Pre-import db and the Sale model into `flask shell` sessions."""
    return {"db": db, "Sale": Sale}
if __name__ == "__main__":
    # Development server only: binds all interfaces with the debugger enabled.
    app.run(host="0.0.0.0", debug=True)
|
import numpy as np
__author__ = 'syao'
# file HEADER: FTP locations and filenames of the MISR products used
HEADER_MI1B2T_URL = 'ftp://l5eil01.larc.nasa.gov/MISR/MI1B2T.003/'
HEADER_MI1B2T_FILENAME = 'MISR_AM1_GRP_TERRAIN_GM_P'
HEADER_MIL2ASAE_URL = 'ftp://l5eil01.larc.nasa.gov/MISR/MIL2ASAE.002/'
HEADER_MIL2ASAE_FILENAME = 'MISR_AM1_AS_AEROSOL_P'
HEADER_MIL2ASAF = 'ftp://l5eil01.larc.nasa.gov/MISR/MIL2ASAF.001/'
MIANSMT_SS_FILENAME = 'MISR_AM1_SMART_TOA_RHO_ATM_SS_F02_0009.hdf'
MIANSMT_MS_FILENAME = 'MISR_AM1_SMART_TOA_RHO_ATM_MS_F02_0009.hdf'
MIANSMT_TDIFF_FILENAME = 'MISR_AM1_SMART_TDIFF_F02_0009.hdf'
MIANSMT_EDIFF_FILENAME = 'MISR_AM1_SMART_BOA_EDIFF_F02_0009.hdf'
HEADER_MIANCAGP_URL1 = 'ftp://l5eil01.larc.nasa.gov/MISR/MIANCAGP.001/1999.11.07/'
HEADER_MIANCAGP_URL2 = 'ftp://l5eil01.larc.nasa.gov/MISR/MIANCAGP.001/1999.11.08/'
HEADER_MIANCAGP_FILENAME = 'MISR_AM1_AGP_P'
# MISR camera parameters: index of each of the nine cameras
CAM_DF = 0
CAM_CF = 1
CAM_BF = 2
CAM_AF = 3
CAM_AN = 4
CAM_AA = 5
CAM_BA = 6
CAM_CA = 7
CAM_DA = 8
# NOTE(review): this is a set literal, so iteration order is unrelated to the
# CAM_* indices above — only len() is relied on here; confirm before iterating.
CAM_NAME = {'DF', 'CF', 'BF', 'AF', 'AN', 'AA', 'BA', 'CA', 'DA'}
CAM_DIM = len(CAM_NAME)
# MISR spatial resolutions (meters) and block dimensions per resolution
R275 = 275
R1100 = 1100
R2200 = 2200
R4400 = 4400
R8800 = 8800
R17600 = 17600
XDIM_R1100 = 128
XDIM_R2200 = 64
XDIM_R4400 = 32
XDIM_R8800 = 16
XDIM_R17600 = 8
YDIM_R1100 = 512
YDIM_R2200 = 256
YDIM_R4400 = 128
YDIM_R8800 = 64
YDIM_R17600 = 32
# r = r4400 # default resolution for retrieval
# XDIM_r = XDIM_r4400 # default X dimension
# YDIM_r = YDIM_r4400 # default Y dimension
# Number of sub-regions in a region
# RegSize = r / r1100
# Scale factor to the 17.6-km standard region
# RegScale = r17600 / r
# XDIM is the number of rows in a block, depending on the resolution
# YDIM is the number of columns in a block, depending on the resolution
# MISR bands parameters: index of each spectral band
BAND_BLUE = 0
BAND_GREEN = 1
BAND_RED = 2
BAND_NIR = 3
BAND_DIM = 4
NCHANNEL = BAND_DIM*CAM_DIM
# NOTE(review): set literal — unordered, like CAM_NAME above.
BAND_NAME = {'BlueBand', 'GreenBand', 'RedBand', 'NIRBand'}
BAND_USED = np.ones(BAND_DIM)
# FIX: map() returns a one-shot iterator under Python 3, so the mask would be
# silently exhausted after its first use; materialize it as a list of bools.
CHANNEL_USED = list(map(bool, (np.kron(BAND_USED, np.ones(CAM_DIM)))))
BAND_RADIANCE = ['Blue Radiance/RDQI', 'Green Radiance/RDQI', 'Red Radiance/RDQI', 'NIR Radiance/RDQI']
CONFIG_RDQI1 = 1
CONFIG_C_LAMBDA = np.array([5.67e-6, 1.04e-4, 4.89e-5, 3.94e-6])
CONFIG_SPECTRAL_CORR_MATRIX = np.array([[1.0106, -0.0057, -0.0038, -0.0011],
                                        [-0.0080, 1.0200, -0.0086, -0.0034],
                                        [-0.0060, -0.0048, 1.0145, -0.0036],
                                        [-0.0048, -0.0033, -0.0136, 1.0217]])
# sample_size = RegSize*RegSize
# CONFIG_MIN_HET_SUBR_THRESH = sample_size/4
MIN_CAM_USED = 2
CONFIG_FIRST_EIGENVALUE_FOR_EOFS = 1
CONFIG_EIGENVECTOR_VARIANCE_THRESH = 0.95
# MISR aerosol model parameters: lookup-table grids for the retrieval
MODEL_COMPONENTDIM = 21
MODEL_MIXTUREDIM = 74
MODEL_PRESSURE = np.array([607.95, 1013.25])
MODEL_MU0GRID = np.arange(0.2, 1.0, 0.01)
MODEL_MUGRID = np.array([0.31, 0.32, 0.33, 0.34, 0.35, 0.47, 0.48, 0.49, 0.5, 0.51, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 1])
MODEL_SCATTERANGLEGRID = np.array([-1, 0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30, 32.5, 35, 37.5, 40, 42.5, 45, 47.5, 50, 52.5, 55, 57.5, 60, 62.5, 65, 67.5, 70, 72.5, 75, 77.5, 80, 82.5,
                                   85, 87.5, 90, 92.5, 95, 97.5, 100, 102.5, 105, 107.5, 110, 112.5, 115, 117.5, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144,
                                   145, 146, 147, 148, 149, 150, 152.5, 155, 157.5, 160, 162.5, 165, 167.5, 170, 172.5, 175, 176, 177, 178, 179, 180, 181])
MODEL_OPTICALDEPTHGRID = np.array([0, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1, 1.5, 2, 3, 4, 6])
MODEL_OPTICALDEPTHLEN = len(MODEL_OPTICALDEPTHGRID)
MODEL_AOTGRIDGAP = 0.025
MODEL_OPTICALDEPTHFINERGRID = np.arange(0, 3, MODEL_AOTGRIDGAP)
MODEL_OPTICALDEPTHFINERGRIDLEN = len(MODEL_OPTICALDEPTHFINERGRID)
# Aerosol components actually used (converted from 1-based to 0-based indices)
COMPONENT_PARTICLE = np.array([1, 2, 3, 6, 8, 14, 19, 21]) - 1
COMPONENT_NUM = len(COMPONENT_PARTICLE)
CONFIG_ALBEDO_THRESH_LAND = 0.015
# Flattened (camera, component, optical-depth) grid for interpolation lookups
CAM_GRID, COMP_GRID, TAU_GRID = np.meshgrid(np.arange(CAM_DIM), np.arange(COMPONENT_NUM), MODEL_OPTICALDEPTHGRID)
CAM_GRID = np.ravel(CAM_GRID)
COMP_GRID = np.ravel(COMP_GRID)
TAU_GRID = np.ravel(TAU_GRID)
POINTS = np.vstack((CAM_GRID, COMP_GRID, TAU_GRID)).T
## Python 3
# Project Euler #2: sum of the even-valued Fibonacci terms below four million.
# The original built a global list through unbounded recursion that mutated
# module state; this version iterates and parameterizes the hard-coded limit.


def fib_below(limit=4000000):
    """Return the Fibonacci terms (starting 1, 1) that are strictly < limit."""
    terms = [1, 1]
    while terms[-1] + terms[-2] < limit:
        terms.append(terms[-1] + terms[-2])
    return terms


def even_fib_sum(limit=4000000):
    """Return the sum of the even-valued Fibonacci terms below limit."""
    return sum(term for term in fib_below(limit) if term % 2 == 0)


# Module-level names kept from the original script.
fibList = fib_below()
answer = even_fib_sum()
print("The sum of the even-valued terms is %d" % answer)
|
from flask import request
from gateway.app import app
from gateway.http_client import requirementmanager_http_client
from gateway.utils.handle_api import (
get_client_username, handle_request_response
)
@app.route('/requirement/archive/tree/list', methods=['GET'])
@handle_request_response
@get_client_username
def requirement_archive_tree_list(client_username: str):
    """Proxy GET /requirement/archive/tree/list to the requirement-manager service.

    The query string is forwarded unchanged. client_username is injected by
    the get_client_username decorator; handle_request_response presumably
    converts the (status_code, body) tuple into a Flask response — confirm
    against gateway.utils.handle_api.
    """
    args = request.args.to_dict()
    status_code, resp_body = requirementmanager_http_client.get(
        'requirement/archive/tree/list', client_username, params=args
    )
    return status_code, resp_body
|
test_str = "UAqwertyuiopasdfghjklPl;p[/"
# Collect only the consonant letters ('y' is treated as a vowel here;
# punctuation and non-letters are dropped by isalpha()).
result = [symbol for symbol in test_str
          if symbol.isalpha() and symbol.lower() not in "eyuioa"]
print(result)
join_str = "".join(result)
print(join_str)
# split_str = list(test_str)
# print(split_str)
# # tuple - кортеж - неизменяемый тип
# # list - список - изменяемый тип
#
#
# my_tuple = (1, 2, 3, "tuple", (-1, 0), None)
# print(type(my_tuple),my_tuple)
#
# my_list = [1, 2, 3, "list", (-1, 0), None]
# print(type(my_list),my_list)
#
# index = -1 # Обращение по индексу :
# my_tuple = list(my_tuple)
# my_tuple[index] = "new_value"
# my_tuple=tuple(my_tuple)
# my_list[index] = 3
# value_tuple = my_tuple[index]
# value_list = my_list[index]
# print(value_tuple, value_list)
# print(type(my_list),my_list)
# # Срезы как в строках
#
#
# # Приведение к типам
# new_list = list(my_tuple)
# new_tuple = tuple(my_list)
# print("new_list", type(new_list), new_list)
# print("new_tuple", type(new_tuple), new_tuple)
# new_list = []
#
# new_list.append('first')
# new_list.append('second')
# new_list.append([1,3,5])
# last_value = new_list.pop()
# print(new_list)
# print(last_value) |
from pratice.files02.Car import Car
# Instantiate the practice Car ('BMW' make, 'M3' model) and print its data().
car = Car('BMW', 'M3')
print(car.data())
"""
问题描述
给定n个正整数,找出它们中出现次数最多的数。如果这样的数有多个,请输出其中最小的一个。
输入格式
输入的第一行只有一个正整数n(1 ≤ n ≤ 1000),表示数字的个数。
输入的第二行有n个整数s1, s2, …, sn (1 ≤ si ≤ 10000, 1 ≤ i ≤ n)。相邻的数用空格分隔。
输出格式
输出这n个次数中出现次数最多的数。如果这样的数有多个,输出其中最小的一个。
样例输入
6
10 1 10 20 30 20
样例输出
10
---------------------
"""
def most_frequent_smallest(values):
    """Return the most frequent value; ties are broken by the smallest value.

    The original used max(c, key=c.get), whose tie-breaking depends on set
    iteration order and is not guaranteed to pick the smallest value as the
    problem statement requires.
    """
    counts = {}
    for v in values:
        counts[v] = counts.get(v, 0) + 1
    # Key: highest count first, then smallest value.
    return min(counts, key=lambda v: (-counts[v], v))


if __name__ == '__main__':
    # int() instead of the original unsafe eval(); n is declared by the input
    # format but, as in the original, not used to slice the value list.
    n = int(input())
    e_list = list(map(int, input().split()))
    print(most_frequent_smallest(e_list))
import mock
import unittest
from flask import Flask
from flask_testing import TestCase
from flask_watchman import Watchman, Environment
class TestWatchman(TestCase):
    """
    Test flask apps that are using class based views
    """
    def create_app(self):
        """Build the Flask app under test with Watchman routes installed."""
        app = Flask(__name__, static_folder=None)
        Watchman(app, environment={})
        app.config.setdefault('APP_LOGGING', 'MY LOGGING')
        return app
    def test_watchman_routes_exist(self):
        """
        Test that the routes added exist
        """
        r = self.client.options('/version')
        self.assertStatus(r, 200)
        r = self.client.options('/environment')
        self.assertStatus(r, 200)
    @mock.patch('flask_watchman.subprocess')
    def test_version_route_works(self, mocked_subprocess):
        """
        Tests that the version route works
        """
        process = mock.Mock()
        # communicate() is called twice: first for the commit, then the release
        process.communicate.side_effect = [
            ['latest-commit', 'error'],
            ['latest-release', 'error']
        ]
        mocked_subprocess.Popen.return_value = process
        r = self.client.get('/version')
        self.assertStatus(r, 200)
        self.assertTrue(mocked_subprocess.Popen.called)
        self.assertEqual(
            r.json['commit'],
            'latest-commit'
        )
        self.assertEqual(
            r.json['release'],
            'latest-release'
        )
    @mock.patch('flask_watchman.os.environ')
    def test_environment_route_works(self, mocked_environ):
        """
        Tests that the environment route works
        """
        mocked_environ.keys.return_value = ['OS_SHELL']
        mocked_environ.get.return_value = '/bin/favourite-shell'
        r = self.client.get('/environment')
        self.assertStatus(r, 200)
        self.assertEqual(
            r.json['os']['OS_SHELL'],
            '/bin/favourite-shell'
        )
        self.assertEqual(
            r.json['app']['APP_LOGGING'],
            'MY LOGGING'
        )
class TestWatchmanScopes(unittest.TestCase):
    """Tests for how Watchman configures scopes on the Environment view."""
    def tearDown(self):
        """Hack to cleanup class attributes set in the tests."""
        for key in ['scopes', 'decorators', 'rate_limit']:
            try:
                delattr(Environment, key)
            except AttributeError:
                pass

    def _assert_env_unconfigured(self):
        """Assert Environment carries none of the Watchman class attributes.

        Each attribute gets its own assertRaises context: the original
        chained all three getattr calls inside one `with` block, so only
        the first ever executed and the other two were never checked.
        """
        for key in ('scopes', 'rate_limit', 'decorators'):
            with self.assertRaises(AttributeError):
                getattr(Environment, key)

    def test_adding_scopes_to_routes(self):
        """
        Check the behaviour when scopes are specified
        """
        app = Flask(__name__, static_folder=None)
        environment = {
            'scopes': ['adsws:internal'],
        }
        self._assert_env_unconfigured()
        Watchman(app, environment=environment)
        self.assertEqual(Environment.scopes, ['adsws:internal'])
        self.assertIsInstance(Environment.decorators, list)
        self.assertIsInstance(Environment.rate_limit, list)

    def test_empty_scopes(self):
        """
        Check the behaviour when empty scopes are requested
        """
        app = Flask(__name__, static_folder=None)
        environment = {
            'scopes': [],
        }
        self._assert_env_unconfigured()
        Watchman(app, environment=environment)
        self.assertEqual(Environment.scopes, [])
        self.assertIsInstance(Environment.decorators, list)
        self.assertIsInstance(Environment.rate_limit, list)

    def test_no_scopes(self):
        """
        Check the behaviour when no scopes are requested at all
        """
        app = Flask(__name__, static_folder=None)
        self._assert_env_unconfigured()
        Watchman(app)
        # without an environment dict, none of the attributes are set
        self._assert_env_unconfigured()
# Run the suite directly; verbosity=2 prints one line per test.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
from tkinter import *
# Minimal Tk demo: two buttons packed left-to-right in a single window.
window = Tk()
b1 = Button(window, text="첫번째 버튼")
b2 = Button(window, text="두번째 버튼")
for button in (b1, b2):
    button.pack(side=LEFT)
window.mainloop()
import os
import re
import glob
import pickle
import pandas as pd
from utils.transform_utils import *
# Get all posts within the data directory
posts = glob.glob('data/posts/*.p')
# Iterate over all posts within a class: load each pickle, derive metrics,
# and write an enriched DataFrame pickle per network.
for fp in posts:
    # Load each post into a DataFrame and store its networkid.
    # `with` guarantees the handle is closed (the original
    # `pickle.load(open(fp, "rb"))` leaked one file descriptor per file).
    with open(fp, "rb") as pickled:
        df = pd.DataFrame(pickle.load(pickled))
    network_id = re.search("posts_(.*).p", fp).group(1)
    # Compute different metrics about the class
    df['created'] = pd.to_datetime(df['created'])
    df['num_revisions'] = df['history'].apply(len)  # len directly; no lambda needed
    df['subject'] = df['history'].apply(lambda x: x[0]['subject'])
    df['is_student'] = df['tags'].apply(lambda x: 'student' in x)
    df['is_instructor'] = df['tags'].apply(lambda x: 'instructor-note' in x)
    df['is_announcement'] = df['config'].apply(lambda x: 1 if 'is_announcement' in x else 0)
    df['num_children'] = df['children'].apply(lambda x: len(list(num_nested_dicts(x[0], 'children'))) if len(x) > 0 else 0)
    # Remove HTML tags and newlines from the first revision's content
    df['text'] = df['history'].apply(lambda x: re.sub('<[^<]+?>|\n', ' ', x[0]['content']))
    # Reorder the columns
    df = df[['id', 'created', 'type', 'folders', 'tags', 'is_announcement', 'history', 'children', 'tag_good', 'is_student', 'no_answer', 'num_children', 'num_favorites', 'num_revisions', 'unique_views', 'subject', 'text']]
    with open(f"data/dataframes/{fp[11:-23]}_dataframe_{network_id}.p", 'wb') as f:
        pickle.dump(df, f)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
class HtmlDownloader(object):
    """Download HTML pages, presenting a desktop-browser User-Agent."""

    def download(self, url):
        """Return the UTF-8 decoded body of `url`.

        Returns None when `url` is None or the response status is not 200.
        """
        if url is None:
            return None
        url_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'
        headers = {
            'User-Agent': url_agent
        }
        # BUG FIX: requests.get()'s second positional parameter is `params`,
        # not `headers` -- the original never actually sent the User-Agent.
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            response.encoding = 'utf-8'
            return response.text
        return None
if __name__ == '__main__':
    # Smoke test: fetch a sample page and dump its HTML to stdout.
    a = HtmlDownloader()
    print(a.download('http://theater.mtime.com/China_Fujian_Province_Xiamen/'))
import pygame
import random
# Window dimensions #
largeur=650
hauteur=700
# Font #
pygame.font.init()
ma_police=pygame.font.SysFont('Comic Sans MS',30)
ecran=pygame.display.set_mode((largeur,hauteur))
clock=pygame.time.Clock()
FPS=20
# RGB colours (red green blue) #
White=(180,238,180)
Green=(0,255,0)
Black=(0,0,0)
Red=(255,0,0)
# Player ship: list of [x, y] segments (head at index 0) #
vaisseau=[[largeur//2,690]]
taille=10
speed=5
x=0  # current horizontal velocity of the ship
# Aliens: speed multiplier, grid (filled by generate_aliens), direction #
spe=1
aliens = []
xal=spe
descendre = False
# Bunkers (filled by generate_bunker) #
bunker = []
# Shots: alien shots list, plus current alien/ship shot coordinates
# (None means no shot in flight) #
tir_a= []
tira_x= None
tira_y= None
tirv_x= None
tirv_y= None
# Score
score=0
def tirv():
    """Spawn the player's shot just above the ship's head segment."""
    global tirv_x, tirv_y
    ship_x, ship_y = vaisseau[0]
    tirv_x = ship_x
    tirv_y = ship_y - 10
def tira():
    # Spawn an alien shot at alien (i, j).
    # NOTE(review): relies on module-level loop variables `i` and `j` being
    # in scope at call time; this function is never called in the visible
    # code, so the alien-fire feature appears unfinished -- confirm intent.
    global tira_x, tira_y
    tira_x = aliens[i][j][0]
    tira_y = aliens[i][j][1]
def generate_aliens():
    """Reset the global alien grid to 5 rows x 11 columns.

    Aliens start at (100, 100) and are spaced 45 px apart; each entry is a
    mutable [x, y] pair updated in place by the main loop. The grid is also
    returned so callers (and tests) can use it directly.
    """
    global aliens
    aliens = [[[100 + col * 45, 100 + row * 45] for col in range(11)]
              for row in range(5)]
    return aliens
def generate_bunker():
    """Reset the global bunker list: 4 bunkers in a row at y=550, 140 px apart.

    FIX: the original appended without clearing, so a second call duplicated
    every bunker; rebuilding the list matches generate_aliens()' behaviour.
    The list is also returned for convenience/testing.
    """
    global bunker
    bunker = [[70 + line * 140, 550] for line in range(4)]
    return bunker
generate_bunker()
generate_aliens()
game_over=3  # sentinel: 0 means the aliens reached the ship (game over)
while True:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            quit()
        elif event.type==pygame.KEYDOWN:
            # Change of direction / actions #
            if event.key == pygame.K_LEFT :
                x=-speed
            elif event.key == pygame.K_RIGHT :
                x=speed
            elif event.key == pygame.K_DOWN:
                x=0
            elif event.key == pygame.K_SPACE:
                tirv()
            elif event.key == pygame.K_r:
                # Restart: reset ship, aliens and speed
                x=0
                vaisseau=[[largeur/2,650]]
                game_over =3
                generate_aliens()
                spe=1
    ecran.fill(Black)
    if game_over!=0:
        # Update the ship's position (clamp at the window edges) #
        if (vaisseau[0][0] + x < 0 or vaisseau[0][0] + x > largeur-taille) :
            x=0
            vaisseau.insert(0,[vaisseau[0][0],vaisseau[0][1]])
            vaisseau.pop()
        elif x != 0:
            vaisseau.insert(0,[vaisseau[0][0]+x,vaisseau[0][1]])
            vaisseau.pop()
        # Wall collision: reverse alien direction, speed up, drop one row #
        if (aliens [0][0][0]+x<0):
            spe +=1
            xal=1
            descendre = True
        elif(aliens [0][-1][0]+x>largeur-(taille*2)):
            spe += 1
            if spe > 10:
                spe = 10
            xal=-1
            descendre = True
        # Alien-shot collision (not implemented)
        # Player-shot collision: rebuild the grid without any alien hit
        # by the shot's 20x20 hitbox, and award 10 points per kill.
        if tirv_x is not None:
            collide = False
            tmp = []
            for i in range(0,len(aliens)):
                tmp.append([])
                for j in range(0, len(aliens[i])):
                    if not(aliens[i][j][0] <= tirv_x and tirv_x <= (aliens[i][j][0] + 20) and aliens[i][j][1] <= tirv_y and tirv_y <= (aliens[i][j][1] + 20)):
                        tmp[-1].append(aliens[i][j])
                    else:
                        score+=10
                        collide = True
            aliens = tmp
            if collide is True:
                tirv_x = None
        # Draw aliens (moving them sideways, and down when descendre is set) #
        for i in range(0,len(aliens)):
            for j in range(0, len(aliens[i])):
                aliens[i][j][0] += (xal*spe)
                if descendre == True:
                    aliens[i][j][1] += 10
                    if aliens[i][j][1] >= 510:
                        game_over = 0
                pygame.draw.rect(ecran,White,(aliens[i][j][0],aliens[i][j][1],taille*2,taille*2))
        descendre = False
        # Draw the player's shot (moves up 20 px per frame) #
        if tirv_x is not None:
            tirv_x  # NOTE(review): no-op statement, likely leftover
            tirv_y -= 20
            if tirv_y <= 0:
                tirv_x = None
            else:
                pygame.draw.rect(ecran,Green,(tirv_x, tirv_y,taille//2,taille))
        # Draw alien shots #
        for i in range(0,len(tir_a)):
            pygame.draw.rect(ecran,White,(tir_a[i][0], tir_a[i][1],taille//2,taille))
        # Draw bunkers #
        for i in range(0,len(bunker)):
            pygame.draw.rect(ecran,Green,(bunker[i][0],bunker[i][1],taille*6,taille*4))
        # Draw the ship #
        for i in range(len(vaisseau)):
            pygame.draw.rect(ecran,Green,(vaisseau[i][0],vaisseau[i][1],taille*2,taille))
        # Draw the score #
        textsurface=ma_police.render("score"+str(score),True,Red)
        ecran.blit(textsurface,(largeur//2,50))
    elif game_over==0:
        textsurface=ma_police.render("GAME OVER",False,Red)
        ecran.blit(textsurface,(largeur//2,hauteur//2))
    pygame.display.update()
# Strip a fixed set of punctuation characters from a user-supplied string.
punctuations = ',./;:?"}{[]@!#$%^&*()'
string = input('enter the string')
# str.join over a generator: one O(n) pass instead of quadratic `+=` concat.
nopunctuation = ''.join(ch for ch in string if ch not in punctuations)
print(nopunctuation)
# -*- coding: utf-8 -*-
""""
Tool Name: Avalanche paths to 3D
Source Name: AvalanchePathsTo3d.py
Version: ArcGIS 10.3.1
Author: Icelandic Meteorology Office/Ragnar H. Thrastarson
Created: 2016-10-28
Description: A python script tool that takes pre-defined avalanche
paths with pre-defined fields and converts them to 3D and adds M
coordinates by using a surface of some sort (TIN or raster). The
tool also exports both an attribute table and a coordinate table
for the output features.
"""
import arcpy
# Script-tool parameters collected from the ArcGIS tool dialog.
InputFileGeodatabase = arcpy.GetParameterAsText(0) # File Geodatabase usually named verk.gdb
InputFeatureClass = arcpy.GetParameterAsText(1) # Must be line feature class usually named braut2d
InputDEM = arcpy.GetParameterAsText(2) # Surface that must overlap input feature class
OutputTableLocation = arcpy.GetParameterAsText(3) # Location for braut3did table id_nafn table created by the tool
# Paths and filenames for outputs
OutputBraut3d = InputFileGeodatabase + "\\brautir\\braut3d"
OutputBraut3did = InputFileGeodatabase + "\\brautir\\braut3did"
OutputBraut3didTable = OutputTableLocation + "\\braut3did.txt"
OutputBraut3dTable = OutputTableLocation + "\\id_nafn.txt"
def generate_3d_features():
    """Convert the 2D avalanche paths to 3D and add M (route) coordinates.

    Interpolates Z values from InputDEM into OutputBraut3d, populates the
    ID/start/end fields, then builds measured routes in OutputBraut3did.
    """
    number_of_features = str(arcpy.GetCount_management(InputFeatureClass))
    arcpy.AddMessage(number_of_features + " segments found")
    arcpy.InterpolateShape_3d(InputDEM, InputFeatureClass, OutputBraut3d) # convert 2D to 3D
    arcpy.AddMessage("Feature class braut3d created")
    arcpy.CalculateField_management(OutputBraut3d, "ID", "!OBJECTID!", "PYTHON_9.3") # populate fields
    arcpy.CalculateField_management(OutputBraut3d, "start", "0", "PYTHON_9.3")
    arcpy.CalculateField_management(OutputBraut3d, "end", "!shape.length@meters!", "PYTHON_9.3")
    arcpy.AddMessage("Fields ID, START and END populated")
    arcpy.env.MTolerance = "0.001" # set tolerance for M coordinate
    arcpy.CreateRoutes_lr(OutputBraut3d, "ID", OutputBraut3did,
                          "TWO_FIELDS", "start", "end", "UPPER_LEFT",
                          "1", "0", "IGNORE", "INDEX")
    arcpy.AddMessage("Feature class braut3did created")
def export_braut3did_to_text():
    """Write the braut3did geometry to a plain-text coordinate table.

    Format: one "<feature#> 0" header line per feature, followed by one
    "<node#> X Y Z M" line per vertex of each part (node numbering restarts
    at 1 for every part).
    """
    feature_counter = 1
    with open(OutputBraut3didTable, 'w') as f:
        for row in arcpy.da.SearchCursor(OutputBraut3did, ["SHAPE@"]):
            f.write(str(feature_counter) + " 0" + "\n")
            feature_counter += 1
            for part in row[0]:
                node_counter = 1
                for pnt in part:
                    f.write(str(node_counter) + " {0} {1} {2} {3}".format(pnt.X, pnt.Y, pnt.Z, pnt.M) + "\n")
                    node_counter += 1
    # The redundant f.close() was removed: the with-block already closes it.
def export_braut3d_attributes_to_text():
    """Write the ID/SEG/NAFN attribute table of the input features to text."""
    fields = ["ID", "SEG", "NAFN"]
    # Handle renamed from `i` (easily confused with a loop index); the
    # redundant close() inside the with-block was removed.
    with open(OutputBraut3dTable, 'w') as out:
        out.write(fields[0] + " " + fields[1] + " " + fields[2] + "\n")
        for row in arcpy.da.SearchCursor(InputFeatureClass, fields):
            out.write("{0} {1} {2}".format(row[0], row[1], row[2]) + "\n")
# Call function to turn 2D features to 3D, populate fields and add M coordinates
generate_3d_features()
arcpy.AddMessage("Exporting tables...")
# Call function to export coordinate table
export_braut3did_to_text()
arcpy.AddMessage("Coordinate table exported: braut3did.txt")
# Call function to export attribute table
export_braut3d_attributes_to_text()
arcpy.AddMessage("Attribute table exported: id_nafn.txt")
|
#!/usr/bin/env python
import sys
from helper import *
from playbooks import *
from group_vars import *
def run():
    """Render and print playbook snippets described by a YAML input file.

    Parses ``-yaml <inputFile>`` from argv, loads the config via
    get_config(), renders every entry under its ``playbooks`` key with
    runPlaybook(), and prints the rendered snippets. Exits the process on
    any argument or input error.
    """
    yamlFileName = ""
    config = {}  # renamed from `input`, which shadowed the builtin

    # --- argument parsing: the value following a `-yaml` flag wins ---
    try:
        argslen = len(sys.argv)
        if argslen > 1:
            for index in range(argslen):
                if sys.argv[index] == "-yaml":
                    if index == argslen - 1:
                        print("Error in the arguments. Usage: -yaml <inputFile>")
                        sys.exit()
                    else:
                        yamlFileName = sys.argv[index + 1]
    except IndexError:
        print("An error happened, exiting ... ")
        sys.exit()

    # --- read the input data ---
    try:
        config = get_config(yamlFileName)
    except IOError:
        print('No such file or directory, exiting ...')
        sys.exit()
    except yaml.scanner.ScannerError:
        print('Malformed input, exiting ...')
        sys.exit()

    # --- extract the playbook list ---
    if 'playbooks' in config:
        inputPlaybooks = config['playbooks']
    else:
        print("Error: playbooks key not provided in input file")
        sys.exit()

    # --- render each requested playbook ---
    renderedPlaybooks = []
    for inputPlaybook in inputPlaybooks:
        if 'playbookName' in inputPlaybook:
            playbook = getPlaybook(inputPlaybook['playbookName'])
            if playbook:
                renderedPlaybooks.append(runPlaybook(playbook, inputPlaybook, hostName2GroupVar))
            else:
                print("Error: playbookName value not valid, exiting ...")
                sys.exit()
        else:
            print("Error: playbookName key not provided in playbook input, exiting ...")
            sys.exit()

    # --- print the rendered output ---
    for renderedPlaybook in renderedPlaybooks:
        for renderedTask in renderedPlaybook:
            if renderedTask['printHostName']:
                print(printHostName(renderedTask['hostName']))
            print(renderedTask['renderedSnippet'] + "\n")


if __name__ == "__main__":
    run()
|
from threading import Timer
class Highlight:
    """Temporarily highlight (blink) a Selenium WebDriver element.

    select() paints the element yellow with a red border and schedules
    deselect() on a threading.Timer to restore the original inline style
    roughly half a second later.
    """
    def __init__(self):
        self.element = None         # currently highlighted element, if any
        self.original_style = None  # inline style to restore on deselect
        self.timer = None           # pending restore timer

    def apply_style(self, style):
        """Set the element's inline `style` attribute via injected JS."""
        try:
            self.element._parent.execute_script(
                "arguments[0].setAttribute('style', arguments[1]);", self.element, style)
        except Exception:
            # Best-effort: the element may be stale or detached. Narrowed
            # from a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            pass

    def deselect(self):
        """Restore the original style and forget the element (no-op if none)."""
        if self.element is None:
            return
        self.apply_style(self.original_style)
        self.element = None
        self.timer = None

    def select(self, element):
        """Highlights (blinks) a Selenium Webdriver element"""
        self.deselect()
        self.element = element
        self.original_style = element.get_attribute('style')
        self.apply_style("background: yellow; border: 2px solid red;")
        self.timer = Timer(0.5, self.deselect)
        self.timer.start()
|
from OOP.PlanetSystem_Euler import solarsystem, planet
import numpy as np
import matplotlib.pyplot as plt
# use 100000 steps to see long term effects
n = 1000
tf = 100
ti = 0
h = 0.01
Earth_mass = 0.0001
Sun_mass = 1
Jupiter_mass = 0.001
Earth_posx = 1.0
Earth_posy = 0
Jupiter_posx = 2
Jupiter_posy = 0
Sun_posx = 0
Sun_posy = 0
Earth_velx = 0
Earth_vely = np.pi*2
Jupiter_velx = 0
Jupiter_vely = np.pi*2/np.sqrt(2)
# Each entry: [x, y, vx, vy, mass, name]
planetlist = [[0,0,0,0,1, "sun"],
              [Earth_posx,Earth_posy,Earth_velx,Earth_vely, Earth_mass, "earth"],
              [Jupiter_posx,Jupiter_posy,Jupiter_velx,Jupiter_vely, Jupiter_mass, "jupiter"]]
Model1 = solarsystem(h, n, planetlist)
Model1.run()
kenergies1, penergies1, AngMoments1 = Model1.showConservation(False)
# Re-import swaps the solarsystem class for the Velocity-Verlet version.
from OOP.PlanetSystem_VV import solarsystem, planet
Model2 = solarsystem(h, n, planetlist)
Model2.run()
kenergies2, penergies2, AngMoments2 = Model2.showConservation(False)
# Plots of energies and angular momentum for both Euler and VV.
for i in range(len(planetlist)):
    plt.plot(range(n), kenergies1[:,i], label=planetlist[i][5]+"Euler")
    plt.plot(range(n), kenergies2[:,i], label=planetlist[i][5]+"VV")
plt.legend()
plt.title("Kinetic Energy of the system over {} steps".format(n))
plt.show()
for i in range(1,len(planetlist)):
    plt.plot(range(n), penergies1[:,i-1], label=planetlist[i][5]+"Euler")
    plt.plot(range(n), penergies2[:,i-1], label=planetlist[i][5]+"VV")
plt.legend()
plt.title("Potential Energy of the system over {} steps".format(n))
plt.show()
for i in range(1,len(planetlist)):
    plt.plot(range(n), AngMoments1[:,i-1], label=planetlist[i][5]+"Euler")
    plt.plot(range(n), AngMoments2[:,i-1], label=planetlist[i][5]+"VV")
plt.legend()
# BUG FIX: this figure plots angular momentum; the title was copy-pasted
# from the potential-energy plot above.
plt.title("Angular Momentum of the system over {} steps".format(n))
plt.show()
import os
# SECURITY NOTE(review): SECRET_KEY is hardcoded in source control -- it
# should be loaded from the environment in any non-development deployment.
SECRET_KEY = '123qwe456ghj'
# Postgres location: docker-link env vars with local-VM fallbacks.
pg_host = os.environ.get('POSTGRES_PORT_5432_TCP_ADDR', '192.168.99.100')
pg_port = os.environ.get('POSTGRES_PORT_5432_TCP_PORT', '5432')
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:postgres@{}:{}/brotherhood'.format(pg_host, pg_port)
SQLALCHEMY_TRACK_MODIFICATIONS = True
|
from rest_framework import serializers
from . import models
class PostSerializer(serializers.ModelSerializer):
    """DRF serializer exposing StudentModel records for the posts API."""
    class Meta:
        # Fields included in the serialized representation.
        fields = ('id', 'name', 'age','gender','country','remarks', 'created_at', 'updated_at',)
        model = models.StudentModel
|
''' VARIABLES EXPECTED:
a) Trade-Off Parameter (Alpha)
b) Weight/Reputation Score (Gamma)
c) Last Time The Agent was selected (b)
RETURNS a LIST of addresses of SAMPLED AGENTS
'''
#agents_record = {"ETH_ADDRESS":[GAMMA,B_VAL]}
from dataForAgentSelection import agents_record
from collections import defaultdict,OrderedDict
def calc_sum(agents_record):
    """Return (total gamma, total b) summed over all agents.

    `agents_record` maps ETH address -> [gamma, b_val].
    """
    sum_gamma = sum(record[0] for record in agents_record.values())
    sum_b_val = sum(record[1] for record in agents_record.values())
    return sum_gamma, sum_b_val
def calc_probabilities(agents_record, trade_off_param):
    """Map each agent address to its selection probability.

    Probability = alpha * (gamma share) + (1 - alpha) * (b share), where the
    shares are normalised over all agents.
    """
    # Totals computed inline (same arithmetic calc_sum performs).
    sum_gamma = sum(record[0] for record in agents_record.values())
    sum_b_val = sum(record[1] for record in agents_record.values())
    ret_mapping = defaultdict(int)
    for address, (gamma, b_val) in agents_record.items():
        ret_mapping[address] = (trade_off_param * (gamma / sum_gamma)
                                + (1 - trade_off_param) * (b_val / sum_b_val))
    return ret_mapping
def sample_agents(number, final_structure):
    """Return up to `number` agent addresses, highest probability first."""
    ranked = sorted(final_structure.items(), key=lambda kv: kv[1], reverse=True)
    return [address for address, _ in ranked[:number]]
## DRIVER: sample the top-6 agents from the imported agents_record ##
if __name__ == '__main__':
    print("The Sampled Agents are:")
    #a_record = {"ascaadcadcac":[0.5,0.4],"ssacdcdac":[0.9,0.4],"adscdac":[0.8,0.9]}
    trade_off = 0.6
    final = calc_probabilities(agents_record,trade_off)
    print(sample_agents(6,final))
|
# Demo of str strip methods; each returns a NEW string, the original is unchanged.
favorite_language='python '
print(favorite_language.rstrip())   # trailing whitespace removed
favorite_language=' python '
print(favorite_language.lstrip())   # leading whitespace removed
print(favorite_language.strip())    # both sides trimmed
|
#!/usr/bin/python
import datetime
import time
import serial
import serial.tools.list_ports
import requests
import json
from const import Constant
from logmessages import LogMessage
class ReadTemperature:
    """Poll a USB serial humidity/temperature sensor and upload readings.

    NOTE(review): Python 2 code (`except Exception, e` syntax). run() loops
    forever and, on any error, logs and recursively calls itself -- the call
    stack grows with every failure over long uptimes.
    """
    const = ''       # replaced by a Constant instance in __init__
    logMessage = ''  # replaced by a LogMessage instance in __init__
    def __init__(self):
        self.const = Constant()
        self.logMessage = LogMessage()
    def run(self):
        """Main loop: read serial data, parse, upload to AWS, sleep 40s."""
        try:
            while True:
                readings = self.readDataFromUSB()
                # readings = [' ', '', 'RH=31.4 ', '', 'T=+23.3 ', '', 'RH=31.4 ', '', 'T=-23.4 ']
                if len(readings) > 0:
                    data = self.processData(readings)
                    self.uploadDataToAws(data)
                    self.logMessage.logBySection('Data saved : ' + str(datetime.datetime.now(self.const.APPLICATION_TIMEZONE)),
                                                 self.const.LOG_SECTION_TEMPERATURE)
                time.sleep(40)
        except Exception, e:
            self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
            # Restart the loop after any failure (recursive retry).
            self.run()
            pass
    def processData(self, log):
        """Pair up RH=/T= tokens into [humidity, temperature] rows."""
        rows = []
        v1 = ''  # last humidity (RH) value seen
        v2 = ''  # last temperature (T) value seen
        try:
            for data in log:
                reading = data.strip()
                if len(reading) > 0:
                    final = reading.split('=')
                    if len(final) > 0:
                        if final[0] == 'RH':
                            v1 = final[1]
                        elif final[0] == 'T':
                            v2 = final[1]
                        # Emit a row once both halves of a pair are present.
                        if len(v1) > 0 and len(v2) > 0:
                            rows.append([v1, v2])
                            v1 = ''
                            v2 = ''
        except Exception, e:
            self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
            pass
        return rows
    def readDataFromUSB(self):
        """Read up to 1 KiB from the sensor's serial port; return its lines."""
        data = []
        try:
            serialPort = serial.Serial('/dev/ttyUSB0', baudrate=2400, timeout=10)
            temperatureReading = serialPort.read(1024)
            if len(temperatureReading) > 0:
                data = temperatureReading.splitlines()
        except Exception, e:
            self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
            pass
        return data
    def uploadDataToAws(self, log):
        """POST the parsed rows as JSON to the configured AWS endpoint."""
        try:
            postData = json.dumps(log)
            r = requests.post(self.const.AWS_URL, data=postData)
            self.logMessage.logBySection('Response : ' + str(r.text), self.const.LOG_SECTION_TEMPERATURE)
        except Exception, e:
            self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
            pass
    def sendDataToSheet(self, data):
        # NOTE(review): gspread is not imported in this file's visible
        # imports, so calling this raises NameError.
        # SECURITY NOTE(review): credentials are hardcoded in source --
        # move them to a key file / environment variable.
        try:
            # scope = [self.const.SOURCE_URL]
            # creds = ServiceAccountCredentials.from_json_keyfile_name(self.const.CLIENT_KEY_FILE, scope)
            # client = gspread.authorize(creds)
            client = gspread.login('developersa48@gmail.com', 'rrkelocjnerxxfox')
            # sheet = client.open(self.const.SHEET_NAME).sheet1
            sheet = client.open('livoltTemperature').sheet1
            for reading in data:
                sheet.append_row(reading)
        except Exception, e:
            self.logMessage.logBySection('Error Message : ' + str(e), self.const.LOG_SECTION_TEMPERATURE)
            pass
# Start the read->upload loop immediately when the module is executed.
obReadRemp = ReadTemperature()
obReadRemp.run()
|
# Generated by Django 2.2.4 on 2020-03-22 11:37
from django.db import migrations, models
# Auto-generated Django migration: creates the QuarterTotal table.
# Avoid hand-editing; make schema changes via a new migration instead.
class Migration(migrations.Migration):
    dependencies = [("budget", "0008_auto_20200223_2124")]
    operations = [
        migrations.CreateModel(
            name="QuarterTotal",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("year", models.PositiveIntegerField()),
                ("quarter", models.PositiveIntegerField()),
                ("amount_pln", models.DecimalField(decimal_places=2, max_digits=8)),
                ("amount_gbp", models.DecimalField(decimal_places=2, max_digits=8)),
                ("amount_usd", models.DecimalField(decimal_places=2, max_digits=8)),
                ("amount_safe", models.DecimalField(decimal_places=2, max_digits=8)),
                ("amount_kejt", models.DecimalField(decimal_places=2, max_digits=8)),
                ("amount_mewash", models.DecimalField(decimal_places=2, max_digits=8)),
                ("date_added", models.DateField(auto_now_add=True)),
                ("note", models.TextField(blank=True, default=None)),
            ],
        )
    ]
|
from django.contrib.auth import logout, login
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.views import LoginView
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import CreateView
# from django.contrib.auth.models import User
from app_users.forms import RegisterUserForm, LoginUserForm
class RegisterUser (CreateView):
    """ User registration form view """
    # form_class = UserCreationForm # django's stock registration form
    form_class = RegisterUserForm # custom form from forms.py
    template_name = 'user/user/base_register.html'
    success_url = reverse_lazy('login')
    def form_valid(self, form):
        """ Log the user in immediately after a successful registration """
        user = form.save()
        login(self.request, user, backend='django.contrib.auth.backends.ModelBackend')
        return redirect('home')
class LoginUser (LoginView):
    """ User login form view """
    # form_class = AuthenticationForm # django's stock authentication form
    form_class = LoginUserForm # custom form from forms.py
    template_name = 'user/user/base_login.html'
    success_url = reverse_lazy('login') # redirect target after successful login
    def get_success_url(self):
        """ Redirect to the home page after a successful login """
        return reverse_lazy('home')
def logout_user(request):
    """Log the current user out and send them to the login page."""
    logout(request)
    return redirect('login')
|
from tremendous.client import Tremendous
from tremendous.version import __version__
# Public API of the tremendous package.
__all__ = ['Tremendous', '__version__']
|
# Generated by Django 3.2.5 on 2021-08-08 12:54
from django.db import migrations, models
# Auto-generated Django migration: default ordering + blog_image field change.
# Avoid hand-editing; make schema changes via a new migration instead.
class Migration(migrations.Migration):
    dependencies = [
        ('blog_app', '0002_auto_20210808_1745'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='blog',
            options={'ordering': ['-publish_date']},
        ),
        migrations.AlterField(
            model_name='blog',
            name='blog_image',
            field=models.ImageField(upload_to='blog_images', verbose_name='Image'),
        ),
    ]
|
# Generated by Django 3.0.8 on 2020-08-12 22:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django migration: wires customer/app/review relations.
# Avoid hand-editing; make schema changes via a new migration instead.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        ('data', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='customer',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='appstorereview',
            name='app',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='data.App'),
        ),
        migrations.AddField(
            model_name='app',
            name='similar',
            field=models.ManyToManyField(blank=True, related_name='_app_similar_+', to='data.App'),
        ),
        migrations.AlterUniqueTogether(
            name='app',
            unique_together={('appid', 'primaryCountry')},
        ),
    ]
|
import subprocess
import basetest
import time
class TestCaseEmitMetrics(basetest.BaseTest):
    """Deploys a sample app to Cloud Foundry and checks metrics in its logs."""
    def setUp(self):
        self.setUpCF('sample-6.2.0.mda')
        # Ask the buildpack to emit metrics every 10 seconds.
        subprocess.check_call(('cf', 'set-env', self.app_name, 'METRICS_INTERVAL', '10'))
        self.startApp()
    def test_read_metrics_in_logs(self):
        # Wait one metrics interval so at least one report has been emitted.
        time.sleep(10)
        self.assert_string_in_recent_logs(self.app_name, 'MENDIX-METRICS: ')
        self.assert_string_in_recent_logs(self.app_name, 'storage')
        self.assert_string_in_recent_logs(self.app_name, 'number_of_files')
        self.assert_string_in_recent_logs(self.app_name, 'critical_logs_count')
|
from replit import clear
from art import logo
# HINT: You can call clear() to clear the output in the console.
print(logo)
print("Welcome to the Secret Auction Program")
ans = True  # loop flag; becomes the bidder's yes/no answer after round 1
bidders = {}
while ans:
    name = input("What's your name?\n")
    bid = int(input("What's your bid?\n"))
    bidders[name] = bid  # record (or overwrite) this bidder's offer
    print("Are there any more bidders?:Yes or No")
    ans = input().lower()
    if ans == "yes":
        clear()  # hide previous bids from the next bidder
    elif ans == "no":
        ans = False
        # max() keyed on the bid replaces the manual scan; unlike the
        # original (which started from max_bid=0), this also crowns a
        # winner when every bid is zero or negative.
        winner = max(bidders, key=bidders.get)
        max_bid = bidders[winner]
        print("The winner of the Secret Auction is {} with the bid of ${}".format(winner, max_bid))
        print(bidders)  # NOTE(review): reveals all bids; debug leftover?
    else:
        print("Wrong input")
|
from urllib import quote as url_quote
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from spellcorrector.views import Spellcorrector, tokenize_text, remove_stopwords
spellcorrector_instance = Spellcorrector()
spellcorrector_instance.load()
from models import Document
def document_to_words(document):
    """Return the unique, stopword-free tokens of a document's title + body."""
    all_words = tokenize_text(document.title + ' ' + document.body)
    all_words = remove_stopwords(list(set(all_words)))
    return all_words
from django.db.models.signals import post_save, post_delete, pre_save
def train_on_document(sender, instance, created, **__):
    """post_save hook: add a saved document's words to the spellcorrector."""
    all_words = document_to_words(instance)
    spellcorrector_instance.train(all_words)
    spellcorrector_instance.save()
post_save.connect(train_on_document, sender=Document)
def untrain_on_document(sender, instance, **kwargs):
    """post_delete hook: remove a deleted document's words from the model."""
    all_words = document_to_words(instance)
    spellcorrector_instance.untrain(all_words)
    spellcorrector_instance.save()
post_delete.connect(untrain_on_document, sender=Document)
def retrain_all_documents(instance):
    """handy function for when you've fiddled too much with you model data with the
    risk of the train words not being up to date.
    Also, if you change a document, it will just train on the new stuff not
    untrain on the old stuff. Admittedly, if you typed it once you're most
    likely right the first time and that doesn't hurt to count.
    In a more realistic app you might want to put this under protection since
    it's a slow process and you don't want to allow anonymous calls dos your
    site.
    """
    # Wipe the model, then retrain on the union of every document's words.
    instance.reset()
    all_words = set()
    for document in Document.objects.all():
        all_words.update(document_to_words(document))
    instance.train(all_words)
    instance.save()
def documents(request):
    """List documents; a ?q= query is spell-corrected before filtering."""
    documents = Document.objects.all()
    if request.GET.get('q'):
        q = request.GET.get('q')
        q_corrected = spellcorrector_instance.correct(q)
        if q != q_corrected:
            # Silently search with the corrected term instead of the input.
            documents = documents.filter(Q(title__icontains=q_corrected) \
                | Q(body__icontains=q_corrected))
        else:
            documents = documents.filter(Q(title__icontains=q) | Q(body__icontains=q))
    return render_to_response('documents.html', locals(),
                              context_instance=RequestContext(request))
def documents_a_la_google(request):
    """put in a link that says _Did you mean: *correction*_"""
    # Unlike documents(), this searches the raw query and only *suggests*
    # the correction (exposed to the template via locals()).
    documents = Document.objects.all()
    if request.GET.get('q'):
        q = request.GET.get('q')
        documents = documents.filter(Q(title__icontains=q) | Q(body__icontains=q))
        corrected = spellcorrector_instance.correct(q)
        print "corrected", repr(corrected)  # NOTE(review): Python 2 debug print
        if corrected != q:
            correction = {'query_string': url_quote(corrected),
                          'correction': corrected}
    return render_to_response('documents-a-la-google.html', locals(),
                              context_instance=RequestContext(request))
import glob
import math
import os.path as osp
import numpy as np
import torch.utils.data as data
"""# Data Loader"""
def make_data_path_list(phase="train"):
    """
    Collect the dataset directory paths for one phase.

    Parameters
    ----------
    phase : 'train' or 'val'

    Returns
    -------
    path_list : list
        Directories matching ./data/<phase>/**/*/ .
        NOTE(review): glob.glob is called without recursive=True, so '**'
        matches a single path level only -- confirm this is intended.
    """
    rootpath = "./data/"
    pattern = osp.join(rootpath + phase + '/**/*/')
    return list(glob.glob(pattern))
class Dataset(data.Dataset):
    """
    CFD sample loader: one directory per sample, fields stored as CSV grids.

    Attributes
    ----------
    file_list : list
    transform : object
    phase : 'train' or 'test'
    """
    def __init__(self, file_list, phase='train', input_size=200):
        self.file_list = file_list  # sample directory paths
        # self.transform = transform #
        self.phase = phase  # train or val
        self.size = input_size  # side length of the centre crop (<= 256)
    def __len__(self):
        return len(self.file_list)
    def __getitem__(self, index):
        # Returns (boundary, distance, input_labels, fluid, shock_labels)
        # as float64 numpy arrays, each centre-cropped to self.size.
        # NOTE(review): paths are split on '\\' -- Windows-only path layout.
        read_list = ["rho", "u", "v", "pressure"]
        ##########
        # Input
        input_labels = []
        attention_maps = []
        ##########
        # Output
        fluid = []
        shock_labels = []
        data_path = self.file_list[index]
        # delta_x =0
        # delta_y =0
        # delta_x = random.randrange(256 - self.size)
        # delta_y = random.randrange(256 - self.size)
        # Centre-crop offsets within the stored 256x256 grids.
        delta_x = int((256 - self.size) / 2)
        delta_y = int((256 - self.size) / 2)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # distance
        ### The transpose may already be applied; orientation looks off -- be careful
        ### (the data was transposed before being saved)
        data_path_distance = "/".join(data_path.split("\\")[:2])
        data_path_distance = data_path_distance + "/distance.csv"
        # data_path_distance = data_path_distance + "/distance_diff.csv"
        data_distance = np.loadtxt(data_path_distance, dtype="float", delimiter=",")
        # Normalise by the grid diagonal so distances fall in [0, 1].
        data_distance = data_distance / (math.sqrt(2) * self.size)
        # print(data_distance)
        # data_distance=data_distance.T
        data_distance = data_distance[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        # import matplotlib.pyplot as plt
        # plt.imshow(data_distance)
        # plt.colorbar()
        # plt.show()
        data_distance = np.reshape(data_distance, (1, self.size, self.size))
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # boundary
        data_path_boundary = "/".join(data_path.split("\\")[:2])
        data_path_boundary = data_path_boundary + "/modified_boundary.csv"
        boundary = np.loadtxt(data_path_boundary, dtype="float", delimiter=",")
        boundary = boundary[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        # plt.imshow(boundary)
        # plt.colorbar()
        # plt.show()
        boundary = np.reshape(boundary, (1, self.size, self.size))
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # input_labels: parsed from the directory name (two numeric suffixes)
        temp_label = data_path.split("\\")[2].split("_")
        input_labels.append(float(temp_label[0][4:]))
        input_labels.append(float(temp_label[1][5:]))
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # fluid: one cropped channel per field in read_list
        for item in read_list:
            # path = osp.join(data_path + "/" + item + ".csv")
            path = osp.join(data_path + item + ".csv")
            data = np.loadtxt(path, delimiter=",")
            data = data.T
            data = data[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
            data = np.reshape(data, (self.size, self.size))
            # plt.imshow(data)
            # plt.colorbar()
            # plt.show()
            fluid.append(data)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # shock_labels
        shock_path = osp.join(data_path + "/" + "result" + ".csv")
        shock = np.loadtxt(shock_path, delimiter=",")
        # shock = shock.T
        shock = shock[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        shock = np.reshape(shock, (self.size, self.size))
        # plt.imshow(shock)
        # plt.colorbar()
        # plt.show()
        shock_labels.append(shock)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        ## attention map (currently disabled)
        # attention_path= osp.join(data_path + "/" + "attention" + ".csv")
        # attention= np.loadtxt(attention_path, delimiter=",")
        # attention=attention[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        # attention= np.reshape(attention, (self.size, self.size))
        # # plt.imshow(shock)
        # # plt.colorbar()
        # # plt.show()
        # attention_maps.append(attention)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # summary: cast everything to float64 arrays
        boundary = np.array(boundary, dtype="float64")
        distance = np.array(data_distance, dtype="float64")
        input_labels = np.array(input_labels, dtype="float64")
        fluid = np.array(fluid, dtype="float64")
        shock_labels = np.array(shock_labels, dtype="float64")
        attention_maps = np.array(attention_maps, dtype="float64")
        # return boundary, distance, input_labels, fluid, shock_labels,attention_maps
        return boundary, distance, input_labels, fluid, shock_labels
class Dataset_Test(data.Dataset):
    """Test-time dataset: loads one CFD case per index and returns
    (boundary, distance, input_labels, fluid, shock_labels, data_path),
    each field center-cropped from the stored 256x256 grid to (size, size).

    Attributes
    ----------
    file_list : list
        Per-case data directory paths (Windows-style, backslash-separated).
    phase : str
        'train' or 'test' (kept for interface parity; not used below).
    size : int
        Side length of the square crop.
    """
    def __init__(self, file_list, phase='train', input_size=200):
        self.file_list = file_list # file path
        # self.transform = transform #
        self.phase = phase # train or val
        self.size = input_size
    def __len__(self):
        # One sample per case directory.
        return len(self.file_list)
    def __getitem__(self, index):
        """Load and crop all CSV fields for case `index` as float64 arrays."""
        read_list = ["rho", "u", "v", "pressure"]
        ##########
        # Input
        input_labels = []
        attention_maps = []
        ##########
        # Output
        fluid = []
        shock_labels = []
        data_path = self.file_list[index]
        # delta_x =0
        # delta_y =0
        # delta_x = random.randrange(256 - self.size)
        # delta_y = random.randrange(256 - self.size)
        # Fixed, centered crop offsets (test time: no random cropping).
        delta_x = int((256 - self.size) / 2)
        delta_y = int((256 - self.size) / 2)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # distance
        ### NOTE(review, translated): a transpose may have been applied — the
        ### orientation might be off, be careful; the file is saved transposed.
        data_path_distance = "/".join(data_path.split("\\")[:2])
        data_path_distance = data_path_distance + "/distance.csv"
        # data_path_distance = data_path_distance + "/distance_diff.csv"
        data_distance = np.loadtxt(data_path_distance, dtype="float", delimiter=",")
        # Normalize by the crop diagonal length.
        data_distance = data_distance / (math.sqrt(2) * self.size)
        # print(data_distance)
        # data_distance=data_distance.T
        data_distance = data_distance[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        # import matplotlib.pyplot as plt
        # plt.imshow(data_distance)
        # plt.colorbar()
        # plt.show()
        data_distance = np.reshape(data_distance, (1, self.size, self.size))
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # boundary
        data_path_boundary = "/".join(data_path.split("\\")[:2])
        data_path_boundary = data_path_boundary + "/modified_boundary.csv"
        boundary = np.loadtxt(data_path_boundary, dtype="float", delimiter=",")
        boundary = boundary[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        # plt.imshow(boundary)
        # plt.colorbar()
        # plt.show()
        boundary = np.reshape(boundary, (1, self.size, self.size))
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # input_labels
        # NOTE(review): assumes the 3rd path component encodes two numeric
        # labels after 4- and 5-character prefixes — confirm the naming scheme.
        temp_label = data_path.split("\\")[2].split("_")
        input_labels.append(float(temp_label[0][4:]))
        input_labels.append(float(temp_label[1][5:]))
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # fluid
        for item in read_list:
            # path = osp.join(data_path + "/" + item + ".csv")
            # NOTE(review): concatenation (not osp.join of two parts) —
            # presumably data_path already ends with a separator; verify.
            path = osp.join(data_path + item + ".csv")
            data = np.loadtxt(path, delimiter=",")
            data = data.T
            data = data[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
            data = np.reshape(data, (self.size, self.size))
            # plt.imshow(data)
            # plt.colorbar()
            # plt.show()
            fluid.append(data)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # shock_labels
        shock_path = osp.join(data_path + "/" + "result" + ".csv")
        shock = np.loadtxt(shock_path, delimiter=",")
        # shock = shock.T
        shock = shock[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        shock = np.reshape(shock, (self.size, self.size))
        # plt.imshow(shock)
        # plt.colorbar()
        # plt.show()
        shock_labels.append(shock)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        ## attention map
        # attention_path= osp.join(data_path + "/" + "attention" + ".csv")
        # attention= np.loadtxt(attention_path, delimiter=",")
        # attention=attention[delta_y:self.size + delta_y, delta_x:self.size + delta_x]
        # attention= np.reshape(attention, (self.size, self.size))
        # # plt.imshow(shock)
        # # plt.colorbar()
        # # plt.show()
        # attention_maps.append(attention)
        # = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
        # summary
        boundary = np.array(boundary, dtype="float64")
        distance = np.array(data_distance, dtype="float64")
        input_labels = np.array(input_labels, dtype="float64")
        fluid = np.array(fluid, dtype="float64")
        shock_labels = np.array(shock_labels, dtype="float64")
        attention_maps = np.array(attention_maps, dtype="float64")
        # return boundary, distance, input_labels, fluid, shock_labels,attention_maps
        return boundary, distance, input_labels, fluid, shock_labels, data_path
def load(phase, batch_size, input_size):
    """Create a DataLoader for the given phase.

    :param phase: "train", "val" or "test"
    :param batch_size: batch size for train/val loaders (test is fixed at 10)
    :param input_size: side length of the square crops
    :raises ValueError: if phase is not a known phase
    :return: a torch DataLoader over the matching dataset
    """
    # BUG FIX: the original condition `phase == "train" or "val"` is always
    # truthy (a non-empty string literal), so even phase == "test" first built
    # a shuffled training loader before overwriting it.
    if phase in ("train", "val"):
        dataset = Dataset(file_list=make_data_path_list(phase),
                          phase=phase,
                          input_size=input_size)
        dataloader = data.DataLoader(
            dataset, batch_size=batch_size, shuffle=True, num_workers=6
        )
    elif phase == "test":
        dataset = Dataset_Test(file_list=make_data_path_list(phase),
                               phase=phase,
                               input_size=input_size)
        dataloader = data.DataLoader(
            dataset, batch_size=10, shuffle=False, num_workers=8
        )
    else:
        raise ValueError("unknown phase: %r" % (phase,))
    return dataloader
######################################################################
|
from lxml import etree
# Parse the Stanford CoreNLP XML output and pull out the pieces used below:
# the <sentences> container and the <coreference> chains of the first document.
tree = etree.parse("nlp.txt.xml")
root = tree.getroot()
docment = root[0]
sentences = docment.find("sentences")
coreferences = docment.find("coreference")
def sentence_text(sentence):
    """Return the sentence as one space-joined string of its token words."""
    words = []
    for token in sentence.find("tokens"):
        words.append(token.find("word").text)
    return " ".join(words)
def replaced_sentence(sentence, start, end, head, representative):
    """Rebuild the sentence text with tokens in [start, end) replaced by
    `representative`, keeping the original span in parentheses.

    The token whose id equals `end` is where the replacement is emitted
    (and its own word is not printed). `head` is accepted for interface
    parity but unused.
    """
    out = []
    span = []
    for token in sentence.find("tokens"):
        tid = int(token.attrib["id"])
        if start <= tid < end:
            span.append(token.find("word").text)
        elif tid == end:
            out.append("{} ({})".format(representative, " ".join(span)))
        else:
            out.append(token.find("word").text)
    return " ".join(out)
def parse_mention(mention):
    """Extract (sentence_id, start, end, head, text) from a <mention> node."""
    numeric = tuple(int(mention.find(tag).text)
                    for tag in ("sentence", "start", "end", "head"))
    return numeric + (mention.find("text").text,)
# For each coreference chain: print the representative mention's text, then
# every sentence containing a non-representative mention with that mention
# replaced by the representative text.
for idx, coreference in enumerate(coreferences):
    mentions = coreference.findall("mention")
    assert len(mentions) > 0
    # The chain's canonical mention is flagged representative="true".
    representative_mention = coreference.xpath(
        'mention[@representative="true"]')[0]
    r_sentence_id, r_start, r_end, r_head, r_text = parse_mention(
        representative_mention)
    print("**representative text**: {}".format(r_text))
    for mention in mentions:
        # NOTE(review): attrib values are strings, so any non-empty value
        # (even "false") is truthy here — confirm only "true" ever occurs.
        if "representative" in mention.attrib and mention.attrib["representative"]:
            continue
        sentence_id, start, end, head, text = parse_mention(mention)
        sentence = sentences.xpath("//sentence[@id={}]".format(sentence_id))[0]
        # NOTE(review): this overwrites `text` and the result is never used.
        text = sentence_text(sentence)
        replaced_text = replaced_sentence(sentence, start, end, head, r_text)
        print(replaced_text)
    print("")
|
# Small demonstration of Python conditionals (messages in French).

# A literal True condition always runs its branch.
print("c'est vrai")

x = True
# Conditional expression instead of an if/else statement.
print(" X est vrai" if x else " X n'est pas vrai")

loc = "banque"
if loc == "auto":
    print("Bienvenu au magasin auto")
elif loc == "banque":
    print("Bienvenu à la banque")
else:
    print("Au revoir")
|
from itertools import permutations
from itertools import combinations
def dist(a, b):
    """Squared Euclidean distance between 2-D points a and b (no sqrt)."""
    dy = a[1] - b[1]
    dx = a[0] - b[0]
    return dy * dy + dx * dx
# Read n volunteer points, m target points, and k from stdin, then search
# over assignments (via permutations of sorted distance rows) to minimize
# the largest of the k chosen distances.
[n,m,k] = list(map(int,str(input()).split(" ")))
vol = []
med = []
for x in range(n):
    vol.append(list(map(int,str(input()).split(" "))))
for y in range(m):
    med.append(list(map(int,str(input()).split(" "))))
# out[i] = sorted squared distances from volunteer i to every target.
out = []
for c in vol:
    info = []
    for d in med:
        info.append(dist(c,d))
    info.sort()
    out.append(info)
out.sort()
# For each permutation, pick the z-th smallest distance from row z.
# NOTE(review): permutations(range(len(out))) is factorial in n — fine only
# for small inputs.
output = [[row[z] for row, z in zip(out, permutation)]
          for permutation in permutations(range(len(out)))]
arr = []
for i in output:
    arr.extend([*combinations(i, k)])
print(min(list(map(max,arr))))
|
import os
import logging
import logging.handlers
def SetupLogs(path):
    """Create and return the client-wide "miner_watchdog" logger.

    The logger writes to <path>/logs/miner_watchdog.log, rotating once per
    day and keeping backups for the last 7 days. All levels are emitted.
    """
    logger = logging.getLogger("miner_watchdog")
    # Output everything from DEBUG up.
    logger.setLevel(logging.DEBUG)
    log_file = os.path.abspath(os.path.join(path, "./logs/miner_watchdog.log"))
    # Rotate daily, keep the last 7 files as backups.
    handler = logging.handlers.TimedRotatingFileHandler(
        log_file, when="D", interval=1, backupCount=7)
    handler.setFormatter(
        logging.Formatter("%(name)s %(levelname)s %(asctime)s %(message)s"))
    logger.addHandler(handler)
    return logger
"""
Configuration of syllabus server.
Edit to fit development or deployment environment.
"""
PORT=5000
DEBUG = True # Set to False for production use
schedule="data/schedule.txt"
|
from django import forms
from .models import Modify_Result
class First_Form(forms.ModelForm):
    """ModelForm for Modify_Result with per-exercise upper-bound validation."""
    class Meta:
        model = Modify_Result
        fields = '__all__'
        # BUG FIX: the ModelForm Meta option is `labels` (plural); the
        # original `label` attribute was silently ignored by Django, so the
        # custom field labels never appeared.
        labels = {'Person_Name' : 'Name ' , 'Pull_Ups' : 'Pull Ups' , 'Push_Ups' : 'Push Ups','Chin_Ups' : 'Chin Ups'}
    # required widgets label initial
    '''
    def clean_Person_Name(self):
        return self.Person_Name
    '''
    def clean_Pull_Ups(self):
        """Reject pull-up counts above 20."""
        number_pull = self.cleaned_data.get('Pull_Ups')
        if number_pull > 20:
            raise forms.ValidationError('Must Perform Under 20')
        return number_pull
    def clean_Push_Ups(self):
        """Reject push-up counts above 50."""
        number_push = self.cleaned_data.get('Push_Ups')
        if number_push > 50:
            raise forms.ValidationError('Must Perform Under 50')
        return number_push
    def clean_Chin_Ups(self):
        """Reject chin-up counts above 20."""
        number_chin = self.cleaned_data.get('Chin_Ups')
        if number_chin > 20:
            raise forms.ValidationError('Must Perform Under 20')
        return number_chin
class Radio_Form(forms.Form):
    """Plain form exposing a male/female radio-button gender choice."""
    # (stored value, displayed label) pairs for the radio widget.
    RADIO = [('male','Male'),
             ('female','Female')]
    gender = forms.CharField(label='Select your gender',widget=forms.RadioSelect(choices=RADIO))
|
import pika
# Connect to the RabbitMQ broker running on localhost.
connection = pika.BlockingConnection(
    pika.ConnectionParameters('localhost')
)
channel = connection.channel() # open a channel
# Declare the queue (idempotent: creates it if it does not yet exist).
channel.queue_declare(queue='hello')
# Publish one message to the 'hello' queue via the default exchange.
channel.basic_publish(exchange='',
                      routing_key='hello',
                      body='Hello World!'
                      )
print("[X] send 'Hello World'")
connection.close()
|
# -*- coding: utf-8 -*-
from django.conf import settings
from modeltranslation.translator import TranslationOptions
class BaseTranslationOptions(TranslationOptions):
    """Project-wide defaults for django-modeltranslation options."""
    # A translation must exist for the site's default language.
    required_languages = (settings.DEFAULT_LANGUAGE,)
    # Fall back through every configured language code.
    fallback_languages = {'default': settings.LANGUAGE_CODES}
    # Treat empty strings as "no translation".
    empty_values = ''
def get_model_translation_fields(model, with_original_fields=True):
    """
    Collect every translatable field name for `model`.

    :param model: the model whose translatable fields are listed
    :param with_original_fields: also include the original (fallback) field
        names ahead of the per-language variants
    """
    names = list(model.translation_fields) if with_original_fields else []
    names.extend(
        '%s_%s' % (field, language)
        for language in settings.LANGUAGE_CODES
        for field in model.translation_fields
    )
    return names
def get_model_translation_suit_tabs(model):
    """Build the suit tab list for the admin card: a 'general' tab followed
    by one tab per translatable field, labelled with its verbose name."""
    verbose_by_name = {f.attname: f.verbose_name for f in model._meta.fields}
    tabs = [('general', 'Основное')]
    for name in model.translation_fields:
        tabs.append((name, verbose_by_name.get(name, name)))
    return tuple(tabs)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AppledailyItem(scrapy.Item):
    """Item holding one Apple Daily video entry scraped from the site."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    # publication date
    date = scrapy.Field()
    # headline / title
    title = scrapy.Field()
    # BUG FIX: removed a stray debug `print(title)` that printed a bare
    # scrapy.Field object every time this module was imported.
    # page URL
    links = scrapy.Field()
    # popularity / view count
    view = scrapy.Field()
    # video file URL
    video_link = scrapy.Field()
|
import bisect as bisect
N = int(input())
*A, = map(int, input().split())
# B = prefix sums of A: B[i] = A[0] + ... + A[i-1].
B = [0]
for x in A:
    B.append(B[-1]+x)
ans = 10**15
# Try every middle split i; use bisect to pick split points in each half so
# the four piece sums C, D, E, F are as balanced as possible.
for i in range(2, N-1):
    print(i)  # NOTE(review): debug output — remove before submitting
    m = sum(A[:i])/i
    M = sum(A[i:])/(N-i)
    # NOTE(review): bisect is fed the *averages* m/M as targets against the
    # prefix-sum array B — confirm this is intended; it looks suspicious.
    n = bisect.bisect(B,m)
    l = bisect.bisect(B,M)
    if abs(B[i]-B[n]-B[n]+B[0]) <= abs(B[i]-B[n-1]-B[n-1]+B[0]):
        C = abs(B[i]-B[n])
        D = abs(B[n]-B[0])
    else:
        C = abs(B[i]-B[n-1])
        D = abs(B[n-1]-B[0])
    if abs(B[N]-B[l]-B[l]+B[i]) <= abs(B[N]-B[l-1]-B[l-1]+B[i]):
        E = abs(B[N]-B[l])
        F = abs(B[l]-B[i])
    else:
        E = abs(B[N]-B[l-1])
        F = abs(B[l-1]-B[i])
    print([C,D,E,F])  # NOTE(review): debug output — remove before submitting
    ans = min(ans, max([C,D,E,F])-min([C,D,E,F]))
print(ans)
|
# Binary birthday-card trick: card k lists the days whose k-th binary bit is
# set, so summing the values (1, 2, 4, 8, 16) of the cards the user answers
# "Yes" to reconstructs the day of the month.
# NOTE(review): answers are case-sensitive; anything other than exactly
# 'Yes' counts as No.
day = 0
q1 = 'Is your Birthday in Set 1?\n \
1 3 5 7\n\
9 11 13 15\n \
17 19 21 23\n \
25 27 29 31\n \
\nEnter Yes or No: '
answer = input(q1)
if answer == 'Yes':
    day += 1
q2 = 'Is your Birthday in Set 2?\n \
2 3 6 7\n \
10 11 14 15\n \
18 19 22 23\n \
26 27 30 31\n \
\nEnter Yes or No: '
answer = input(q2)
if answer == 'Yes':
    day += 2
q3 = 'Is your Birthday in Set 3?\n \
4 5 6 7\n \
12 13 14 15\n \
20 21 22 23\n \
28 29 30 31\n \
\nEnter Yes or No: '
answer = input(q3)
if answer == 'Yes':
    day += 4
q4 = 'Is your Birthday in Set 4?\n \
8 9 10 11\n \
12 13 14 15\n \
24 25 26 27\n \
28 29 30 31\n \
\nEnter Yes or No: '
answer = input(q4)
if answer == 'Yes':
    day += 8
q5 = 'Is your Birthday in Set 5?\n \
16 17 18 19\n \
20 21 22 23\n \
24 25 26 27\n \
28 29 30 31\n \
\nEnter Yes or No: '
answer = input(q5)
if answer == 'Yes':
    day += 16
print(f'\nYour birthday is: {day}!')
from django.urls import path
from rest_framework.authtoken.views import obtain_auth_token
from .views import (mainPageData,
messageBox,
messages,
addMessages,
addFeedback,
signupAsProvider,
logout,
account,
setFirstname,
setLastname,
setEmail,
setPassword,
setMyAddr,
setLoc,
setMyNo,
setShopName,
ShopCatagories,
updateShopCatagory,
updateMainImage,
updateImage,
addNewImage,
setOpenTime,
setCloseTime,
setRentalStatus,
setNoOfItems,
setPriceType,
updateServiceAddr,
deleteSearchName,
deleteImage,
addSearchName,
addNewService,
search,
productData,
addNewSmsBox,
giveRating,
addServiceFeed,
updateDesc,
removeItem,
FAQData,
posts,
addPostComment,
removePostComment,
addPostCommentReply,
removePostCommentReply,
addPostLike,
savePost,
myPosts,
activatePostTogle,
addNewPost,
savedServices,
)
app_name = 'main'
# Flat URL table for the `main` app: one function-based view per endpoint,
# each route name matching its view name (DRF's obtain_auth_token for login).
urlpatterns = [
    path('api-token-auth/', obtain_auth_token, name='api_token_auth'),
    path('mainPageData/', mainPageData,name='main_page_data'),
    path('messageBox/', messageBox, name='messageBox'),
    path('messages/', messages, name='messages'),
    path('addMessages/', addMessages, name='addMessages'),
    path('signupAsProvider/', signupAsProvider,name='signupAsProvider'),
    path('logout/', logout, name='logout'),
    path('addFeedback/', addFeedback, name='addFeedback'),
    path('account/', account, name='account'),
    path('setFirstname/',setFirstname, name='setFirstname'),
    path('setLastname/',setLastname, name='setLastname'),
    path('setEmail/',setEmail, name='setEmail'),
    path('setPassword/',setPassword, name='setPassword'),
    path('setMyAddr/',setMyAddr, name='setMyAddr'),
    path('setLoc/',setLoc, name='setLoc'),
    path('setMyNo/',setMyNo, name='setMyNo'),
    path('setShopName/',setShopName, name='setShopName'),
    path('ShopCatagories/',ShopCatagories, name='ShopCatagories'),
    path('updateShopCatagory/',updateShopCatagory, name='updateShopCatagory'),
    path('updateMainImage/',updateMainImage, name='updateMainImage'),
    path('updateImage/',updateImage, name='updateImage'),
    path('addNewImage/',addNewImage,name='addNewImage'),
    path('setOpenTime/',setOpenTime,name='setOpenTime'),
    path('setCloseTime/',setCloseTime,name='setCloseTime'),
    path('setRentalStatus/',setRentalStatus,name='setRentalStatus'),
    path('setNoOfItems/',setNoOfItems,name='setNoOfItems'),
    path('setPriceType/',setPriceType,name='setPriceType'),
    path('updateServiceAddr/',updateServiceAddr,name='updateServiceAddr'),
    path('deleteSearchName/',deleteSearchName,name='deleteSearchName'),
    path('deleteImage/',deleteImage,name='deleteImage'),
    path('addSearchName/',addSearchName,name='addSearchName'),
    path('addNewService/',addNewService,name='addNewService'),
    path('search/',search,name='search'),
    path('productData/',productData,name='productData'),
    path('addNewSmsBox/',addNewSmsBox,name='addNewSmsBox'),
    path('giveRating/',giveRating,name='giveRating'),
    path('addServiceFeed/',addServiceFeed,name='addServiceFeed'),
    path('updateDesc/',updateDesc,name='updateDesc'),
    path('removeItem/',removeItem,name='removeItem'),
    path('FAQData/',FAQData,name='FAQData'),
    path('posts/',posts,name='posts'),
    path('addPostComment/',addPostComment,name='addPostComment'),
    path('removePostComment/',removePostComment,name='removePostComment'),
    path('addPostCommentReply/',addPostCommentReply,name='addPostCommentReply'),
    path('removePostCommentReply/',removePostCommentReply,name='removePostCommentReply'),
    path('addPostLike/',addPostLike,name='addPostLike'),
    path('savePost/',savePost,name='savePost'),
    path('myPosts/',myPosts,name='myPosts'),
    path('activatePostTogle/',activatePostTogle,name='activatePostTogle'),
    path('addNewPost/',addNewPost,name='addNewPost'),
    path('savedServices/',savedServices,name='savedServices'),
]
|
#!/usr/bin/env python3
import logging
import signal
from sonosco.inference.las_inference import LasInference
from sonosco.ros1.server import SonoscoROS1
from roboy_cognition_msgs.srv import RecognizeSpeech
from roboy_control_msgs.msg import ControlLeds
from mic_client import MicrophoneClient
# from std_msgs.msg import Empty
# model_path = "pretrained/deepspeech_final.pth"
model_path = "pretrained/las_model_5.pt"
# asr = DeepSpeech2Inference(model_path)
# The ASR model is loaded once at import time and shared by all callbacks.
asr = LasInference(model_path)
leave = False
got_a_sentence = False
def handle_int(sig, chunk):
    """SIGINT handler: flag the recording loop to stop."""
    global leave, got_a_sentence
    leave = True
    got_a_sentence = True
# Install the handler for Ctrl-C.
signal.signal(signal.SIGINT, handle_int)
def vad_callback(request, publishers):
    """Service callback: switch the LED matrix mode, record audio from the
    microphone client, and return the ASR transcription.

    :param request: incoming RecognizeSpeech request (content unused here)
    :param publishers: dict of ROS publishers built from CONFIG
    :return: the transcription produced by the shared `asr` model
    """
    msg = ControlLeds()
    msg.mode = 2
    msg.duration = 0
    publishers['ledmode'].publish(msg)
    with MicrophoneClient() as audio_input:
        audio = audio_input.request_audio()
        transcription = asr.infer(audio)
    # msg = Empty()
    # publishers['ledfreez'].publish(msg)
    return transcription
# Node/topic wiring consumed by SonoscoROS1 below.
CONFIG = {
    'node_name': 'roboy_speech_recognition',
    'workers': 5,
    # Each entry exposes a ROS service backed by `callback`.
    'subscribers': [
        {
            'name': 'recognition',
            'topic': '/roboy/cognition/speech/recognition',
            'service': RecognizeSpeech,
            'callback': vad_callback,
        },
        {
            # Same callback (and therefore same model) as the English endpoint.
            'name': 'recognition_german',
            'topic': '/roboy/cognition/speech/recognition/german',
            'service': RecognizeSpeech,
            'callback': vad_callback,
        }
    ],
    # LED matrix control publishers, available to callbacks by name.
    'publishers': [
        {
            'name': 'ledmode',
            'topic': '/roboy/control/matrix/leds/mode',
            'message': ControlLeds,
            'kwargs': {
                'queue_size': 3
            }
        },
        {
            'name': 'ledoff',
            'topic': '/roboy/control/matrix/leds/off',
            'message': ControlLeds,
            'kwargs': {
                'queue_size': 10
            }
        },
        {
            'name': 'ledfreez',
            'topic': '/roboy/control/matrix/leds/freeze',
            'message': ControlLeds,
            'kwargs': {
                'queue_size': 1
            }
        }
    ],
}
def main(args=None):
    """
    Run the ROS1 server that handles speech recognition requests.

    Args:
        args: unused; kept for launcher compatibility.

    Returns:
        None.
    """
    with SonoscoROS1(CONFIG) as server:
        server.run()
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    main()
|
# Generated by Django 3.2.6 on 2021-08-23 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert the `id` primary keys of cart, cartproduct, product and
    productphoto to plain (non-auto) IntegerField primary keys."""
    dependencies = [
        ('bot', '0002_alter_customer_id'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cart',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='cartproduct',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='product',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='productphoto',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False),
        ),
    ]
|
import collections
import copy
import pprint as ppr
# Shared pretty-printer configured with a 2-space indent.
_printer = ppr.PrettyPrinter(indent=2)
def pprint(x):
    """Pretty-print x and return it unchanged, so calls can be chained."""
    _printer.pprint(x)
    return x
def fmap(f, d):
    """Apply f to every value of dict d, returning a new dict, same keys."""
    return {key: f(value) for key, value in d.items()}
def group_by(coll, f):
    """Group the items of coll into a dict keyed by f(item), preserving the
    original item order within each group."""
    grouped = {}
    for item in coll:
        grouped.setdefault(f(item), []).append(item)
    return grouped
def first(coll):
    """Return the first element of an indexable collection."""
    return coll[0]
def last(coll):
    """Return the last element of an indexable collection."""
    return coll[-1]
def getter(n):
    """Return a function that extracts index/key n from its argument."""
    return lambda x: x[n]
def update(d, k, f, *args):
    """Return a deep copy of d in which key k is bound to
    f(old_value, *args); a missing key passes None to f. d is untouched."""
    updated = copy.deepcopy(d)
    updated[k] = f(d.get(k), *args)
    return updated
def assoc(d, k, v):
    """Return a deep copy of d with key k bound to v; d itself is untouched
    and shares no mutable state with the result."""
    copied = copy.deepcopy(d)
    copied[k] = v
    return copied
def zipmap(ks, vs):
    """Build a dict pairing each key in ks with the same-position value in
    vs; extra items in the longer sequence are ignored."""
    return dict(zip(ks, vs))
def normal_dict(d):
    """Recursively convert a tree of (Ordered)dicts and lists into plain
    dicts and lists; leaves are returned unchanged.

    BUG FIX: the original only recursed into OrderedDict instances, so
    OrderedDicts nested inside a *plain* dict were left unconverted.
    Matching on `dict` (OrderedDict's base class) normalizes the whole
    tree. Containers are rebuilt, so the input is never mutated.
    """
    if isinstance(d, dict):
        return {k: normal_dict(v) for k, v in d.items()}
    elif isinstance(d, list):
        return [normal_dict(v) for v in d]
    else:
        return d
|
import numpy as np
import scipy
import scipy.special
gamma = scipy.special.gamma
# this is a function to make a GARCH(1,1) timeseries of length N
def generateX(N, omega, alpha ,beta, nu, sigma1):
    """Simulate a GARCH(1,1) series with Student-t innovations.

    NOTE(review): `sigma1` is used directly as the initial *variance* here,
    while logli() below squares it (`sigma1 ** 2`) — the two conventions are
    inconsistent; confirm which is intended.
    X[0] and sigmasquared[0] are never updated, so X[0] stays 0.
    Uses np.random, so results vary unless the global seed is fixed.
    """
    X = np.zeros(N)
    sigmasquared = sigma1 * np.ones(N)
    # Scale so the t innovations have unit variance.
    Z = np.sqrt((nu - 2) / nu) * np.random.standard_t(nu, N)
    X[1] = np.sqrt(sigmasquared[1]) * Z[1]
    for i in range(2, N):
        sigmasquared[i] = omega + alpha * sigmasquared[i - 1] + beta * X[i - 1] ** 2
        X[i] = np.sqrt(sigmasquared[i]) * Z[i]
    return X
# Given a timeseries X and parameter vector `params`, return MINUS the
# GARCH(1,1) log likelihood (so it can be fed straight to a minimizer).
def logli(params, X):
    """Negative log-likelihood of X under GARCH(1,1) with standardized
    Student-t innovations. params = (omega, alpha, beta, nu, sigma1)."""
    N = len(X)
    omega, alpha, beta, nu, sigma1 = params
    # Conditional variance recursion; the first two entries keep sigma1**2.
    sigmasquared = sigma1 ** 2 * np.ones(N)
    for i in range(2, N):
        sigmasquared[i] = omega + alpha * sigmasquared[i - 1] + beta * X[i - 1] ** 2
    # Constant part of the t density, summed over all N observations.
    const = N * np.log(gamma((nu + 1) / 2) / gamma(nu / 2) / np.sqrt((nu - 2) * np.pi))
    standardized = np.divide(np.power(X, 2), sigmasquared) / (nu - 2)
    varying = np.sum((nu + 1) / 2 * np.log(1 + standardized) + np.log(sigmasquared) / 2)
    return -(const - varying)
# this function generates GARCH(1,1) data for a certain set of parameters
# and then fits parameters to the data
# it repeats this "runs" time .... and then shows the results
def chkft(runs):
    """Monte-Carlo sanity check: simulate `runs` GARCH(1,1) samples with
    known parameters, refit each with fmin, and print the fitted values."""
    # scipy.optimize is a submodule and must be imported explicitly;
    # `import scipy` alone does not make it available.
    import scipy.optimize
    N = 10000
    omega = 0.1
    alpha = 0.5
    beta = 0.4
    nu = 4.0
    # Unconditional variance of the process, used as the starting sigma1.
    sigma1 = omega / (1 - alpha - beta)
    params = np.array([omega, alpha, beta, nu, sigma1])
    myd = np.zeros((runs, len(params)))
    for i in range(runs):
        x = generateX(N, *params)
        myd[i, :] = scipy.optimize.fmin(logli, x0=params, args=(x,), xtol=1e-5, ftol=1e-10)
    # BUG FIX: `print myd` is Python 2 syntax — a SyntaxError on Python 3.
    print(myd)
# fitting to some actual data: dat1 TASE returns from 2000-2017 (maybe backwards)
# dat2 IBM daily returns from 2010-2012 (maybe backwards)
# BUG FIX: the original used the Python-2-only `file(...)` builtin and the
# removed NumPy alias `np.float`; use open() (with context managers, so the
# handles are closed) and the builtin float instead.
import scipy.optimize  # submodule must be imported explicitly

with open('dat1') as fh:
    x1 = np.array(fh.read().splitlines()).astype(float)
x2 = np.flip(x1, 0)
with open('dat2') as fh:
    y1 = np.array(fh.read().splitlines()).astype(float)
y2 = np.flip(y1, 0)
# Common starting guess: (omega, alpha, beta, nu, sigma1).
sw = np.array([0.1, 0.5, 0.4, 5.1, 1.5])
a1 = scipy.optimize.fmin(logli, x0=sw, args=(x1,), xtol=1e-5, ftol=1e-10)
a2 = scipy.optimize.fmin(logli, x0=sw, args=(x2,), xtol=1e-5, ftol=1e-10)
b1 = scipy.optimize.fmin(logli, x0=sw, args=(y1,), xtol=1e-5, ftol=1e-10)
b2 = scipy.optimize.fmin(logli, x0=sw, args=(y2,), xtol=1e-5, ftol=1e-10)
|
# Copyright 2018 Nicholas Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint, Response, request, abort
import json
import os
import time
from galini_io.reader import MessageReader
from galini_dashboard.API.ConnectionManager import ConnectionManager
def create_logs_blueprint(static_path):
    """Build the Flask blueprint serving GALINI run logs, states and
    symmetry data from `static_path` (one sub-directory per run)."""
    manager = ConnectionManager(static_path)
    logs_endpoint = Blueprint("logs_endpoint", __name__, static_folder=static_path)
    @logs_endpoint.route("/init")
    def get():
        # Hand the client a fresh connection id.
        return manager.establishNewConnection()
    @logs_endpoint.route("/getlist", methods=["GET"])
    def getList():
        # List every run directory under static_path.
        directories = os.listdir(static_path)
        arr = []
        for name in directories:
            arr.append(name)
        return json.dumps(arr)
    @logs_endpoint.route("/gettext", methods=["POST"])
    def getText():
        # Raw text of one log file for the caller's connection.
        body = request.get_json()
        con = getConnection(body["id"])
        filename = body["filename"]
        try:
            return con.readText(filename)
        except FileNotFoundError:
            abort(400) # File not found
    @logs_endpoint.route("/getstate", methods=["POST"])
    def getState():
        # Parsed solver state for one run, as JSON.
        body = request.get_json()
        con = getConnection(body["id"])
        filename = body["filename"]
        try:
            return json.dumps(con.readState(filename))
        except FileNotFoundError:
            abort(400) # File not found
    @logs_endpoint.route("/getSymmetry", methods=["POST"])
    def getSymmetry():
        # Contents of <run>/symmetry.json, or [] if the file is absent.
        body = request.get_json()
        con = getConnection(body["id"])
        filename = body["filename"]
        try:
            # NOTE(review): this handle is never closed — wrap in `with`.
            f = open(os.path.join(static_path, filename, "symmetry.json"), "r")
            return json.dumps(json.load(f))
        except FileNotFoundError:
            return json.dumps([])
    def getConnection(uuid):
        # Resolve a connection id or abort the request with 400.
        con = manager.getConnection(uuid)
        if con is None:
            abort(400) # User id not found
        return con
    return logs_endpoint
|
import sys
from rosalind_utility import hamming_dist
if __name__ == "__main__":
'''
Given: Two DNA strings s and t of equal length (not exceeding 1 kbp).
Return: The Hamming distance dH(s,t).
'''
input_lines = sys.stdin.read().splitlines()
s1 = input_lines[0]
s2 = input_lines[1]
print(hamming_dist(s1, s2))
|
#import sys
#input = sys.stdin.readline
def main():
    """Read a sequence P from stdin and print how many positions hold a new
    running minimum (ties included)."""
    _ = int(input())  # length is implied by the list itself
    P = list(map(int, input().split()))
    count = 0
    running_min = P[0]
    for value in P:
        if value <= running_min:
            count += 1
            running_min = value
    print(count)
if __name__ == '__main__':
    main()
|
from PiSearchStrategy import *
import pygame
from pygame.locals import *
class IntroScreen(object):
    """Intro/instructions screen shown before the game starts.

    Draws the title, description, controls and options onto the supplied
    surface, then blocks until the player chooses to start or quit.
    """
    # (text, topleft) pairs rendered with the small 16pt font, in draw order.
    # REFACTOR: the original Draw() repeated the render/get_rect/blit
    # boilerplate once per line; the lines now live in this table.
    _BODY_LINES = [
        ('Adjust the Raspberry Pi receiver parameters', (50, 150)),
        ('to detect all threats!', (50, 175)),
        ('Controls:', (50, 225)),
        ('UP = Increase angle (reduces detection power)', (50, 250)),
        ('DOWN = Decrease angle (increases detection power)', (50, 275)),
        ('LEFT = Search left', (50, 300)),
        ('RIGHT = Search right', (50, 325)),
        ('A = Increase speed (reduces detection power)', (50, 350)),
        ('B = Decrease speed (increases detection power)', (50, 375)),
        ('Options:', (50, 425)),
        ('N = new game', (50, 450)),
        ('Q = quit', (50, 475)),
    ]

    def __init__(self, surface):
        self.surface = surface

    def Show(self):
        """Draw the screen, then wait for and return the player's choice."""
        self.Draw()
        return self.HandleEvents()

    def _blit_text(self, font, text, topleft):
        # Render one line of green-on-black text at the given position.
        text_surface = font.render(text, True, GREEN, BLACK)
        text_rect = text_surface.get_rect()
        text_rect.topleft = topleft
        self.surface.blit(text_surface, text_rect)

    def Draw(self):
        """Paint the whole intro screen and flip the display."""
        # First, fill the whole screen with black.
        self.surface.fill(BLACK)
        # Title uses the large font; everything else the small one.
        self._blit_text(pygame.font.Font('freesansbold.ttf', 32),
                        'Pi Search Strategy!', (100, 50))
        body_font = pygame.font.Font('freesansbold.ttf', 16)
        for text, topleft in self._BODY_LINES:
            self._blit_text(body_font, text, topleft)
        pygame.display.update()

    def HandleEvents(self):
        """Block until the player quits (Q/ESC/close) or starts (N/C)."""
        # Wait forever until the user enters something we can use.
        while True:
            for event in pygame.event.get():  # event handling loop
                if event.type == QUIT or \
                   (event.type == KEYUP and event.key == K_ESCAPE) or \
                   (event.type == KEYUP and event.key == ord('q')):
                    return Result.QUIT
                elif event.type == KEYUP:
                    if event.key == ord('n') or event.key == pygame.K_c:
                        return Result.NEWGAME
|
#!/usr/bin/env python
from subprocess import check_output
import flask
from flask import request, redirect, url_for, make_response
from os import environ
import os
from flask import jsonify
from werkzeug import secure_filename
from clean_data import *
from create_csv import *
from datetime import datetime
## Build - delete csv file from db
## Build - API for Loan Officer data
##
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# SECURITY(review): database credentials are hard-coded in this DSN — move
# them to environment variables or a config file before deploying.
engine = create_engine('mysql://microfinance:hq7Np2Ex@/microfinance', convert_unicode=True)
##
UPLOAD_FOLDER = '/groups/microfinance/csvfiles'
ALLOWED_EXTENSION = set(['csv'])
app = flask.Flask(__name__)
app.debug = True  # NOTE(review): debug mode must be disabled in production
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
## connection = engine.connect()
## connection.execute("sql command")
@app.route('/')
def index():
    """Render the landing page."""
    return flask.render_template('index.html')
def allowed_file(filename):
    """Return True when filename has an extension in ALLOWED_EXTENSION."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSION
def read_data(file_path):
    """Bulk-load one cleaned CSV into the Clients, Loans and LoanVal tables.

    NOTE(review): file_path is spliced directly into the SQL text. It comes
    from our own upload handler, but parameterizing it would be safer.
    """
    connection = engine.connect()
    # Make the path relative to the csvfiles directory MySQL expects.
    file_path = file_path[file_path.index('csvfiles'):]
    connection.execute("LOAD DATA LOCAL INFILE '" + file_path + "' IGNORE INTO TABLE Clients FIELDS TERMINATED BY ',' (@dummy, ClientID, Name)")
    connection.execute("LOAD DATA LOCAL INFILE '" + file_path + "' IGNORE INTO TABLE Loans FIELDS TERMINATED BY ','(LoanID, ClientID, @dummy, LOID, DisbAmount, DisbDate, Category, @dummy, @dummy, @dummy, MatDate, @dummy, @dummy, @dummy, @dummy, @dummy, @dummy, @dummy, @dummy, BranchID)")
    connection.execute("LOAD DATA LOCAL INFILE '" + file_path + "' IGNORE INTO TABLE LoanVal FIELDS TERMINATED BY ',' (LoanID, @dummy, @dummy, @dummy, @dummy, @dummy, @dummy, NextPmtAmt, NextPmt, Principal, @dummy, LateIntCollected, DaysPD, PDPrincipal, PDInterest, LateInt, PenaltyInt, ReportDate, LoanIDDate)")
    connection.close()
@app.route('/fileupload', methods=['POST'])
def upload_file():
    """Accept a CSV upload, clean it, load it into MySQL, then delete it."""
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            path = (os.path.join(app.config['UPLOAD_FOLDER'], filename))
            file.save(path)
            # NOTE(review): presumably clean_data writes a new CSV named
            # <base><report_date>.csv — the read_data() call below assumes so.
            clean_data(path,request.form['report_date'])
            os.remove(path)
            ## function with database connection and mysql script
            ## takes the csv file path as an arguement
            read_data(str(path[:path.index('.')] + request.form['report_date'] + '.csv'))
            return flask.redirect('microfinance/')
    return 'file upload failed'
@app.route('/OrgData',methods=['get'])
def get_org_data():
    """Organization-wide portfolio time series: per report date, loan count,
    total principal, and principal more than 1 and 30 days past due."""
    connection = engine.connect()
    data = connection.execute("select LoanVal.ReportDate, COUNT(LoanVal.Principal) as 'loanClients', SUM(LoanVal.Principal) as 'principalBalance', SUM(IF( LoanVal.DaysPD > 1, LoanVal.Principal, 0)) as 'principal1Day', SUM(IF( LoanVal.DaysPD > 30, LoanVal.Principal, 0)) as 'Principal30Day' from Branch, Loans, LoanVal where Branch.BranchID = Loans.BranchID and Loans.LoanID = LoanVal.LoanID group by LoanVal.ReportDate;")
    connection.close()
    toJSON = []
    for row in data:
        row_as_dict = dict(row)
        toJSON.append(row_as_dict)
    # Dates must be stringified before JSON serialization.
    for item in toJSON:
        item['ReportDate'] = item['ReportDate'].strftime("%Y-%m-%d")
    return jsonify(results = toJSON)
@app.route('/BranchData',methods=['get'])
def get_branch_data():
    """Per-branch portfolio time series, one response key per branch."""
    connection = engine.connect()
    # NOTE(review): toJSON is never used in this handler.
    toJSON = []
    branches = [10,11,12,13,14,15]
    #branch = request.args['branch']
    for branch in branches:
        data = connection.execute("select LoanVal.ReportDate, COUNT(LoanVal.Principal) as 'loanClients', SUM(LoanVal.Principal) as 'principalBalance', SUM(IF( LoanVal.DaysPD > 1, LoanVal.Principal, 0)) as 'principal1Day', SUM(IF( LoanVal.DaysPD > 30, LoanVal.Principal, 0)) as 'Principal30Day' from Loans, LoanVal where Loans.LoanID = LoanVal.LoanID and Loans.BranchID = "+str(branch)+" group by LoanVal.ReportDate;")
        branch_data = []
        for row in data:
            row_as_dict = dict(row)
            branch_data.append(row_as_dict)
        for item in branch_data:
            item['ReportDate'] = item['ReportDate'].strftime("%Y-%m-%d")
        # Bind each branch id to its named local for the response below.
        # NOTE(review): if `branches` ever changes, some of these locals may
        # be unbound when jsonify() is reached — consider a dict instead.
        if branch == 10:
            headoffice = branch_data
        if branch == 11:
            arusha = branch_data
        if branch == 12:
            dar = branch_data
        if branch == 13:
            moshi = branch_data
        if branch == 14:
            tengeru = branch_data
        if branch == 15:
            himo = branch_data
    connection.close()
    return jsonify(ho = headoffice, ar = arusha, da = dar, mo = moshi, te = tengeru, hi = himo)
@app.route('/BranchDataFile',methods=['get'])
def get_branch_data_file():
    """Stream the per-branch portfolio data as a downloadable CSV file."""
    connection = engine.connect()
    branches = [10,11,12,13,14,15]
    #branch = request.args['branch']
    data_for_csv = []
    for branch in branches:
        data = connection.execute("select LoanVal.ReportDate, COUNT(LoanVal.Principal) as 'loanClients', SUM(LoanVal.Principal) as 'principalBalance', SUM(IF( LoanVal.DaysPD > 1, LoanVal.Principal, 0)) as 'principal1Day', SUM(IF( LoanVal.DaysPD > 30, LoanVal.Principal, 0)) as 'Principal30Day' from Loans, LoanVal where Loans.LoanID = LoanVal.LoanID and Loans.BranchID = "+str(branch)+" group by LoanVal.ReportDate;")
        data_for_csv.append([branch,data])
    connection.close()
    (file_basename, server_path, file_size) = create_csv(data_for_csv)
    # NOTE(review): this handle is never closed, and the raw file object is
    # passed to make_response — confirm Flask handles this as intended;
    # reading the contents (and opening in binary mode) would be safer.
    return_file = open(server_path+file_basename, 'r')
    response = make_response(return_file,200)
    response.headers['Content-Description'] = 'File Transfer'
    response.headers['Cache-Control'] = 'no-cache'
    response.headers['Content-Type'] = 'text/csv'
    response.headers['Content-Disposition'] = 'attachment; filename=%s' % file_basename
    response.headers['Content-Length'] = file_size
    return response
@app.route('/LoanOfficerData',methods=['GET'])
def get_loan_officer_data():
    """Return per-loan-officer metrics for one branch, grouped by report date.

    Query arg 'branch' must be one of the known branch ids; anything else
    gets a 404. The whitelist is the injection defense, because the value is
    interpolated into SQL below.
    """
    accepted_branches = ['10','11','12','13','14','15']
    branch = request.args['branch']
    if branch not in accepted_branches:
        # BUG FIX: the original built this response but never returned it,
        # so execution fell through and the unvalidated value reached the
        # string-built SQL — a SQL injection hole. The early return closes it.
        return flask.render_template('index.html'), 404
    connection = engine.connect()
    try:
        # Collect every distinct report date first, already ISO-formatted.
        dates = []
        date_query = connection.execute("select distinct ReportDate from LoanVal;")
        for row in date_query:
            row_as_dict = dict(row)
            row_as_dict['ReportDate'] = row_as_dict['ReportDate'].strftime("%Y-%m-%d")
            dates.append(row_as_dict)
        # One query per date: officer-level totals for the requested branch.
        loan_officer_data = []
        for date in dates:
            data = connection.execute("select LO.Name, COUNT(LoanVal.Principal) as 'loanClients', SUM(LoanVal.Principal) as 'principalBalance', SUM(IF( LoanVal.DaysPD > 1, LoanVal.Principal, 0)) as 'principal1Day', SUM(IF( LoanVal.DaysPD > 30, LoanVal.Principal, 0)) as 'Principal30Day' from LO, Loans, LoanVal where LO.LOID = Loans.LOID and Loans.LoanID = LoanVal.LoanID and LoanVal.ReportDate = '"+date['ReportDate']+"' and Loans.BranchID = "+branch+" group by LO.Name;")
            temp = [dict(row) for row in data]
            loan_officer_data.append([date['ReportDate'], temp])
    finally:
        # Close even if a query raises (the original leaked on error).
        connection.close()
    return jsonify(results=loan_officer_data)
@app.route('/CsvDataFiles',methods=['GET','DELETE'])
def get_csv_files():
    """List uploaded CSV files (GET) or delete files plus their DB rows (DELETE).

    DELETE expects a form-encoded body of '&'-separated key=value pairs whose
    values are filenames; the 10-char date preceding the extension is used to
    purge the matching LoanVal rows.
    """
    if request.method == 'GET':
        # os.walk yields (dirpath, dirnames, filenames); we only want the
        # filenames of the top-level upload directory.
        csv_files = next(os.walk(UPLOAD_FOLDER))[2]
        return jsonify(files=csv_files)
    if request.method == 'DELETE':
        # BUG FIX: request.data is bytes under Python 3; decode before
        # doing str operations (the original split('&') would raise).
        files_string = request.data.decode('utf-8')
        pairs = files_string.split('&')
        files_delete = [item[item.index('=') + 1:] for item in pairs]
        connection = engine.connect()
        try:
            for item in files_delete:
                # SECURITY FIX: strip any directory components so a crafted
                # name like '../../etc/passwd' cannot escape UPLOAD_FOLDER.
                item = os.path.basename(item)
                date = item[item.index('.') - 10:item.index('.')]
                os.remove(UPLOAD_FOLDER + '/' + item)
                # The date is sliced from a client-supplied filename and is
                # interpolated into SQL, so validate its shape first.
                if len(date) == 10 and date[4] == '-' and date[7] == '-':
                    connection.execute("DELETE FROM LoanVal WHERE ReportDate = '" + date + "';")
        finally:
            connection.close()
        return "Deleted " + ' '.join(files_delete)
# Start Flask's built-in development server when this file is run directly
# (a production deployment would use a WSGI host instead).
if __name__ == "__main__":
    app.run(port=60050)
|
from tkinter import filedialog
from tkinter import *
import time

# Scan a user-chosen binary file for embedded 'VAGp' (PlayStation audio)
# chunks and dump each one to its own numbered output file.

# Ask the user which file to scan, then slurp it into memory.
root = Tk()
root.filename = filedialog.askopenfilename(initialdir=r"C:\Users\Dom\Desktop", title="Select file")
file = open(root.filename, "rb")
root.destroy()  # kill it
print("Processing file...")
text = file.read()
file.close()  # BUG FIX: the handle was never closed in the original
totalsize = len(text)
filesfound = 0
starttime = time.time()
print(f"Started at timecode = {starttime}")

# Standard VAG header length in bytes — TODO confirm for these containers.
VAG_HEADER_SIZE = 48

i = 0  # byte offset while scanning the buffer for headers
while i < totalsize:
    if text[i:i + 4] == b'VAGp':  # found VAGp likely.
        print(f"VAG header found at {hex(i)}")
        # i is the start of the VAG file.
        # BUG FIX: the original kept the 4 raw size bytes; interpret the
        # field 8 bytes past the magic as a big-endian 32-bit integer
        # (presumed layout — verify against the VAG format spec).
        waveformsize = int.from_bytes(text[i + 8:i + 8 + 4], "big")
        print(f"waveform is of size {waveformsize}")
        # BUG FIX: the original extent computation was incomplete syntax
        # (`text[i+]`, undefined `y`/`offset`); take header + payload.
        endpos = i + VAG_HEADER_SIZE + waveformsize
        contents = text[i:endpos]
        outname = f"{filesfound}.png"
        # BUG FIX: the original wrote `output.close` without calling it;
        # a with-block closes the handle deterministically.
        with open(outname, "w+b") as output:
            output.write(contents)
        filesfound += 1
        i = endpos  # speed up processing significantly.
        percent = i / len(text) * 100
        stoptime = time.time()
        predtime = "{:.1f}".format((stoptime - starttime) / (percent / 100) - (
                stoptime - starttime))  # linear forecast of the length of the program, then subtract the runtime so far
        # spit out some probably useful info for ya butt
        print("------------------------------------------")
        print(f"Parsed {percent}% of file so far")
        print(f"Remaining time: {predtime} sec")
        # BUG FIX: the original printed the post-increment counter, naming
        # a file one higher than the one actually written.
        print(f"Saved file as {outname}")
        print(f"filesize is {len(contents)} bytes")
        print("------------------------------------------")
    else:
        # BUG FIX: the original incremented before testing, so a header at
        # offset 0 could never be found.
        i += 1
print("..Done!")
|
# Author: Nathan Shelby
# Date: 3/11/20
# Description: Create a working digital version of the game Xiangqi
# Create a class called XiangqiGame that initializes a board (which is a list of lists), a move counter to track
# whose turn it is, the game state, and the check status of both players.
# Each board cell holds a short string encoding the color and type of the piece on it.
class XiangqiGame:
    def __init__(self):
        """Set up a new game: move counter, game state, check flags, and board.

        The board is a flat list of 110 one-element lists.  Indices 0-9 hold
        column labels; every following group of 10 holds a row label and the
        nine playable squares of that row (columns a-i).  Piece strings are a
        color letter ('B'lack / 'R'ed) plus a type letter: C chariot,
        H horse, E elephant, A advisor, G general, N cannon, S soldier.
        """
        # Incremented per move; parity is used elsewhere (general_move checks
        # move_counter % 2) to decide whose pieces threaten a square.
        self.__move_counter = 0
        self.__game_state = 'UNFINISHED'
        self.__black_check = 'NOT IN CHECK'
        self.__red_check = 'NOT IN CHECK'
        self.__board = [[' '], ['a '], ['b '], ['c '], ['d '], ['e '], ['f '], ['g '], ['h '], ['i '],
                        ['10'], ['BC'], ['BH'], ['BE'], ['BA'], ['BG'], ['BA'], ['BE'], ['BH'], ['BC'],
                        ['9'], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '],
                        ['8'], [' '], ['BN'], [' '], [' '], [' '], [' '], [' '], ['BN'], [' '],
                        ['7'], ['BS'], [' '], ['BS'], [' '], ['BS'], [' '], ['BS'], [' '], ['BS'],
                        ['6'], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '],
                        ['5'], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '],
                        ['4'], ['RS'], [' '], ['RS'], [' '], ['RS'], [' '], ['RS'], [' '], ['RS'],
                        ['3'], [' '], ['RN'], [' '], [' '], [' '], [' '], [' '], ['RN'], [' '],
                        ['2'], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '], [' '],
                        ['1'], ['RC'], ['RH'], ['RE'], ['RA'], ['RG'], ['RA'], ['RE'], ['RH'], ['RC']]
# Create a function that handles the horse movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the horse piece to the destination if it is a valid move.
def horse_move(self, loc, dest):
# Initialize a variable to hold the board, a variable to hold the current location,
# a variable to hold the destination, a variable that is the difference of the two, and a list of viable moves.
board = self.__board
spot_h = loc
destination = dest
combo_numb = destination - spot_h
viable_moves = [8, -8, 12, -12, 19, -19, 21, -21]
# Check to see if the destination is one of the 8 valid spots that the horse can move to from the current spot.
# If not, print the error and return.
if combo_numb not in viable_moves:
raise NotALegalMove
# Split the string of the destination and current sub-list and see if the first character in that sub-list are
# The same. If they are, raise an error.
dest_char = [char for char in board[destination][0]]
cur_char = [char for char in board[spot_h][0]]
if dest_char[0] == cur_char[0]:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving backwards. If so, raise an error.
if destination == spot_h - 19:
horse_check = spot_h - 10
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving forward. If so, raise an error.
if destination == spot_h + 19:
horse_check = spot_h + 10
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving forward. If so, raise an error.
if destination == spot_h + 21:
horse_check = spot_h + 10
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving backwards. If so, raise an error.
if destination == spot_h - 21:
horse_check = spot_h - 10
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving left. If so, raise an error.
if destination == spot_h - 8:
horse_check = spot_h + 1
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving right. If so, raise an error.
if destination == spot_h + 8:
horse_check = spot_h - 1
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving right. If so, raise an error.
if destination == spot_h + 12:
horse_check = spot_h + 1
if board[horse_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the horse moving left. If so, raise an error.
if destination == spot_h - 12:
horse_check = spot_h - 1
if board[horse_check] != [' ']:
raise NotALegalMove
# If all the checks have passed, return true
return True
# Create a function that handles the elephant movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the elephant piece to the destination if it is a valid move.
def elephant_move(self, loc, dest):
# Initialize a variable to hold the river values, the board, the current sub-list location, the destination
# Sub-list location, a variable to hold the difference between the two, and a list of viable moves
river = [50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69]
board = self.__board
spot_e = loc
destination = dest
combo_num = destination - spot_e
viable_moves = [18, -18, 22, -22]
# Check to see if the destination value is one of the 4 viable moves. If not, it prints an error and returns.
if combo_num not in viable_moves:
raise NotALegalMove
# Check to see if there is a piece in the way of the elephant moving backwards.
# If so, raise an error.
if destination == spot_e - 18:
eleph_check = spot_e - 9
if board[eleph_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the elephant moving forwards.
# If so, raise an error.
if destination == spot_e + 18:
eleph_check = spot_e + 9
if board[eleph_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the elephant moving forwards.
# If so, raise an error.
if destination == spot_e + 22:
eleph_check = spot_e + 11
if board[eleph_check] != [' ']:
raise NotALegalMove
# Check to see if there is a piece in the way of the elephant moving backwards.
# If so, raise an error.
if destination == spot_e - 22:
eleph_check = spot_e - 11
if board[eleph_check] != [' ']:
raise NotALegalMove
# Split the string of the destination and current sub-list and see if the first character in that sub-list are
# The same. If they are, raise an error.
dest_char = [char for char in board[destination][0]]
cur_char = [char for char in board[spot_e][0]]
if dest_char[0] == cur_char[0]:
raise NotALegalMove
# Check to see if the elephant has tried to cross the river. If so, raise an error
if spot_e < 60:
if destination > 60:
raise NotALegalMove
if spot_e > 59:
if destination < 59:
raise NotALegalMove
# If all the checks have passed, return true
return True
# Create a function that handles the chariot movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the horse piece to the destination if it is a valid move.
    def chariot_move(self, loc, dest):
        """Validate a chariot (rook-like) move from flat index *loc* to *dest*.

        On the flat 10-wide board, vertical moves are multiples of 10 and
        lateral moves are offsets of 1-8 within a row.  Returns True when
        the move is legal; raises NotALegalMove otherwise.
        """
        # Initialize a variable to hold the board, the current sub-list location, the destination
        # Sub-list location, the difference between the two, and a list of viable moves
        board = self.__board
        destination = dest
        spot_c = loc
        combo_num = destination - spot_c
        valid_moves = [10, -10, 20, -20, 30, -30, 40, -40, 50, -50, 60, -60, 70, -70, 80, -80, 90, -90, 1, -1, 2, -2, 3,
                       -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8]
        # Check to see if the destination is one of the viable moves for the chariot. If not, raise an error
        if combo_num not in valid_moves:
            raise NotALegalMove
        # If the destination is within the range of 0-10, ensure that the chariot doesn't move to a new row
        # Laterally by splitting the string of the current spot and the destination spot. Compare the first number of
        # That string and ensure that they are the same. If not, raise an error
        row_check = destination - spot_c
        if 0 < row_check < 10:
            # NOTE(review): only the first digit of each flat index is
            # compared; this works for lateral offsets < 10 on most rows,
            # but verify behavior around the 100-109 row.
            str_spot = [char for char in str(spot_c)[0]]
            str_dest = [char for char in str(destination)[0]]
            if str_spot != str_dest:
                raise NotALegalMove
        # Check to see if there is a piece between the chariot and the destination.
        # If so, raise an error
        # NOTE(review): this forward path scan is NOT nested under the
        # lateral-move branch above, so it also runs for vertical moves and
        # then walks *consecutive flat indices* (including row-label cells)
        # rather than the column — suspected indentation bug; confirm intent
        # before relying on vertical chariot moves.
        test = spot_c + 1
        test_2 = destination - 1
        if test_2 > test:
            for x in range(spot_c + 1, destination):
                if board[x] != [' ']:
                    raise NotALegalMove
        if test == test_2:
            if board[test] != [' ']:
                raise NotALegalMove
        # If the destination is within the range of 0-(-10), ensure that the chariot doesn't move to a new row
        # Laterally by splitting the string of the current spot and the destination spot. Compare the first number of
        # That string and ensure that they are the same. If not, raise an error
        if 0 > row_check > - 10:
            str_spot = [char for char in str(spot_c)[0]]
            str_dest = [char for char in str(destination)[0]]
            if str_spot != str_dest:
                raise NotALegalMove
        # Check to see if there is a piece between the chariot and the destination.
        # If so, raise an error
        # NOTE(review): same indentation concern as the forward scan above.
        test = spot_c - 1
        test_2 = destination + 1
        if test_2 < test:
            for x in range(destination + 1, spot_c):
                if board[x] != [' ']:
                    raise NotALegalMove
        if test == test_2:
            if board[test] != [' ']:
                raise NotALegalMove
        # Split the string of the destination and current sub-list and see if the first character in that sub-list are
        # The same. If they are, raise an error.
        if board[destination] != [' ']:
            dest_char = [char for char in board[destination][0]]
            cur_char = [char for char in board[spot_c][0]]
            if dest_char[0] == cur_char[0]:
                raise NotALegalMove
        # Vertical path check: derive the row distance from the decimal digits
        # of the offset, then test each intermediate square one row at a time.
        if combo_num % 10 == 0:
            if combo_num > 0:
                str_combo = [char for char in str(combo_num)]
                fin_str = int(str_combo[0])
                if fin_str > 1:
                    for x in range(1, fin_str):
                        y = 10 * x
                        if board[spot_c + y] != [' ']:
                            raise NotALegalMove
            if combo_num < 0:
                # str_combo[0] is the '-' sign, so the digit is at index 1.
                str_combo = [char for char in str(combo_num)]
                fin_str = int(str_combo[1])
                if fin_str > 1:
                    for x in range(1, fin_str):
                        y = -10 * x
                        if board[spot_c + y] != [' ']:
                            raise NotALegalMove
        # If all the checks have passed, return true
        return True
# Create a function that handles the cannon movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the horse piece to the destination if it is a valid move.
    def cannon_move(self, counter, loc, dest):
        """Validate a cannon move from flat index *loc* to *dest*.

        Cannons move like chariots but may only capture by jumping exactly
        one screen piece.  *counter* is a caller flag: general_move passes 1
        to force the screen check regardless of whether the destination is
        currently occupied (presumed from the call sites — confirm).
        Returns True when the move is legal; raises NotALegalMove otherwise.
        """
        # Initialize a variable to hold the board, the current sub-list location, the destination
        # Sub-list location, the difference between the two, and a list of viable moves
        board = self.__board
        destination = dest
        spot_n = loc
        combo_num = destination - spot_n
        cannon_jump = 0
        loop_counter = counter
        valid_moves = [10, -10, 20, -20, 30, -30, 40, -40, 50, -50, 60, -60, 70, -70, 80, -80, 90, -90, 1, -1, 2, -2, 3,
                       -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8]
        # Check to see if the destination is one of the viable moves for the cannon. If not, raise an error
        if combo_num not in valid_moves:
            raise NotALegalMove
        # If the destination is within the range of 0-10, ensure that the chariot doesn't move to a new row
        # Laterally by splitting the string of the current spot and the destination spot. Compare the first number of
        # That string and ensure that they are the same. If not, raise an error
        row_check = destination - spot_n
        if 0 < row_check < 10:
            str_spot = [char for char in str(spot_n)[0]]
            str_dest = [char for char in str(destination)[0]]
            if str_spot != str_dest:
                raise NotALegalMove
        # Create a counter and count the number of occupied spaces between the destination and the current spot.
        # If it is more than 1, raise an error, if it's 0, raise an error.
        # NOTE(review): like chariot_move, this forward scan is not nested
        # under the lateral-move branch above — confirm the indentation is
        # intentional before relying on vertical cannon moves.
        test = spot_n + 1
        test_2 = destination - 1
        if test_2 > test:
            # NOTE(review): `loop_counter == 2` is a no-op (dead branch).
            if loop_counter == 2:
                pass
            if board[destination] != [' '] or loop_counter == 1:
                for x in range(spot_n + 1, destination):
                    if board[x] != [' ']:
                        cannon_jump += 1
                # A capture needs exactly one screen piece in between.
                if cannon_jump > 1:
                    raise NotALegalMove
                if cannon_jump == 0:
                    raise NotALegalMove
        if test == test_2:
            if board[test] == [' ']:
                raise NotALegalMove
        # If the destination is within the range of 0-(-10), ensure that the chariot doesn't move to a new row
        # Laterally by splitting the string of the current spot and the destination spot. Compare the first number of
        # That string and ensure that they are the same. If not, raise an error
        if 0 > row_check > - 10:
            str_spot = [char for char in str(spot_n)[0]]
            str_dest = [char for char in str(destination)[0]]
            if str_spot != str_dest:
                raise NotALegalMove
        # Create a counter and count the number of occupied spaces between the destination and the current spot.
        # If it is more than 1, raise an error, if it's 0, raise an error.
        test = spot_n - 1
        test_2 = destination + 1
        if test_2 < test:
            # NOTE(review): `loop_counter == 2` is a no-op (dead branch).
            if loop_counter == 2:
                pass
            if board[destination] != [' '] or loop_counter == 1:
                for x in range(destination + 1, spot_n):
                    if board[x] != [' ']:
                        cannon_jump += 1
                if cannon_jump > 1:
                    raise NotALegalMove
                if cannon_jump == 0:
                    raise NotALegalMove
        if test == test_2:
            if board[test] == [' ']:
                raise NotALegalMove
        # Vertical moves: derive the row distance from the decimal digits of
        # the offset and count screen pieces one row at a time.
        if combo_num % 10 == 0:
            # NOTE(review): `loop_counter == 2` is a no-op (dead branch).
            if loop_counter == 2:
                pass
            if board[destination] != [' '] or loop_counter == 1:
                if combo_num > 0:
                    str_combo = [char for char in str(combo_num)]
                    fin_str = int(str_combo[0])
                    if fin_str == 1:
                        raise NotALegalMove
                    if fin_str > 1:
                        for x in range(1, fin_str):
                            y = 10 * x
                            if board[spot_n + y] != [' ']:
                                cannon_jump += 1
                        if cannon_jump > 1:
                            raise NotALegalMove
                        if cannon_jump == 0:
                            raise NotALegalMove
                if combo_num < 0:
                    # str_combo[0] is the '-' sign, so the digit is at index 1.
                    str_combo = [char for char in str(combo_num)]
                    fin_str = int(str_combo[1])
                    if fin_str == 1:
                        raise NotALegalMove
                    if fin_str > 1:
                        for x in range(1, fin_str):
                            y = -10 * x
                            if board[spot_n + y] != [' ']:
                                cannon_jump += 1
                        if cannon_jump > 1:
                            raise NotALegalMove
                        if cannon_jump == 0:
                            raise NotALegalMove
        # Split the string of the destination and current sub-list and see if the first character in that sub-list are
        # The same. If they are, raise an error.
        if board[destination] != [' ']:
            dest_char = [char for char in board[destination][0]]
            cur_char = [char for char in board[spot_n][0]]
            if dest_char[0] == cur_char[0]:
                raise NotALegalMove
        # If all the checks have passed, return True
        return True
# Create a function that handles the soldier movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the horse piece to the destination if it is a valid move.
def soldier_move(self, loc, dest):
# Initialize a variable to hold the river values, the board, the current sub-list location, the destination
# Sub-list location, the difference between the two, and a list of viable moves
river = [50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69]
board = self.__board
spot_s = loc
destination = dest
combo_num = destination - spot_s
viable_moves = [1, -1, 10, -10]
# Check to see if the movement we are expected to do is viable
if combo_num not in viable_moves:
raise NotALegalMove
# Split the string of the destination and current sub-list and see if the first character in that sub-list are
# The same. If they are, raise an error.
dest_char = [char for char in board[destination][0]]
cur_char = [char for char in board[spot_s][0]]
if dest_char[0] == cur_char[0]:
raise NotALegalMove
# Check to see if the soldier has crossed the river and thus can move sideways.
# If not and we are expected to move sideways, raise an error
if cur_char[0] == 'B':
if spot_s < 60:
combo_num = destination - spot_s
if combo_num % 10 != 0:
raise NotALegalMove
if destination == spot_s - 10:
raise NotALegalMove
if cur_char[0] == 'R':
if spot_s > 59:
combo_num = destination - spot_s
if combo_num % 10 != 0:
raise NotALegalMove
if destination == spot_s + 10:
raise NotALegalMove
# If all the tests have passed, return true
return True
# Create a function that handles the advisor movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the horse piece to the destination if it is a valid move.
def advisor_move(self, loc, dest):
# Initialize a variable to hold the palace values, the board, the current sub-list location, the destination
# Sub-list location, the difference between the two, and a list of viable moves
palace = [14, 15, 16, 24, 25, 26, 34, 35, 36, 84, 85, 86, 94, 95, 96, 104, 105, 106]
board = self.__board
spot_a = loc
destination = dest
combo_num = destination - spot_a
viable_moves = [9, -9, 11, -11]
# Check to see if the destination is one of the viable moves for the advisor. If not, raise an error
if combo_num not in viable_moves:
raise NotALegalMove
# Split the string of the destination and current sub-list and see if the first character in that sub-list are
# The same. If they are, raise an error.
dest_char = [char for char in board[destination][0]]
cur_char = [char for char in board[spot_a][0]]
if dest_char[0] == cur_char[0]:
raise NotALegalMove
# Check to see if the destination value is in the palace value list. If not, raise an error
if destination not in palace:
raise NotALegalMove
# If all the checks have passed, put the string in the current sub-list into a variable, make the current
# sublist 'empty', and make the destination sub-list hold the piece.
return True
# Create a function that handles the general movement. It takes the current sub-list number and the destination
# Sub-list number from the make_move function and moves the horse piece to the destination if it is a valid move.
    def general_move(self, counter, loc, dest):
        """Validate a general move from flat index *loc* to *dest*.

        Covers one-step moves inside the palace plus the "flying general"
        offsets (+/-70/80/90), then simulates every enemy piece type to
        reject destinations that would leave the general attacked.
        *counter* guards recursion: the top-level call passes 0; the nested
        general-vs-general probe passes 1 so it does not recurse again
        (presumed from the call sites — confirm).  Returns True when the
        move is legal; raises NotALegalMove otherwise.
        """
        # Initialize a variable to hold the palace values, the board, the current sub-list location, the destination
        # Sub-list location, lists of all the viable moves of all the pieces that can take the general, and a loop
        # Counter that is used later to run tests to see if the general has any viable moves left.
        soldier_moves = [1, -1, 10, -10]
        general_moves = [1, -1, 10, -10, 70, -70, 80, -80, 90, -90]
        cannon_moves = [10, -10, 20, -20, 30, -30, 40, -40, 50, -50, 60, -60, 70, -70, 80, -80, 90, -90, 1, -1, 2, -2,
                        3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8]
        horse_moves = [8, -8, 12, -12, 19, -19, 21, -21]
        palace = [14, 15, 16, 24, 25, 26, 34, 35, 36, 84, 85, 86, 94, 95, 96, 104, 105, 106]
        board = self.__board
        destination = dest
        spot_g = loc
        move_counter = self.__move_counter
        loop_counter = counter
        combo_num = destination - spot_g
        # Check to see if the destination is one of the viable moves for the general. If not, raise an error
        if combo_num not in general_moves:
            raise NotALegalMove
        # Split the string of the destination and current sub-list and see if the first character in that sub-list are
        # The same. If they are, raise an error.
        dest_char = [char for char in board[destination][0]]
        cur_char = [char for char in board[spot_g][0]]
        if dest_char[0] == cur_char[0]:
            raise NotALegalMove
        # For the flying general move, check to see if the spaces between the generals are empty. If they are not,
        # raise an error
        if destination == spot_g + 70:
            if dest_char[1] == 'G' or loop_counter == 1:
                for x in range(1, 8):
                    y = x * 10
                    if board[spot_g + y] != [' ']:
                        raise NotALegalMove
        if destination == spot_g - 70:
            if dest_char[1] == 'G' or loop_counter == 1:
                for x in range(1, 8):
                    y = x * -10
                    if board[spot_g + y] != [' ']:
                        raise NotALegalMove
        if destination == spot_g + 80:
            if dest_char[1] == 'G' or loop_counter == 1:
                for x in range(1, 8):
                    y = x * 10
                    if board[spot_g + y] != [' ']:
                        raise NotALegalMove
        if destination == spot_g - 80:
            if dest_char[1] == 'G' or loop_counter == 1:
                for x in range(1, 8):
                    y = x * -10
                    if board[spot_g + y] != [' ']:
                        raise NotALegalMove
        if destination == spot_g + 90:
            if dest_char[1] == 'G' or loop_counter == 1:
                for x in range(1, 8):
                    y = x * 10
                    if board[spot_g + y] != [' ']:
                        raise NotALegalMove
        if destination == spot_g - 90:
            if dest_char[1] == 'G' or loop_counter == 1:
                for x in range(1, 8):
                    y = x * -10
                    if board[spot_g + y] != [' ']:
                        raise NotALegalMove
        # Check to see if the destination value is in the palace value list. If not, raise an error
        if destination not in palace:
            raise NotALegalMove
        # Check to see who's turn it is. Take the destination spot and see if any of the opposing team's pieces will
        # Be able to reach that spot next turn. If so, raise an error. If not, continue
        # Each probe calls the relevant piece validator in reverse (enemy
        # square -> destination); a successful validation means the square
        # is attacked, which is surfaced as GeneralCheck and converted to
        # NotALegalMove.  IndexError means the probe ran off the board.
        if move_counter % 2 == 0:
            b = destination
            for z in soldier_moves:
                try:
                    if board[b + z] == ['BS']:
                        try:
                            if XiangqiGame.soldier_move(self, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
            for z in cannon_moves:
                try:
                    if board[b + z] == ['BN']:
                        try:
                            if XiangqiGame.cannon_move(self, 1, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
                # Chariots share the cannon's offset list, so they are probed
                # inside the same loop.
                try:
                    if board[b + z] == ['BC']:
                        try:
                            if XiangqiGame.chariot_move(self, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
            for z in horse_moves:
                try:
                    if board[b + z] == ['BH']:
                        try:
                            if XiangqiGame.horse_move(self, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
            for z in general_moves:
                try:
                    if board[b + z] == ['BG']:
                        # Recurse only from the top-level call (counter 0).
                        if loop_counter == 0:
                            try:
                                if XiangqiGame.general_move(self, 1, loc=b+z, dest=b):
                                    raise GeneralCheck
                                else:
                                    continue
                            except NotALegalMove:
                                continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
        if move_counter % 2 != 0:
            # Mirror of the block above for the other side's pieces.
            b = destination
            for z in soldier_moves:
                try:
                    if board[b + z] == ['RS']:
                        try:
                            if XiangqiGame.soldier_move(self, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
            for z in cannon_moves:
                try:
                    if board[b + z] == ['RN']:
                        try:
                            if XiangqiGame.cannon_move(self, 1, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
                try:
                    if board[b + z] == ['RC']:
                        try:
                            if XiangqiGame.chariot_move(self, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
            for z in horse_moves:
                try:
                    if board[b + z] == ['RH']:
                        try:
                            if XiangqiGame.horse_move(self, loc=b + z, dest=b):
                                raise GeneralCheck
                        except NotALegalMove:
                            continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
            for z in general_moves:
                try:
                    if board[b + z] == ['RG']:
                        if loop_counter == 0:
                            try:
                                if XiangqiGame.general_move(self, 1, loc=b+z, dest=b):
                                    raise GeneralCheck
                                else:
                                    continue
                            except NotALegalMove:
                                continue
                except GeneralCheck:
                    raise NotALegalMove
                except IndexError:
                    continue
        # If all the tests pass, return True
        return True
# Create a function that returns the game state
def get_game_state(self):
# return the game state data member
return self.__game_state
# Create a function to check if the general of the color passed as an argument is in check
def is_in_check(self, color):
# Create a variable to hold the color and the two viable colors for comparison
b_check = 'black'
r_check = 'red'
c_check = color
# compare the string that was entered to the two viable colors. If it matches one of them, return False if that
# color is not in check and True if it is in check
if c_check.casefold() == b_check.casefold():
b_test = self.__black_check
if b_test == 'NOT IN CHECK':
return False
else:
return True
if c_check.casefold() == r_check.casefold():
r_test = self.__red_check
if r_test == 'NOT IN CHECK':
return False
else:
return True
# Return False if the color was not one of the two viable colors
return False
# Create a function to make a move. It takes a string of the spot you are moving from and the spot you are moving
# To and will call the correct piece function for the piece in the current spot
def make_move(self, left, to):
    """
    Execute a move from board square string ``left`` to square string ``to``.

    Validates turn order and piece-specific legality, performs the move,
    refreshes check status, reverses the move if it leaves the mover's own
    general in check, and detects the end of the game by probing every
    possible reply of the opponent via viable_move.

    :param left: source square string, column letter then row number (row 10
                 arrives as three characters and is re-joined below)
    :param to: destination square string in the same format
    :return: True if the move was made (possibly ending the game), False if
             rejected. NOTE(review): returns None (not False) when the
             source square is empty — still falsy, but inconsistent.
    """
    # Create variables for a column counter, a row counter, the game state, viable piece movements, and the board
    game_state = self.__game_state
    count_col = 0
    count_row = 0
    board = self.__board
    move_counter = self.__move_counter
    # Flat-board index offsets each piece type may attempt
    red_soldier_moves = [1, 10, -10]
    black_soldier_moves = [-1, 10, -10]
    general_moves = [1, -1, 10, -10, 70, -70, 80, -80, 90, -90]
    cannon_moves = [10, -10, 20, -20, 30, -30, 40, -40, 50, -50, 60, -60, 70, -70, 80, -80, 90, -90, 1, -1, 2, -2,
                    3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8]
    horse_moves = [8, -8, 12, -12, 19, -19, 21, -21]
    elephant_moves = [18, -18, 22, -22]
    advisor_moves = [9, -9, 11, -11]
    # If the game has a winner, return False
    if game_state != 'UNFINISHED':
        return False
    # Split the string of the board spot you are moving from and take
    # the column letter and the row number to variables
    moved_from = [[char] for char in left]
    moved_from_col = moved_from[0]
    moved_from_row = moved_from[1]
    # Split the string of the board spot you are moving to the same way
    moved_to = [[char] for char in to]
    moved_to_col = moved_to[0]
    moved_to_row = moved_to[1]
    # A 3-character square string means row number 10: re-join the
    # '1' and '0' characters into a single row token
    if len(moved_from) == 3:
        test_str = []
        test_str.append(moved_from[1][0])
        test_str.append(moved_from[2][0])
        test_3 = ''.join(test_str[0:2])
        moved_from_row = [str(test_3)]
    if len(moved_to) == 3:
        test_str = []
        test_str.append(moved_to[1][0])
        test_str.append(moved_to[2][0])
        test_3 = ''.join(test_str[0:2])
        moved_to_row = [str(test_3)]
    # Walk the board list until the sub-list whose first character matches
    # the source column letter is found; count_col then holds that column's
    # base offset into the flat board
    for i, e in enumerate(board):
        lett = [char for char in board[i][0]]
        lett_check = [lett[0]]
        if lett_check != moved_from_col:
            count_col += 1
        else:
            break
    # Walk the board list until the source row marker is found; the column
    # base plus the row offset gives the flat index of the source square
    for o, p in enumerate(board):
        if p != moved_from_row:
            count_row += 1
        else:
            spot_from = count_col + count_row
            count_col = 0
            count_row = 0
            break
    # Empty source square: nothing to move (returns None — see docstring)
    if board[spot_from] == [' ']:
        return
    else:
        piece = [char for char in board[spot_from][0]]
        piece_check = piece[1]
        lett = [char for char in board[spot_from][0]]
    # Enforce turn order: red moves on even move counters, black on odd
    if lett[0] == 'R':
        if move_counter % 2 != 0:
            return False
    if lett[0] == 'B':
        if move_counter % 2 == 0:
            return False
    # Locate the destination square's flat index the same way as the source
    for u, z in enumerate(board):
        lett = [char for char in board[u][0]]
        lett_check = [lett[0]]
        if lett_check != moved_to_col:
            count_col += 1
        else:
            break
    for l, b in enumerate(board):
        if b != moved_to_row:
            count_row += 1
        else:
            spot_to = count_col + count_row
            break
    # Dispatch to the movement validator for this piece type; each validator
    # raises NotALegalMove for an illegal move, converted to False here
    if piece_check == 'E':
        try:
            XiangqiGame.elephant_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'G':
        try:
            XiangqiGame.general_move(self, 0, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'A':
        try:
            XiangqiGame.advisor_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'H':
        try:
            XiangqiGame.horse_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'C':
        try:
            XiangqiGame.chariot_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'N':
        try:
            XiangqiGame.cannon_move(self, 0, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'S':
        try:
            XiangqiGame.soldier_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    # Lift the piece, blank the source square, and place it on the destination
    piece = board[spot_from]
    board[spot_from] = [' ']
    board[spot_to] = piece
    # Advance the turn and refresh both generals' check status
    move_counter += 1
    self.__move_counter = move_counter
    XiangqiGame.check_check(self, board=board)
    # If the move left the mover's own general in check, undo it and fail.
    # NOTE(review): the undo blanks the destination square, so any piece
    # captured by the rejected move is not restored — TODO confirm.
    # Otherwise probe every opposing piece for any legal reply; if none
    # exists, the mover wins and the game state is updated.
    if move_counter % 2 != 0:
        if XiangqiGame.is_in_check(self, 'red'):
            board[spot_from] = piece
            board[spot_to] = [' ']
            move_counter -= 1
            self.__move_counter = move_counter
            return False
        # Red just moved: look for any viable black reply
        for v in range(0, 110):
            try:
                if board[v] == ['BG']:
                    for x in general_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['BS']:
                    for x in black_soldier_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['BA']:
                    for x in advisor_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['BC']:
                    for x in cannon_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['BN']:
                    # Cannons share the chariot's offset list
                    for x in cannon_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['BE']:
                    for x in elephant_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['BH']:
                    for x in horse_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
            except NotALegalMove:
                continue
            except IndexError:
                continue
        # No black reply exists: red wins
        game_state = 'RED_WON'
        self.__game_state = game_state
        return True
    if move_counter % 2 == 0:
        if XiangqiGame.is_in_check(self, 'black'):
            board[spot_from] = piece
            board[spot_to] = [' ']
            move_counter -= 1
            self.__move_counter = move_counter
            return False
        # Black just moved: look for any viable red reply
        for v in range(0, 110):
            try:
                if board[v] == ['RG']:
                    for x in general_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['RS']:
                    for x in red_soldier_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['RA']:
                    for x in advisor_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['RC']:
                    for x in cannon_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['RN']:
                    for x in cannon_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['RE']:
                    for x in elephant_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
                if board[v] == ['RH']:
                    for x in horse_moves:
                        try:
                            if XiangqiGame.viable_move(self, left=v, to=v+x):
                                return True
                        except NotALegalMove:
                            continue
                        except IndexError:
                            continue
            except NotALegalMove:
                continue
            except IndexError:
                continue
        # No red reply exists: black wins
        game_state = 'BLACK_WON'
        self.__game_state = game_state
        return True
# A function that tells whether anyone is currently in check or not.
def check_check(self, board):
    """
    Recompute whether either general is in check on the given board.

    Locates each general, then scans for opposing generals (facing-generals
    rule), soldiers, horses, chariots and cannons that could capture it,
    accounting for blocking pieces. Updates self.__black_check and
    self.__red_check to 'IN CHECK' or 'NOT IN CHECK' and returns early as
    soon as a check is confirmed.

    :param board: flat list representation of the board to analyse
    """
    black_check = self.__black_check
    red_check = self.__red_check
    board = board
    # Counter is decremented because make_move increments it before calling us
    move_counter = self.__move_counter - 1
    soldier_moves = [1, -1, 10, -10]
    black_general_moves = [1, -1, 10, -10, 70, 80, 90]
    red_general_moves = [1, -1, 10, -10, -70, -80, -90]
    cannon_moves = [10, -10, 20, -20, 30, -30, 40, -40, 50, -50, 60, -60, 70, -70, 80, -80,
                    90, -90, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 7, -7, 8, -8]
    horse_moves = [8, -8, 12, -12, 19, -19, 21, -21]
    # Row-number markers on the board list; cannon scans skip over these
    end_check = ['1','2','3','4','5','6','7','8','9','10']
    # Find the black general in the board
    for o in range(0, 110):
        if board[o] == ['BG']:
            # Facing generals: is the red general across the board with no
            # piece between them? str(p) is dissected to count intervening
            # squares; any occupied square raises NotCheck (no check).
            for p in black_general_moves:
                try:
                    if board[o + p] == ['RG']:
                        str_int = [char for char in str(p)]
                        check_test = 0
                        if str_int[0] == '-':
                            for t in range(1, int(str_int[1])):
                                y = t * 10
                                if board[o + y] != [' ']:
                                    check_test += 1
                            if check_test > 0:
                                raise NotCheck
                            if black_check == 'NOT IN CHECK':
                                black_check = 'IN CHECK'
                                self.__black_check = black_check
                                return
                            if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                return
                        else:
                            for t in range(1, int(str_int[0])):
                                y = t * 10
                                if board[o + y] != [' ']:
                                    check_test += 1
                            if check_test > 0:
                                raise NotCheck
                            if black_check == 'NOT IN CHECK':
                                black_check = 'IN CHECK'
                                self.__black_check = black_check
                                return
                            if black_check != 'NOT IN CHECK':
                                return
                except IndexError:
                    continue
                except NotCheck:
                    continue
            # Red soldiers adjacent to the black general give check
            for p in soldier_moves:
                try:
                    if board[o + p] == ['RS']:
                        if black_check == 'NOT IN CHECK':
                            black_check = 'IN CHECK'
                            self.__black_check = black_check
                            return
                        if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                            return
                except:  # NOTE(review): bare except also hides unexpected errors
                    continue
            # Red horses in legal horse range give check unless their
            # "leg" (the adjacent blocking square) is occupied
            for p in horse_moves:
                try:
                    if board[o + p] == ['RH']:
                        if p == -8:
                            if board[o - 9] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == 8:
                            if board[o + 9] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == -12:
                            if board[o - 11] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == 12:
                            if board[o + 11] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == -19:
                            if board[o - 9] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == 19:
                            if board[o + 9] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == -21:
                            if board[o - 11] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if p == 21:
                            if board[o + 11] != [' ']:
                                raise NotCheck
                            else:
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                except IndexError:
                    continue
                except NotCheck:
                    continue
            # Red chariots check along clear lines; red cannons check along
            # lines with exactly one screen piece. The digits of str(p)
            # determine scan direction and distance.
            for p in cannon_moves:
                try:
                    if board[o + p] == ['RC']:
                        str_int = [char for char in str(p)]
                        check_test = 0
                        if len(str_int) == 3:
                            # e.g. '-30': scan upward in steps of -10
                            for t in range(1, int(str_int[1])):
                                y = t * -10
                                if board[o + y] != [' ']:
                                    check_test += 1
                            if check_test > 0:
                                raise NotCheck
                            if black_check == 'NOT IN CHECK':
                                black_check = 'IN CHECK'
                                self.__black_check = black_check
                                return
                            if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                return
                        if len(str_int) == 2:
                            if str_int[0] == '-':
                                for t in range(1, int(str_int[1])):
                                    y = t * -1
                                    if board[o + y] != [' ']:
                                        check_test += 1
                                if check_test > 0:
                                    raise NotCheck
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                            else:
                                for t in range(1, int(str_int[0])):
                                    y = t * 10
                                    if board[o + y] != [' ']:
                                        check_test += 1
                                if check_test > 0:
                                    raise NotCheck
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if len(str_int) == 1:
                            for t in range(1, int(str_int[0])):
                                if board[o + t] != [' ']:
                                    check_test += 1
                            if check_test > 0:
                                raise NotCheck
                            if black_check == 'NOT IN CHECK':
                                black_check = 'IN CHECK'
                                self.__black_check = black_check
                                return
                            if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                return
                except NotCheck:
                    continue
                except IndexError:
                    continue
                try:
                    if board[o + p] == ["RN"]:
                        # A cannon needs a screen, so adjacent squares can't check
                        if p == 1 or p == -1 or p == 10 or p == -10:
                            raise NotCheck
                        str_int = [char for char in str(p)]
                        check_test = 0
                        if len(str_int) == 3:
                            for t in range(1, int(str_int[1])):
                                y = t * -10
                                if board[o + y] != [' ']:
                                    check_test += 1
                            # Cannon check requires exactly one screen piece
                            if check_test != 1:
                                raise NotCheck
                            if black_check == 'NOT IN CHECK':
                                black_check = 'IN CHECK'
                                self.__black_check = black_check
                                return
                            if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                return
                        if len(str_int) == 2:
                            if str_int[0] == '-':
                                for t in range(1, int(str_int[1])):
                                    y = t * -1
                                    if board[o + y] != [' ']:
                                        # Row-number markers are not screens
                                        if board[o + y][0] in end_check:
                                            continue
                                        check_test += 1
                                if check_test != 1:
                                    raise NotCheck
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                            else:
                                for t in range(1, int(str_int[0])):
                                    y = t * 10
                                    if board[o + y] != [' ']:
                                        check_test += 1
                                if check_test != 1:
                                    raise NotCheck
                                if black_check == 'NOT IN CHECK':
                                    black_check = 'IN CHECK'
                                    self.__black_check = black_check
                                    return
                                if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                    return
                        if len(str_int) == 1:
                            for t in range(1, int(str_int[0])):
                                if board[o + t] != [' ']:
                                    if board[o + t][0] in end_check:
                                        continue
                                    check_test += 1
                            if check_test != 1:
                                raise NotCheck
                            if black_check == 'NOT IN CHECK':
                                black_check = 'IN CHECK'
                                self.__black_check = black_check
                                return
                            if black_check != 'NOT IN CHECK' and move_counter % 2 == 0:
                                return
                except NotCheck:
                    continue
                except IndexError:
                    continue
            # If all tests pass, the black general status is set to NOT IN CHECK
            black_check = 'NOT IN CHECK'
            self.__black_check = black_check
    # Find the red general in the board and repeat the scan mirrored for red
    for o in range(0, 110):
        if board[o] == ['RG']:
            # Facing generals test from red's side
            for p in red_general_moves:
                try:
                    if board[o + p] == ['BG']:
                        str_int = [char for char in str(p)]
                        check_test = 0
                        for t in range(1, int(str_int[1])):
                            y = t * -10
                            if board[o + y] != [' ']:
                                check_test += 1
                        if check_test > 0:
                            raise NotCheck
                        if red_check == 'NOT IN CHECK':
                            red_check = 'IN CHECK'
                            self.__red_check = red_check
                            return
                        if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                            return
                except IndexError:
                    continue
                except NotCheck:
                    continue
            # Black soldiers adjacent to the red general give check
            for p in soldier_moves:
                try:
                    if board[o + p] == ['BS']:
                        if red_check == 'NOT IN CHECK':
                            red_check = 'IN CHECK'
                            self.__red_check = red_check
                            return
                        if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                            return
                except:  # NOTE(review): bare except also hides unexpected errors
                    continue
            # Black horses in legal range give check unless the leg is blocked
            for p in horse_moves:
                try:
                    if board[o + p] == ['BH']:
                        if p == -8:
                            if board[o - 9] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == 8:
                            if board[o + 9] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == -12:
                            if board[o - 11] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == 12:
                            if board[o + 11] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == -19:
                            if board[o - 9] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == 19:
                            if board[o + 9] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == -21:
                            if board[o - 11] != [' ']:
                                # NOTE(review): every sibling branch raises NotCheck
                                # here; 'pass' happens to behave identically because
                                # nothing else executes for p == -21, but it is
                                # inconsistent — TODO confirm intent.
                                pass
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if p == 21:
                            if board[o + 11] != [' ']:
                                raise NotCheck
                            else:
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                except IndexError:
                    continue
                except NotCheck:
                    continue
            # Black chariots (clear line) and cannons (exactly one screen)
            for p in cannon_moves:
                try:
                    if board[o + p] == ['BC']:
                        str_int = [char for char in str(p)]
                        check_test = 0
                        if len(str_int) == 3:
                            for t in range(1, int(str_int[1])):
                                y = t * -10
                                if board[o + y] != [' ']:
                                    check_test += 1
                            if check_test > 0:
                                raise NotCheck
                            if red_check == 'NOT IN CHECK':
                                red_check = 'IN CHECK'
                                self.__red_check = red_check
                                return
                            if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                return
                        if len(str_int) == 2:
                            if str_int[0] == '-':
                                for t in range(1, int(str_int[1])):
                                    y = t * -1
                                    if board[o + y] != [' ']:
                                        check_test += 1
                                if check_test > 0:
                                    raise NotCheck
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                            else:
                                # A chariot one step below gives check immediately
                                if str_int[0] == '1':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                for t in range(1, int(str_int[0])):
                                    y = t * 10
                                    if board[o + y] != [' ']:
                                        check_test += 1
                                if check_test > 0:
                                    raise NotCheck
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if len(str_int) == 1:
                            for t in range(1, int(str_int[0])):
                                if board[o + t] != [' ']:
                                    check_test += 1
                            if check_test > 0:
                                raise NotCheck
                            if red_check == 'NOT IN CHECK':
                                red_check = 'IN CHECK'
                                self.__red_check = red_check
                                return
                            if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                return
                except NotCheck:
                    continue
                except IndexError:
                    continue
                try:
                    if board[o + p] == ["BN"]:
                        if p == 1 or p == -1 or p == 10 or p == -10:
                            raise NotCheck
                        str_int = [char for char in str(p)]
                        check_test = 0
                        if len(str_int) == 3:
                            for t in range(1, int(str_int[1])):
                                y = t * -10
                                if board[o + y] != [' ']:
                                    check_test += 1
                            if check_test != 1:
                                raise NotCheck
                            if red_check == 'NOT IN CHECK':
                                red_check = 'IN CHECK'
                                self.__red_check = red_check
                                return
                            if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                return
                        if len(str_int) == 2:
                            if str_int[0] == '-':
                                for t in range(1, int(str_int[1])):
                                    y = t * -1
                                    if board[o + y] != [' ']:
                                        if board[o + y][0] in end_check:
                                            continue
                                        check_test += 1
                                if check_test != 1:
                                    raise NotCheck
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                            else:
                                for t in range(1, int(str_int[0])):
                                    y = t * 10
                                    if board[o + y] != [' ']:
                                        check_test += 1
                                if check_test != 1:
                                    raise NotCheck
                                if red_check == 'NOT IN CHECK':
                                    red_check = 'IN CHECK'
                                    self.__red_check = red_check
                                    return
                                if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                    return
                        if len(str_int) == 1:
                            for t in range(1, int(str_int[0])):
                                if board[o + t] != [' ']:
                                    if board[o + t][0] in end_check:
                                        continue
                                    check_test += 1
                            if check_test != 1:
                                raise NotCheck
                            if red_check == 'NOT IN CHECK':
                                red_check = 'IN CHECK'
                                self.__red_check = red_check
                                return
                            if red_check != 'NOT IN CHECK' and move_counter % 2 != 0:
                                return
                except NotCheck:
                    continue
                except IndexError:
                    continue
            # If all tests pass, the red general status is set to NOT IN CHECK
            red_check = 'NOT IN CHECK'
            self.__red_check = red_check
            return
# a function that is passed locations and sees if they are viable
def viable_move(self, left, to):
    """
    Test whether moving from flat index ``left`` to flat index ``to``
    would be legal, without permanently altering the game.

    Works on a shallow copy of the board: validates the piece's movement,
    performs the move on the copy, recomputes check status, and reports
    whether the mover would still be in check afterwards.

    NOTE(review): the movement validators and check_check update the real
    check flags as a side effect even though the board is only a copy —
    TODO confirm this is intended.

    :param left: flat board index of the piece to move
    :param to: flat board index of the destination
    :return: True if the move is viable, False otherwise
    """
    # Copy the board so the real position is untouched by the trial move
    board = self.__board[:]
    game_state = self.__game_state
    move_counter = self.__move_counter
    spot_from = left
    spot_to = to
    # If the game has a winner, return False
    if game_state != 'UNFINISHED':
        return False
    # An empty source square can never produce a viable move
    if board[spot_from] == [' ']:
        return False
    else:
        piece = [char for char in board[spot_from][0]]
        piece_check = piece[1]
        lett = [char for char in board[spot_from][0]]
    # If it is not the turn of the opposing player to the piece that is
    # trying to move, return False
    if lett[0] == 'R':
        if move_counter % 2 != 0:
            return False
    if lett[0] == 'B':
        if move_counter % 2 == 0:
            return False
    # Dispatch to the movement validator for this piece type; an illegal
    # move raises NotALegalMove and is converted to False here
    if piece_check == 'E':
        try:
            XiangqiGame.elephant_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'G':
        try:
            XiangqiGame.general_move(self, 0, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'A':
        try:
            XiangqiGame.advisor_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'H':
        try:
            XiangqiGame.horse_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'C':
        try:
            XiangqiGame.chariot_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'N':
        try:
            XiangqiGame.cannon_move(self, 0, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    if piece_check == 'S':
        try:
            XiangqiGame.soldier_move(self, loc=spot_from, dest=spot_to)
        except NotALegalMove:
            return False
    # Perform the trial move on the board copy
    piece = board[spot_from]
    board[spot_from] = [' ']
    board[spot_to] = piece
    # Check to see if the player on the test board is still in check
    XiangqiGame.check_check(self, board=board)
    # If the player being tested for is still in check, undo and reject
    if move_counter % 2 == 0:
        if XiangqiGame.is_in_check(self, 'red'):
            board[spot_from] = piece
            board[spot_to] = [' ']
            return False
    if move_counter % 2 != 0:
        if XiangqiGame.is_in_check(self, 'black'):
            board[spot_from] = piece
            board[spot_to] = [' ']
            return False
    return True
# A custom error that is raised if the piece is not in check
class NotCheck(Exception):
    """Raised during check detection when a candidate attacker turns out not to give check (e.g. its line is blocked)."""
# A custom error that is raised if the move is not legal
class NotALegalMove(Exception):
    """Raised by the piece-movement validators when a requested move is not legal."""
# A custom error that is raised if the move puts the general in check
class GeneralCheck(Exception):
    """Raised when a move would put a general in check."""
|
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helper_new
import word2vec_helpers
import pandas as pd
# Parameters
# ==================================================
# Data Parameters
# NOTE: "./" is the current directory, "../" the parent directory, "/" the root
tf.flags.DEFINE_string("input_text_file", "G:/data_test.csv", "Label file for test text data source.")
# tf.flags.DEFINE_string("input_text_file", "../data/data2.csv", "Test text data source to evaluate.")
tf.flags.DEFINE_string("single_url",None,"single url to evaluate")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_boolean("eval_train", True, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# Called for its side effect of materializing the flag values
FLAGS.flag_values_dict()
def test(path, checkpoint_path):
    """
    Evaluate the trained CNN over the samples in ``path`` and write scores to CSV.

    Loads the latest checkpoint under ``checkpoint_path``, embeds the input
    text with a previously trained word2vec model, runs the graph's
    "output/scores" tensor batch by batch, and saves (name, |score|) rows to
    'F:/2018年暑假科研/CNN/my_clone/method_sim.csv'.

    :param path: CSV file with the samples to evaluate
    :param checkpoint_path: directory containing TensorFlow checkpoints
    """
    print("\nParameters:")
    for attr, value in sorted(FLAGS.__flags.items()):
        print("{}={}".format(attr.upper(), value))
    print("")
    # validate
    # ==================================================
    # validate checkpoint file:
    # tf.train.latest_checkpoint() automatically returns the most recently saved model
    checkpoint_file = tf.train.latest_checkpoint(
        checkpoint_path)
    if checkpoint_file is None:
        print("Cannot find a valid checkpoint file!")
        exit(0)
    print("Using checkpoint file : {}".format(checkpoint_file))
    # Load the word2vec model.
    # NOTE(review): hard-coded absolute path instead of the commented-out
    # os.path.join relative form below — breaks on other machines.
    # trained_word2vec_model_file = os.path.join(checkpoint_path, "..", "trained_word2vec.model")
    trained_word2vec_model_file = 'F:/2018年暑假科研/CNN/CNN相关文件/DetectMaliciousURL-master/model/runs/1539606045/trained_word2vec.model'
    # NOTE(review): a missing model file is only reported, not fatal — TODO confirm
    if not os.path.exists(trained_word2vec_model_file):
        print("Word2vec model file \'{}\' doesn't exist!".format(trained_word2vec_model_file))
    print("Using word2vec model file : {}".format(trained_word2vec_model_file))
    # validate training params file (also a hard-coded absolute path)
    training_params_file = 'F:/2018年暑假科研/CNN/CNN相关文件/DetectMaliciousURL-master/model/runs/1539606045/training_params.pickle'
    # training_params_file = os.path.join(checkpoint_path, "..", "training_params.pickle")
    if not os.path.exists(training_params_file):
        print("Training params file \'{}\' is missing!".format(training_params_file))
    print("Using training params file : {}".format(training_params_file))
    # Load params
    params = data_helper_new.loadDict(training_params_file)
    print("type of params: {}".format(type(params)))
    num_labels = int(params['num_labels'])
    max_document_length = int(params['max_document_length'])
    # x_raw1, y_test = data_helper_new.load_data_and_labels(FLAGS.input_text_file)
    x_raw, name = data_helper_new.load_data_and_names(path)
    # x_raw+=x_raw1
    # Get embedding vector x_test: pad all sentences to the trained length,
    # then embed them with the loaded word2vec model
    sentences, max_document_length = data_helper_new.padding_sentences(x_raw, '<PADDING>',
                                                                       padding_sentence_length=max_document_length)
    x_test = np.array(word2vec_helpers.embedding_sentences(sentences, file_to_load=trained_word2vec_model_file))
    print("x_test.shape = {}".format(x_test.shape))
    # Evaluation
    # ==================================================
    print("\nEvaluating...\n")
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)
    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables.
            # import_meta_graph loads the graph structure from model.ckpt-n.meta and returns a saver
            saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
            # saver.restore locates the variable value files for model.ckpt-n and loads them
            saver.restore(sess, checkpoint_file)
            # Get the placeholders from the graph by name
            # (get_operation_by_name returns the op node of that name)
            input_x = graph.get_operation_by_name("input_x").outputs[0]
            dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
            batches = data_helper_new.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
            all_scores = []
            def add_score(score):
                # Flatten each batch of scores into the running list
                for k in score:
                    all_scores.append(k)
            scores = graph.get_operation_by_name("output/scores").outputs[0]
            for x_test_batch in batches:
                # Dropout disabled (keep_prob=1.0) at evaluation time
                score = sess.run(scores, {input_x: x_test_batch, dropout_keep_prob: 1.0})
                add_score(score)
            # Save the evaluation to a csv
            print(all_scores)
            all_data = []
            def cal_data():
                # Pair each sample name with the absolute value of its first score
                for k in range(len(name)):
                    data = []
                    data.append(name[k])
                    all_scores[k][0] = abs(all_scores[k][0])
                    data.append(all_scores[k][0])
                    all_data.append(data)
            cal_data()
            test3 = pd.DataFrame(data=all_data)
            test3.to_csv('F:/2018年暑假科研/CNN/my_clone/method_sim.csv', encoding="gbk")
if __name__ == '__main__':
    # Hard-coded input CSV and checkpoint directory for a local run
    path = "F:/2018年暑假科研/CNN/my_clone/method_merge.csv"
    checkpoint_path = "F:/2018年暑假科研/CNN/CNN相关文件/DetectMaliciousURL-master/model/runs/1539606045/checkpoints"
    test(path, checkpoint_path)
|
# -*- coding: utf-8 -*-
# @Time : 2018/9/27 17:06
# @Author : HLin
# @Email : linhua2017@ia.ac.cn
# @File : data_utils.py
# @Software: PyCharm
import os
import sys
sys.path.append(os.path.abspath('..'))
from tqdm import tqdm
import numpy as np
from datasets.Voc_Dataset import VOCDataLoader
from datasets.cityscapes_Dataset import City_DataLoader
# Map a dataset name to its loader class; 'voc2012' and 'voc2012_aug'
# share the same VOC loader.
dataloader = {
    'voc2012': VOCDataLoader,
    'voc2012_aug': VOCDataLoader,
    'cityscapes': City_DataLoader
}
def calculate_weigths_labels(args):
    """
    Compute per-class balancing weights for a segmentation dataset.

    Iterates the whole dataset once, counts how often each label value in
    [0, args.num_classes) occurs, converts the counts to weights via
    1 / log(1.02 + frequency_ratio), and saves the result as
    '<dataset>classes_weights_log.npy' under the pretrained-weights folder.

    :param args: namespace with at least ``dataset`` (key into the module's
                 ``dataloader`` map) and ``num_classes``
    """
    # Build a loader instance for the requested dataset
    loader = dataloader[args.dataset](args)
    class_counts = np.zeros((args.num_classes,))
    # Wrap the data loader in tqdm for a progress bar
    progress = tqdm(loader.data_loader, total=loader.num_iterations)
    for _, label_batch, _ in progress:
        label_batch = label_batch.numpy()
        # Keep only labels that fall inside the valid class range
        valid = (label_batch >= 0) & (label_batch < args.num_classes)
        flat_labels = label_batch[valid].astype(np.uint8)
        class_counts += np.bincount(flat_labels, minlength=args.num_classes)
    progress.close()
    total_frequency = np.sum(class_counts)
    print(class_counts)
    print(total_frequency)
    # Rare classes get larger weights; the 1.02 offset keeps the log positive
    per_class = [1 / (np.log(1.02 + (frequency / total_frequency)))
                 for frequency in class_counts]
    ret = np.array(per_class)
    classes_weights_path = os.path.join('/data/linhua/VOCdevkit/pretrained_weights', args.dataset + 'classes_weights_log')
    np.save(classes_weights_path, ret)
    print(ret)
|
""" Quick RTMP connection client """
# Info:
# Credits:
# Structure:
# Predominantly consisting of a class structure with 3 main classes:
# - Pre-connection settings
# - Normal packet handling via NetConnection
# - Other packet handling via NetStream
# AMF Encoding/Decoding:
# The process of encoding or decoding AMF formats should be done via PyAMF/AMFast - both of which
# are supported in the amfast library.
# Pre-connection:
# 1. Should handle all the necessary processes to establish a connection between client and server
# all before the exchange of whatever standard of formats has begun.
# NetConnection:
# 1. Should have all the procedures necessary to establish the initial connection to a server.
# Including handshake & connection packet.
# NetStream:
#     1. Should handle the per-stream message flow (the "other packet handling"
#        mentioned above) once a NetConnection has been established.
class PreConnection:
    """Pre-connection stage: intended to perform everything needed to
    establish the client/server link before standard packet exchange
    begins (see the module header notes).  Not yet implemented."""
    def __init__(self):
        pass
class NetConnection:
    """Normal packet handling: intended to hold the procedures for the
    initial connection to a server, including the handshake and the
    connect packet.  Not yet implemented."""
    def __init__(self):
        pass
class NetStream:
    """Other (stream-level) packet handling on top of an established
    NetConnection.  Not yet implemented."""
    def __init__(self):
        pass
|
import struct
from Crypto.Cipher import AES
## Constants for packet decoding fields

# Frame Control Field
DOT154_FCF_TYPE_MASK            = 0x0007    #: Frame type mask
DOT154_FCF_SEC_EN               = 0x0008    #: Set for encrypted payload
DOT154_FCF_FRAME_PND            = 0x0010    #: Frame pending
DOT154_FCF_ACK_REQ              = 0x0020    #: ACK request
DOT154_FCF_INTRA_PAN            = 0x0040    #: Intra-PAN activity
DOT154_FCF_DADDR_MASK           = 0x0C00    #: Destination addressing mode mask
DOT154_FCF_VERSION_MASK         = 0x3000    #: Frame version
DOT154_FCF_SADDR_MASK           = 0xC000    #: Source addressing mask mode

# Frame Control Field Bit Shifts
DOT154_FCF_TYPE_MASK_SHIFT      = 0         #: Frame type mask mode shift
DOT154_FCF_DADDR_MASK_SHIFT     = 10        #: Destination addressing mode mask
DOT154_FCF_VERSION_MASK_SHIFT   = 12        #: Frame versions mask mode shift
DOT154_FCF_SADDR_MASK_SHIFT     = 14        #: Source addressing mask mode shift

# Address Mode Definitions
DOT154_FCF_ADDR_NONE            = 0x0000    #: Not sure when this is used
DOT154_FCF_ADDR_SHORT           = 0x0002    #: 4-byte addressing
DOT154_FCF_ADDR_EXT             = 0x0003    #: 8-byte addressing

# Frame type values
DOT154_FCF_TYPE_BEACON          = 0         #: Beacon frame
DOT154_FCF_TYPE_DATA            = 1         #: Data frame
DOT154_FCF_TYPE_ACK             = 2         #: Acknowledgement frame
DOT154_FCF_TYPE_MACCMD          = 3         #: MAC Command frame

# CCM* security levels
DOT154_CRYPT_NONE               = 0x00      #: No encryption, no MIC
DOT154_CRYPT_MIC32              = 0x01      #: No encryption, 32-bit MIC
DOT154_CRYPT_MIC64              = 0x02      #: No encryption, 64-bit MIC
DOT154_CRYPT_MIC128             = 0x03      #: No encryption, 128-bit MIC
DOT154_CRYPT_ENC                = 0x04      #: Encryption, no MIC
DOT154_CRYPT_ENC_MIC32          = 0x05      #: Encryption, 32-bit MIC
DOT154_CRYPT_ENC_MIC64          = 0x06      #: Encryption, 64-bit MIC
DOT154_CRYPT_ENC_MIC128         = 0x07      #: Encryption, 128-bit MIC


class Dot154PacketParser:
    '''
    Parser for raw IEEE 802.15.4 MAC frames: header-field chopping, header
    and payload length calculation, CCM* nonce extraction, and decryption of
    ENC-MIC-64 protected payloads.  Packets and keys are byte strings
    (written for Python 2 string semantics).
    '''
    def __init__(self):
        '''
        Instantiates the Dot154PacketParser class.
        '''
        # State for the AES-CTR counter callback used by decrypt():
        # __crypt_A_i holds the precomputed counter blocks A[i], and
        # __crypt_blockcntr is the index of the next block to hand out.
        self.__crypt_blockcntr = 1
        self.__crypt_A_i = []
        return

    def __crypt_counter(self):
        '''
        Counter callback for AES-CTR mode; returns successive precomputed
        A[i] blocks.  Only valid after decrypt() has populated
        self.__crypt_A_i.  Don't call this directly.  Just don't.
        '''
        retindex = self.__crypt_blockcntr
        self.__crypt_blockcntr += 1
        return self.__crypt_A_i[retindex]

    def decrypt(self, packet, key):
        '''
        Decrypts the specified packet (security level ENC-MIC-64 only).
        Returns the plaintext payload, or an empty string if the computed
        MIC does not match the MIC carried in the packet.

        @type packet: String
        @param packet: Packet contents.
        @type key: String
        @param key: 16-byte AES key.
        @rtype: String
        @return: Decrypted packet contents, empty string if the decrypted
            MIC fails validation.
        @raise Exception: Unsupported security level, bad key length, or
            too-short payload.
        '''
        # Retrieve the data payload from the packet contents
        encpayload = packet[-self.payloadlen(packet):]

        # The first payload byte is the security-level field.
        if ord(encpayload[0]) != DOT154_CRYPT_ENC_MIC64:
            raise Exception("Unsupported security level in packet: 0x%02x." % ord(encpayload[0]))

        if len(key) != 16:
            raise Exception("Invalid key length (%d)." % len(key))

        # Encrypted content is:
        # Sec Level | 4-byte counter | Flags | Ciphertext | Encrypted 8-byte MIC
        if self.payloadlen(packet) < 15:
            raise Exception("Payload length too short (%d)." % self.payloadlen(packet))

        nonce = self.nonce(packet)

        # c = ciphertext payload including trailing 8-byte encrypted MIC
        c = encpayload[-9:]

        # 1. Parse C||U where U is the right-most bytes for MIC and C is the
        #    remaining bytes (representing encrypted packet payload content)
        C = c[0:-8]
        U = c[-8:]

        # 2. Form cipherText by padding C to a block size
        cipherText = C + ("\x00" * (16 - len(C)%16))

        # 3. Form 1-byte flags field = 01
        # XXX will vary when L changes
        flags = "\x01"

        # 4. Define 16-octet A_i consisting of:
        #    Flags || Nonce || 2-byte counter i for i=0,1,2, ...
        #    A[0] is for authenticity check, A[1] is for the first block of
        #    data, A[2] is for the 2nd block of data, if C > 16
        self.__crypt_A_i = []
        for i in xrange(0, (1+1+(len(C)/16))):
            self.__crypt_A_i.append(flags + nonce + struct.pack(">H",i))

        # 5. Decrypt cipherText producing plainText (observed)
        self.__crypt_blockcntr = 1      # Start at A[1] to decrypt
        crypt = AES.new(key, AES.MODE_CTR, counter=self.__crypt_counter)
        plainText = crypt.decrypt(cipherText)[0:len(C)]

        # 6. Compute S_0 as E(Key, A[0])
        crypt = AES.new(key, AES.MODE_CBC, "\x00"*16)
        S_0 = crypt.encrypt(self.__crypt_A_i[0])

        # 7. Compute MIC (T) observed as S_0 XOR U
        T_obs = []
        for i in xrange(0,len(S_0[0:8])):
            T_obs.append((ord(S_0[i]) ^ ord(U[i])))
        # Convert T_obs back into a byte string
        T_obs = ''.join(struct.pack("B",i) for i in T_obs)

        # 8. Compute a over packet contents before ciphertext payload: the
        #    802.15.4 header plus the first 6 payload bytes (security level,
        #    4-byte frame counter, flags).
        hdrlen = self.hdrlen(packet)
        a = packet[0:hdrlen] + packet[hdrlen:hdrlen+6]

        # 9. Concatenate L(a) of 2-byte length a with a
        addAuthData = struct.pack(">H",len(a)) + a
        # 10. Pad addAuthData to an even block size
        addAuthData += ("\x00" * (16 - len(addAuthData)%16))

        # 11. Form AuthData by concatenating addAuthData and PlaintextData
        #     (plainText padded to an even block size)
        plainTextPadded = plainText + ("\x00" * (16 - len(plainText)%16))
        authData = addAuthData + plainTextPadded

        # 12. Perform authData transformation into B[0], B[1], ..., B[i]
        B = "\x59" + nonce + "\x00\x01" + authData

        # 13. Calculate the MIC (T) with CBC-MAC over the B blocks
        iv = "\x00"*16
        for i in xrange(0, len(B)/16):
            crypt = AES.new(key, AES.MODE_CBC, iv)
            Bn = B[i*16:(i*16)+16]
            iv = crypt.encrypt(Bn)
        T_calc = iv[0:8]

        # 14. Accept the plaintext only when observed and calculated MICs
        #     match; an empty string signals validation failure.
        if T_obs == T_calc:
            return plainText
        else:
            return ""

    def pktchop(self, packet):
        '''
        Chops up the specified packet contents into a list of fields.  Does
        not attempt to re-order the field values for parsing.  ''.join(X)
        will reassemble the original packet string.  Fields which may or may
        not be present (such as the Source PAN field) are empty if they are
        not present, keeping the list elements consistent, as follows:

        FCF | Seq# | DPAN | DA | SPAN | SA | [Beacon Data] | PHY Payload

        If the packet is a beacon frame, the Beacon Data field will be
        populated as a list element in the format:

        Superframe Spec | GTS Fields | Pending Addr Counts | Proto ID |
        Stack Profile/Profile Version | Device Capabilities | Ext PAN ID |
        TX Offset | Update ID

        An exception is raised if the packet contents are too short to
        decode.

        @type packet: String
        @param packet: Packet contents.
        @rtype: list
        @return: Chopped contents of the 802.15.4 packet into list elements.
        '''
        pktchop = ['', '', '', '', '', '', [], '']

        # FCF and sequence number
        pktchop[0] = packet[0:2]
        pktchop[1] = packet[2]

        # Byte swap
        fcf = struct.unpack("<H",pktchop[0])[0]

        # Check if we are dealing with a beacon frame
        if (fcf & DOT154_FCF_TYPE_MASK) == DOT154_FCF_TYPE_BEACON:
            beacondata = ["", "", "", "", "", "", "", "", "", ""]
            try:
                # 802.15.4 fields, SPAN and SA
                pktchop[4] = packet[3:5]
                pktchop[5] = packet[5:7]
                offset = 7
                # Superframe specification
                beacondata[0] = packet[offset:offset+2]
                offset+=2
                # GTS data
                beacondata[1] = packet[offset]
                offset+=1
                # Pending address count
                beacondata[2] = packet[offset]
                offset+=1
                # Protocol ID
                beacondata[3] = packet[offset]
                offset+=1
                # Stack Profile version
                beacondata[4] = packet[offset]
                offset+=1
                # Capability information
                beacondata[5] = packet[offset]
                offset+=1
                # Extended PAN ID
                beacondata[6] = packet[offset:offset+8]
                offset+=8
                # TX Offset
                beacondata[7] = packet[offset:offset+3]
                offset+=3
                # Update ID
                beacondata[8] = packet[offset]
                offset+=1
            except:
                # Truncated beacon: keep whatever fields were recovered.
                pass
            pktchop[6] = beacondata
        else:
            # Not a beacon frame
            # DPAN
            pktchop[2] = packet[3:5]
            offset = 5

            # Examine the destination addressing mode
            daddr_mask = (fcf & DOT154_FCF_DADDR_MASK) >> DOT154_FCF_DADDR_MASK_SHIFT
            if daddr_mask == DOT154_FCF_ADDR_EXT:
                pktchop[3] = packet[offset:offset+8]
                offset+=8
            elif daddr_mask == DOT154_FCF_ADDR_SHORT:
                pktchop[3] = packet[offset:offset+2]
                offset+=2

            # Examine the Intra-PAN flag; the Source PAN field is present
            # only when Intra-PAN is clear.
            if (fcf & DOT154_FCF_INTRA_PAN) == 0:
                pktchop[4] = packet[offset:offset+2]
                offset+=2

            # Examine the source addressing mode.
            # BUG FIX: these comparisons previously tested daddr_mask, so the
            # source address width was chosen from the *destination*
            # addressing mode.
            saddr_mask = (fcf & DOT154_FCF_SADDR_MASK) >> DOT154_FCF_SADDR_MASK_SHIFT
            if saddr_mask == DOT154_FCF_ADDR_EXT:
                pktchop[5] = packet[offset:offset+8]
                offset+=8
            elif saddr_mask == DOT154_FCF_ADDR_SHORT:
                pktchop[5] = packet[offset:offset+2]
                offset+=2

        # Append remaining payload
        if offset < len(packet):
            pktchop[7] = packet[offset:]

        return pktchop

    def hdrlen(self, packet):
        '''
        Returns the length of the 802.15.4 header.

        @type packet: String
        @param packet: Packet contents to evaluate for header length.
        @rtype: Int
        @return: Length of the 802.15.4 header.
        '''
        # Base header is 9 bytes: 2 FCF + 1 Seq + 2 DPAN + 2 short DstAddr +
        # 2 short SrcAddr (Intra-PAN set, so no Source PAN field).  Extended
        # addressing and a present Source PAN extend it below.
        # XXX Need to validate this logic based on specification
        if (len(packet) < 9):
            raise Exception("Packet too small, %d bytes." % len(packet))

        # Start with minimum size, increase as needed based on FCF flags
        plen = 9

        # Byte swap
        fcf = struct.unpack("<H",packet[0:2])[0]

        # Examine the destination addressing mode
        if (fcf & DOT154_FCF_DADDR_MASK) >> 10 == DOT154_FCF_ADDR_EXT:
            plen += 6   # 8-byte addressing is in use, increasing addr 6 bytes
        # Examine the source addressing mode
        if (fcf & DOT154_FCF_SADDR_MASK) >> 14 == DOT154_FCF_ADDR_EXT:
            plen += 6   # 8-byte addressing is in use, increasing addr 6 bytes
        # Examine the Intra-PAN flag
        if (fcf & DOT154_FCF_INTRA_PAN) == 0:
            plen += 2   # Intra-PAN is false, source PAN 2-bytes is present

        return plen

    def payloadlen(self, packet):
        '''
        Returns the length of the 802.15.4 payload.

        @type packet: String
        @param packet: Packet contents to evaluate for header length.
        @rtype: Int
        @return: Length of the 802.15.4 payload.
        '''
        return len(packet) - self.hdrlen(packet)

    def nonce(self, packet):
        '''
        Returns the CCM* nonce of the 802.15.4 packet, formed as the
        reversed source address, the reversed 4-byte frame counter, then
        the security-level byte.  Returns empty string for unencrypted
        frames.

        @type packet: String
        @param packet: Packet contents to evaluate for nonce.
        @rtype: String
        @return: Nonce, empty when the frame is not encrypted.
        '''
        # Byte swap
        fcf = struct.unpack("<H",packet[0:2])[0]
        if (fcf & DOT154_FCF_SEC_EN) == 0:
            # Packet is not encrypted
            return ""

        # Nonce formation is Src Addr || Frame Counter || Security Level
        pchop = self.pktchop(packet)
        # SA is the 5th list element, reverse it
        noncep1 = pchop[5][::-1]
        # Retrieve the data payload from the packet contents
        encpayload = packet[-self.payloadlen(packet):]
        # First byte of encrypted payload is the security level
        noncep3 = encpayload[0]
        # The next 4 bytes of the encrypted payload is the frame counter, rev
        noncep2 = encpayload[1:5][::-1]
        return noncep1 + noncep2 + noncep3
|
def min_coins(n):
    """
    Return the minimum number of 5-unit and 3-unit pieces that sum exactly
    to ``n``, or -1 if no such combination exists.

    Prefers as many 5s as possible (fewer pieces overall), scanning every
    feasible count of 5s and covering the remainder with 3s.

    :param n: non-negative integer total to reach.
    :return: minimal piece count, or -1 when unreachable.
    """
    best = -1
    for fives in range(n // 5, -1, -1):
        rest = n - 5 * fives
        if rest % 3 == 0:
            count = fives + rest // 3
            # Track the smallest piece count seen so far (avoids the
            # original's shadowing of the builtin `min`).
            if best == -1 or count < best:
                best = count
    return best

if __name__ == "__main__":
    # Same I/O contract as the original script: read N, print the answer.
    print(min_coins(int(input())))
'''
def func(num):
if(num<3):
return -1
else:
for i in range((num//3)+1):
for j in range((num//5)+1):
result= 3*i + 5*j
if(result==num):
return i+j
return -1
k=int(input())
print(func(k))
내가 원했던 DP solution
'''
|
from django.db import models
# Create your models here.
class Preguntas(models.Model):
    """A poll question ('pregunta') with its publication timestamp."""
    pregunta = models.CharField(max_length=200)
    pub_date = models.DateTimeField()
    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.pregunta
class Opciones(models.Model):
    """An answer choice for a Preguntas poll, with its vote counter."""
    # on_delete is a required argument since Django 2.0; CASCADE matches the
    # implicit default of earlier Django versions, so behavior is unchanged.
    Pregunta = models.ForeignKey(Preguntas, on_delete=models.CASCADE)
    opcion_texto = models.CharField(max_length=100)
    votos = models.IntegerField(default=0)
    def __str__(self):
        # Shown in the admin and anywhere the object is rendered as text.
        return self.opcion_texto
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 17:04:58 2018
@author: kai
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 17:18:49 2018
@author: kai
"""
import numpy as np
import matplotlib.pyplot as pt
from scipy.spatial.distance import cdist
def SAW(length, max_algs, X, Y):
    """
    Run the pivot algorithm on an interacting self-avoiding walk (ISAW) on
    the 2D square lattice.

    Starting from the walk given by coordinate arrays X, Y, repeatedly picks
    a random pivot site and applies a random lattice symmetry to the tail of
    the walk.  A move is rejected when the new walk self-intersects, or —
    when it lowers the nearest-neighbour contact count — with Metropolis
    probability based on exp(epsilon * d_contacts).

    Results are left in the module globals X2, Y2 (final coordinates) and
    neighbours (contact count), matching the original interface.

    :param length: number of sites in the walk.
    :param max_algs: number of pivot attempts to perform.
    :param X: initial x coordinates (copied, not modified).
    :param Y: initial y coordinates (copied, not modified).
    """
    global epsilon, neighbours
    neighbours = 0
    epsilon = 0.269
    # The 7 non-identity symmetries of the square lattice (rotations and
    # reflections) used as pivot transformations.
    matrices = np.array([[[0,-1],[1,0]], [[-1,0],[0,-1]],
                         [[0,1],[-1,0]], [[1,0],[0,-1]], [[-1,0],[0,1]],
                         [[0,1],[1,0]], [[0,-1],[-1,0]]])
    global X2, Y2
    X2 = np.copy(X)
    Y2 = np.copy(Y)
    algs = 0
    while algs < max_algs:
        # Snapshot so a rejected move can be undone.
        X3 = np.copy(X2)
        Y3 = np.copy(Y2)
        # NOTE(review): randint's high bound is exclusive, so pivot lies in
        # [1, length-3]; confirm whether length-2 was meant to be reachable.
        pivot = np.random.randint(1, length - 2)
        # BUG FIX: the original used randint(0, len(matrices)-1), whose
        # exclusive upper bound meant the last symmetry matrix could never
        # be selected.
        trans_matrix = matrices[np.random.randint(0, len(matrices))]
        # Apply the symmetry to every site beyond the pivot, about the pivot.
        j = pivot + 1
        while j < length:
            [X2[j], Y2[j]] = trans_matrix.dot(([X2[j] - X2[pivot], Y2[j] - Y2[pivot]])) + [X2[pivot], Y2[pivot]]
            j = j + 1
        # Self-avoidance check.  Each rigid half is unchanged internally, so
        # only cross pairs (before pivot vs. pivot onwards) need testing.
        overlap = False
        k = 0
        while k < pivot:
            l = pivot
            while l < length:
                if X2[k] == X2[l] and Y2[k] == Y2[l]:
                    overlap = True
                    break
                l = l + 1
            if overlap:
                break
            k = k + 1
        if not overlap:
            # Metropolis accept/reject on the nearest-neighbour contact
            # count (non-consecutive sites at unit distance).
            old_neighbours = neighbours
            neighbours = 0
            for i in range(0, length):
                for j in range(i + 2, length):
                    if (X2[i] - X2[j])**2 + (Y2[i] - Y2[j])**2 == 1:
                        neighbours = neighbours + 1
            if neighbours < old_neighbours:
                acc = np.random.rand()
                if acc > np.exp((epsilon*(neighbours-old_neighbours))):
                    # Reject: fewer contacts and the Metropolis draw failed.
                    overlap = True
                    neighbours = old_neighbours
        # If the move was invalid or rejected, revert to the snapshot.
        if overlap:
            X2 = np.copy(X3)
            Y2 = np.copy(Y3)
        algs = algs + 1
#Data collection
# Sample the squared end-to-end distance of a 30-site interacting
# self-avoiding walk, starting from a straight line along the x axis.
temp_length = 30
rsq = []  # samples of squared end-to-end distance
#x = np.arange(0,300)
X0 = np.arange(temp_length)
Y0 = np.zeros(temp_length)
'''for i in range(0,200):
    print(i)
    SAW(temp_length, i, X0, Y0),k
    rsq.append(X2[temp_length - 1]**2 + Y2[temp_length - 1]**2)
pt.plot(x,rsq)
'''
# Equilibrate with 300 pivot attempts, then sample: each iteration continues
# the Markov chain from the current configuration (globals X2/Y2 set by SAW)
# with 5 more attempts and records the squared end-to-end distance.
SAW(temp_length, 300, X0, Y0)
for i in range(0, 10000):
    print(i)
    SAW(temp_length, 5, X2, Y2)
    rsq.append((X2[temp_length-1]**2 + Y2[temp_length-1]**2))
# Mean squared end-to-end distance estimate, then the final walk shape.
print(np.mean(rsq))
pt.plot(X2, Y2)
#plot of fraction of pivot attempts that are successful vs N in 2d and 3d, expecting power law?
#suggests log plot, Madras and Sokal, original pivot algorithm paper, READ!! journal of statistical physics
#expect as epsilon increases, walk become more collapsed
#is there a value of epsilon so flory exponent = 1/2???
#Papers by grassberger, READ!!!, see notes
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.