text stringlengths 8 6.05M |
|---|
from operator import ne
import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from models import setup_db, Movies,Actors, db_drop_and_create_all
from flask_cors import CORS
from auth import AuthError, requires_auth
def create_app(test_config=None):
    """Application factory: configure Flask, the database, CORS and routes.

    Args:
        test_config: unused hook for test configuration (kept for
            backward compatibility with existing callers).

    Returns:
        The configured Flask application.
    """
    # create and configure the app
    app = Flask(__name__)
    setup_db(app)
    CORS(app)
    # Uncomment to reset the database schema on startup.
    # db_drop_and_create_all()

    @app.after_request
    def after_request(response):
        # Advertise the allowed CORS headers and methods on every response.
        response.headers.add('Access-Control-Allow-Headers',
                             'Content-Type, Authorization')
        # BUG FIX: the method list was previously added under the
        # 'Access-Control-Allow-Headers' key a second time instead of
        # 'Access-Control-Allow-Methods'.
        response.headers.add('Access-Control-Allow-Methods',
                             'GET, POST, PATCH, DELETE, OPTIONS')
        return response

    @app.route('/')
    def get_greeting():
        # BUG FIX: use .get() so a missing EXCITED variable does not raise
        # KeyError (HTTP 500) on the landing page.
        excited = os.environ.get('EXCITED')
        greeting = "Welcome"
        if excited == 'true':
            greeting = greeting + "!!!!!"
        return greeting

    @app.route('/coolkids')
    def be_cool():
        return "Be cool, man, be coooool! You're almost a FSND grad!"

    # GET Movies
    @app.route('/movies')
    def movies():
        """Return all movies; 404 when none exist."""
        movies = Movies.query.all()
        if len(movies) == 0:
            abort(404)
        formatted_movies = [movie.format() for movie in movies]
        return jsonify({
            'success': True,
            "movies": formatted_movies
        }), 200

    # POST Movies
    @app.route('/movies', methods=['POST'])
    @requires_auth('post:movies')
    def post_movies(jwt):
        """Create a movie from the JSON body; 422 on invalid payload."""
        body = request.get_json()
        new_title = body.get('title')
        new_release_date = body.get('release_date')
        try:
            movie = Movies(title=new_title, release_date=new_release_date)
            movie.insert()
            formatted_movie = movie.format()
            return jsonify({
                'success': True,
                'movies': formatted_movie
            }), 200
        except Exception:
            abort(422)

    # PATCH Movies
    @app.route('/movies/<int:id>', methods=['PATCH'])
    @requires_auth('patch:movies')
    def patch_movie(jwt, id):
        """Update a movie's title/release date; 404 when it does not exist."""
        movie = Movies.query.filter(Movies.id == id).one_or_none()
        if movie is None:
            abort(404)
        body = request.get_json()
        movie.title = body.get('title')
        # BUG FIX: previously assigned to a non-existent 'new_release_date'
        # attribute, so the release date was never actually updated.
        movie.release_date = body.get('release_date')
        movie.update()
        formatted_movie = movie.format()
        return jsonify({
            'success': True,
            "movie": formatted_movie
        }), 200

    # DELETE Movies
    @app.route('/movies/<int:id>', methods=['DELETE'])
    @requires_auth('delete:movies')
    def delete_movie(jwt, id):
        """Delete a movie by id; 404 when missing, 400 on delete failure."""
        # BUG FIX: the lookup happens outside the try block so the 404
        # raised by abort() is no longer swallowed and re-reported as 400.
        movie = Movies.query.filter(Movies.id == id).one_or_none()
        if movie is None:
            abort(404)
        try:
            movie.delete()
            return jsonify({
                "success": True,
                "delete": id
            }), 200
        except Exception:
            abort(400)

    # GET Actors
    @app.route('/actors')
    def actors():
        """Return all actors; 404 when none exist."""
        actors = Actors.query.all()
        if len(actors) == 0:
            abort(404)
        formatted_actors = [actor.format() for actor in actors]
        return jsonify({
            'success': True,
            "actors": formatted_actors
        }), 200

    # Error Handling
    @app.errorhandler(422)
    def unprocessable(error):
        return jsonify({
            "success": False,
            "error": 422,
            "message": "unprocessable"
        }), 422

    @app.errorhandler(404)
    def not_found(error):
        return jsonify({
            'success': False,
            'error': 404,
            'message': 'Not found'
        }), 404

    @app.errorhandler(AuthError)
    def auth_error(e):
        # Propagate the status code and payload raised by the auth layer.
        return jsonify({
            "success": False,
            "error": e.status_code,
            "message": e.error
        }), e.status_code

    @app.errorhandler(401)
    def unauthorized(error):
        return jsonify({
            "success": False,
            "error": 401,
            "message": 'Unauthorized'
        }), 401

    @app.errorhandler(403)
    def forbidden(error):
        return jsonify({
            'success': False,
            'error': 403,
            'message': 'Forbidden'
        }), 403

    @app.errorhandler(400)
    def bad_request(error):
        return jsonify({
            'success': False,
            'error': 400,
            'message': 'Bad request'
        }), 400

    return app
# Module-level WSGI entry point (used by WSGI servers such as gunicorn).
app = create_app()

if __name__ == '__main__':
    # Local development server; the PORT env var may override the default 5000.
    app.run(host='127.0.0.1', port=int(os.environ.get("PORT", 5000)), debug=True)
|
import os
# Required configuration -- fail fast (KeyError) when any of these is unset.
postgres_endpoint = os.environ['DP_ACCTMGMT_POSTGRES_URL']
bucket_name = os.environ['DP_ACCTMGMT_S3_BUCKET']
bucket_file_name = os.environ['DP_ACCTMGMT_EXPORTED_FILENAME']
aws_access_key_id = os.environ['DP_ACCTMGMT_AWS_ACCESS_KEY_ID']
aws_secret_access_key = os.environ['DP_ACCTMGMT_AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['DP_ACCTMGMT_AWS_REGION']
# Optional configuration -- default to None when unset.
# (os.environ.get replaces the membership-test / double-lookup dance.)
aws_role_arn = os.environ.get('DP_ACCTMGMT_AWS_ROLE_ARN')
aws_profile_local = os.environ.get('DP_ACCTMGMT_AWS_PROFILE_LOCAL')
# -*- coding: utf-8 -*-
# Copyright (c) 2019, DBF and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AccommodationPackagePricingItem(Document):
    """Pricing row for an accommodation package; mirrors itself into Items."""

    def add_or_update_item(self):
        """Create or refresh the Items backing this pricing row.

        Two items are maintained: "<package> <room_type>" at `rate` and
        "<package> <room_type> Extra Bed" at `extra_bed_rate`.
        """
        pending = [
            frappe._dict({"item_code": "%s %s" % (self.package, self.room_type),
                          "rate": self.rate}),
            frappe._dict({"item_code": "%s %s Extra Bed" % (self.package, self.room_type),
                          "rate": self.extra_bed_rate}),
        ]

        def create_item(item_code, rate):
            # New service item: non-stock, non-purchase, priced at `rate`.
            frappe.get_doc({
                "doctype": "Item",
                "item_code": item_code,
                "item_name": item_code,
                "description": item_code,
                "item_group": "Services",
                "standard_rate": rate,
                "is_stock_item": 0,
                "is_purchase_item": 0,
                "sales_uom": "Nos",
            }).insert()

        for entry in pending:
            if frappe.db.exists("Item", entry["item_code"]):
                # Existing item: only the standard rate is refreshed.
                existing = frappe.get_doc("Item", entry["item_code"])
                existing.update({"standard_rate": entry["rate"]})
                existing.save()
            else:
                create_item(entry["item_code"], entry["rate"])
|
# COMP90024 Team 1
# Albert, Darmawan (1168452) - Jakarta, ID - darmawana@student.unimelb.edu.au
# Clarisca, Lawrencia (1152594) - Melbourne, AU - clawrencia@student.unimelb.edu.au
# I Gede Wibawa, Cakramurti (1047538) - Melbourne, AU - icakramurti@student.unimelb.edu.au
# Nuvi, Anggaresti (830683) - Melbourne, AU - nanggaresti@student.unimelb.edu.au
# Wildan Anugrah, Putra (1191132) - Jakarta, ID - wildananugra@student.unimelb.edu.au
import couchdb
import csv
import tweepy
import json
import pandas as pd
from location_utils import LocationUtils
import os
import time
# Defining constants -- each can be overridden through the environment.
# (os.environ.get with a default replaces the conditional-expression dance.)
MAXIDFILE = os.environ.get('MAXIDFILE', 'curr_maxID.txt')
CITYFILE = os.environ.get('CITYFILE', 'city_details.csv')
ADDRESS = os.environ.get('ADDRESS', "http://admin:admin@45.113.235.136:15984/")
DB_NAME = os.environ.get('DB_NAME', "comp90024_tweet_search")
API_TOKENS = os.environ.get('API_TOKENS', "twitter-api-tokens.csv")
# Getting Credentials for Twitter API (second data row of the token CSV)
creds_file = pd.read_csv(API_TOKENS, encoding='utf-8', sep=';')
consumer_api_key = creds_file['API_KEY'][1]
consumer_secret_key = creds_file['API_SECRET_KEY'][1]
consumer_access_token = creds_file['ACCESS_TOKEN'][1]
consumer_token_secret = creds_file['ACCESS_TOKEN_SECRET'][1]
# authentication object
auth = tweepy.OAuthHandler(consumer_api_key, consumer_secret_key)
# Set access token and access token secret
auth.set_access_token(consumer_access_token, consumer_token_secret)
# API Object -- JSONParser so responses come back as plain dicts
api = tweepy.API(auth, wait_on_rate_limit=True, parser=tweepy.parsers.JSONParser())
# CouchDB Database Object
server = couchdb.Server(ADDRESS)
db_conn = server[DB_NAME]
# Location Object (grid lookup for the monitored cities)
location_geojson = LocationUtils()
#A function the coordinates of the 8 capital cities
def load_coordinates(filepath):
    """Read the capital-city coordinate rows from a CSV file.

    Returns a list of rows, each row being the list of CSV fields.
    """
    with open(filepath, mode='r', encoding='utf-8-sig') as csv_file:
        return [row for row in csv.reader(csv_file)]
#A function to save the tweet
def save_tweet(tweet_data,location):
#Search if the tweet is within the top 50 cities grid
gridsearch = location_geojson.search_grid(location)
if gridsearch[0] == True:
#Change the _id to id_str of the tweet to avoid duplication in db
#Add aurin location ID and aurin location name
tweet_data['_id'] = tweet_data.pop('id_str')
tweet_data['AURIN_id'] = gridsearch[1]
tweet_data['AURIN_loc_name'] = gridsearch[2]
db_conn.save(tweet_data)
#A function to initiate tweet search api
def search_tweet(location, maxId):
    """Search recent tweets around one city and persist the matches.

    Args:
        location: CSV row for one capital city; location[1:] are the
            geocode fields joined into tweepy's "lat,long,radius" form.
        maxId: tweet-id upper bound ('' to fetch the most recent page).

    Returns:
        The max_id from the search metadata, or `maxId` unchanged when the
        search failed before producing any result.
    """
    location_details = [float(location[2]), float(location[1])]
    join_coor = ','.join(location[1:])
    tweetlist = None
    try:
        # The max ID makes the searcher find tweets up to that tweet ID;
        # longer (newer) IDs prompt the search API to page to older tweets.
        if maxId != '':
            tweetlist = api.search(geocode=join_coor, count=100, lang=['en'], max_id=maxId)
        else:
            tweetlist = api.search(geocode=join_coor, count=100, lang=['en'])
        statuses = tweetlist['statuses']
        for tweet_data in statuses:
            save_tweet(tweet_data, location_details)
            print('Saved Successfully')
    except tweepy.RateLimitError:
        # Force sleep the searcher when it reaches the rate limit;
        # it will start again after 20 mins.
        print('Rate Limit Encountered. Going to sleep')
        time.sleep(20 * 60)
    except Exception as e:
        print(e)
    # BUG FIX: previously this unconditionally dereferenced `tweetlist`,
    # raising UnboundLocalError whenever the request itself failed.
    if tweetlist is None:
        return maxId
    return tweetlist['search_metadata']['max_id']
def main():
    """Endlessly cycle through the capital cities, harvesting tweets.

    The latest max_id is persisted to MAXIDFILE between iterations so the
    search keeps paging backwards through older tweets across restarts.
    """
    capital_cities = load_coordinates(CITYFILE)
    maxId = ''
    while True:
        # Iterating through all the capital cities
        for city in capital_cities:
            try:
                # Load the most current maxID (the file's last line wins).
                # BUG FIX: both files are now opened via `with`; the write
                # handle was previously leaked on error.
                with open(MAXIDFILE, mode='r', encoding='utf-8-sig') as id_file:
                    for row in id_file:
                        maxId = row
                new_maxid = search_tweet(city, maxId)
                # Write the latest maxID into the file.
                with open(MAXIDFILE, 'w') as id_file:
                    id_file.write(str(new_maxid))
            except Exception as e:
                # Best effort: log and move on to the next city rather than
                # killing the harvester (was a silent bare `except: pass`).
                print(e)


main()
# ----------User Defined Function------------
def product():
    """Prompt for two integers and print their product."""
    first = int(input("Enter first number: "))
    second = int(input("Enter second number: "))
    print(first, " X ", second, " = ", first * second)


product()
# ------------Return statement--------------
def sum():
    """Prompt for two integers and return their sum.

    NOTE(review): both prompts say "first number" -- presumably a typo,
    kept as-is to preserve behavior.  Also shadows the builtin `sum`.
    """
    a = int(input("Enter first number: "))
    b = int(input("Enter first number: "))
    return a + b


print("The sum is: ", sum())
# ----------------Lambda Function---------------------
def var(a, b):
    """Return half the product of a and b."""
    return (a * b) / 2


print("The lambda function evaluated: ", var(3, 5))
# ----------------Factorial Function-------------------
def facto(n):
    """Return n! computed recursively.

    BUG FIX: the base case now covers n <= 1 so facto(0) returns 1
    (0! == 1 by definition) instead of recursing until RecursionError.
    """
    if n <= 1:
        return 1
    return n * facto(n - 1)
# Script entry: read a number from stdin and report its factorial.
num = int(input("Enter a number to find factorial: "))
print("The factorial of", num, "is: ", facto(num))
|
import pygame
import random
def main():
    """Interactive random-walk line drawer (pygame).

    Draws a growing trail of dots that bounces off the window edges;
    every `frequency` frames a fresh random inclination is rolled.
    ESC quits the program.
    """
    WIDTH = 600
    HEIGHT = 400
    WHITE = (255, 255, 255)
    loop = 0  # frame counter; decides when to re-roll the slope
    starting_point = (WIDTH // 2, HEIGHT // 2)
    positions = [starting_point]  # full trail of dots drawn so far
    separation = int(input('Type the separation between dots(int):'))
    frequency = int(input('Select the frequency of lines(10-many, 100-few): '))
    # The inclination of the initial line
    inclination = random.randint(separation*(-1), separation)
    # Per-step x/y displacement; x + y always equals `separation`.
    available_x = inclination
    available_y = separation-inclination
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption("Paint")
    screen.fill(WHITE)
    print(available_x, available_y)
    while True:
        loop += 1
        # To close the game when ESC is pressed
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    quit()
        # To maintain the trail of previous dots
        last_pos = positions[len(positions) - 1]
        if loop % frequency != 0:
            next_position = (last_pos[0]+available_x, last_pos[1]+available_y)
            # Bounce off the window edges by flipping the offending axis.
            # (The out-of-bounds point itself is still appended.)
            if next_position[0] < 0 or next_position[0] > WIDTH:
                available_x = available_x*(-1)
            elif next_position[1] < 0 or next_position[1] > HEIGHT:
                available_y = available_y*(-1)
            positions.append(next_position)
        else:
            # Every `frequency` frames: pick a fresh random direction.
            inclination = random.randint(separation * (-1), separation)
            available_x = inclination
            available_y = separation - inclination
        # Redraw the entire trail every frame.
        for actual_pos in positions:
            pygame.draw.circle(screen, (0, 0, 0), actual_pos, 1)
        pygame.display.update()


if __name__ == "__main__":
    main()
|
from django.conf import settings
from django.db import models
from django.shortcuts import reverse
from products.models import Product
# LABEL_CHOICES = (
# ('P', 'primary'),
# ('S', 'secondary'),
# ('D', 'danger')
# )
# class Order(models.Model):
# order_no = models.CharField(max_length=10, unique=True)
# start_date = models.DateTimeField(auto_now_add=True)
# ordered_date = models.DateTimeField(blank=True, null=True)
# ordered = models.BooleanField(default=False)
# def __str__(self):
# return self.order_no
class OrderItem(models.Model):
    """A single order line: one product plus a quantity, keyed by slug."""

    # name = models.ForeignKey(Order, on_delete=models.CASCADE, blank=True, null=True)
    order_name = models.CharField(max_length=16)
    # Unique identifier used for lookups; also what __str__ displays.
    slug = models.CharField(max_length=16, unique=True )
    item = models.ForeignKey(Product, on_delete=models.CASCADE)
    quantity = models.IntegerField(default=0)

    def __str__(self):
        return str(self.slug)
import sys
from rosalind_utility import parse_fasta
from math import factorial
if __name__ == "__main__":
    '''
    Given: A collection of k (k≤100) DNA strings of length at most 1 kbp each in FASTA format.
    Return: A longest common substring of the collection. (If multiple solutions exist, you may return any
    single solution.)
    '''
    # NOTE(review): the problem statement above describes a different task;
    # the code below takes only the FIRST sequence and prints
    # factorial(#A) * factorial(#C) -- confirm which problem was intended.
    input_lines = sys.stdin.read().splitlines()
    strings_dict = parse_fasta(input_lines)
    RNA = list(strings_dict.values())[0]
    print(factorial(RNA.count("A")) * factorial(RNA.count("C")))
|
#!/usr/bin/env python
#
# See LICENSE file for copyright and license details.
"""Setup.py: build, distribute, clean."""
import os
import sys
from distutils import log
from distutils.core import setup
from glob import glob
# Distribution metadata for the qt4reactor package.
setup(
    name='qt4reactor',
    version='1.0',
    license='MIT',
    author='Glenn H. Tarbox',
    author_email='glenn@tarbox.org',
    description='Twisted Qt4 Integration',
    long_description='Provides support for Twisted to be driven by the ' \
        'Qt mainloop.',
    url='https://github.com/ghtdak/qtreactor',
    # Ship every script in ./bin alongside the two top-level modules.
    scripts=glob("./bin/*"),
    py_modules=['qt4reactor', 'gtrial'],
)
|
#!/bin/python
vowels = ("a", "A", "e", "E", "i", "I", "o", "O", "u", "U")
sentence = raw_input("Enter your sentence: ")
words = sentence.split()
for character in words:
if character[0] in vowels:
print character + "way",
else:
print character[1:] + character[0] + "ay",
|
#!/usr/bin/python3
"""
adds all arguments to a Python list, and then save them to a file
"""
import sys
save_to_json_file = __import__('5-save_to_json_file').save_to_json_file
load_from_json_file = __import__('6-load_from_json_file').load_from_json_file
open("add_item.json", "a")
try:
l = load_from_json_file("add_item.json")
except ValueError:
l = []
save_to_json_file(l + sys.argv[1:], "add_item.json")
|
import nltk.lm.preprocessing as prep
from nltk.util import bigrams
def freq(text):
    """Return a mapping of token -> occurrence count for an iterable of tokens."""
    counts = {}
    for token in text:
        counts[token] = counts.get(token, 0) + 1
    return counts
def bigr_freq(text):
    """Return nested bigram counts: {first_word: {second_word: count}}.

    `text` is a list of sentences, each a list of tokens.
    """
    counts = {}
    for sentence in text:
        for first, second in bigrams(sentence):
            inner = counts.setdefault(first, {})
            inner[second] = inner.get(second, 0) + 1
    return counts
def perp(text):
    """Bigram perplexity-style score of a whitespace-tokenised string.

    Relies on the module-level `bigram_dict` and `freq_dict` built at
    import time from the training corpus.  Raises KeyError when the first
    word of a bigram was never seen in training.
    """
    bigr = bigrams(text.split())
    probab = 1
    for first, second in bigr:
        d = bigram_dict[first]
        if second in d:
            # Conditional probability P(second | first).
            p = d[second] / freq_dict[first]
        else:
            # Unseen bigram: zero probability (drives the product to 0).
            p = 0
        print(p)
        probab *= p
    # NOTE(review): the exponent 4 looks hard-coded for the 5-token test
    # sentence below (4 bigrams); a general perplexity would use the
    # bigram count of `text` -- confirm intent.
    return pow(probab, -(1 / 4))
# Build the unigram and bigram frequency tables from the training corpus,
# then print them and score a sample sentence.
with open('train_corpus', encoding='UTF-8') as file:
    text = [line.strip().split(' ') for line in file]
freq_dict = freq(prep.flatten(text))
bigram_dict = bigr_freq(text)
print(bigram_dict)
print(perp('<s> Георгий любит малину </s>'))
|
# -*- coding:utf-8 -*-
import json
import requests
from celery_config import cry
from config import xinlang_url, msg_url_1, rts
from robot.dao import update_user_role, dao_find, dao_sql, dao_add
import service_index
from robot.util.oss import itchat_upload_images_to_oss
def xin_lang_convert_short_url(surl):
    """Shorten a URL through the Sina (xinlang) short-url web service.

    Python 2 module.  Blocking HTTP GET; assumes the service returns a
    JSON list whose first element carries "url_short".
    """
    print "=============xin_lang_convert_short_url==============="
    print surl
    data = {"url_long": surl}
    content = requests.get(url=xinlang_url, params=data)
    xx = json.loads(content.content)
    return xx[0]["url_short"]
def handel_send_url(notice_info, code):
print "=================handel_send_url=========================="
print notice_info
msg_url = msg_url_1
msg_url += "?class_id_list=" + str(notice_info["school_class_id"])
msg_url += "¬ice_id=" + str(notice_info["notice_id"])
msg_url += "&user_id=" + str(notice_info["from_id"])
msg_url += "&mark_msg=" + code
msg_url = xin_lang_convert_short_url(msg_url)
return msg_url
def search_friends(new_instance_b, name=None, userName=None, remarkName=None, nickName=None,wechatAccount=None):
    """Look up one friend via itchat and project the fields we persist.

    (Original docstring: "search friend info by name".)
    Returns an empty dict when no friend matches.
    """
    search_friends_dict = {}
    search_friends = new_instance_b.search_friends(name=name, userName=userName, remarkName=remarkName, nickName=nickName,wechatAccount=wechatAccount)
    if search_friends:
        # Copy only the identity fields the database layer cares about.
        search_friends_dict["UserName"] = search_friends["UserName"]
        search_friends_dict["DisplayName"] = search_friends["DisplayName"]
        search_friends_dict["NickName"] = search_friends["NickName"]
        search_friends_dict["HeadImgUrl"] = search_friends["HeadImgUrl"]
        search_friends_dict["Uin"] = search_friends["Uin"]
        search_friends_dict["Alias"] = search_friends["Alias"]
    print "###search_friends##",search_friends
    return search_friends_dict
def sql_search_class_id(class_info_lsit):
    """Forward class info to service_index once per entry in the list."""
    # NOTE(review): the loop variable `i` is never used -- every iteration
    # passes the WHOLE list again, so the same call runs len(list) times.
    # Probably intended class_info_lsit=i; confirm against service_index.
    for i in class_info_lsit:
        service_index.sql_search_class_id(class_info_lsit=class_info_lsit)
def sql_search_user(userName=None, Alias=None, DisplayName=None, Uin=None, keyword=None):
    """Query the user table.

    Lookup by userName first; a truthy Alias then overrides that result.
    The sentinel Alias "not_Alias_to_key_word" (combined with a keyword)
    searches by the keyword instead of the alias itself.

    :param userName: itchat user name to match
    :param Alias: wechat alias, or the sentinel described above
    :param DisplayName: unused (kept for interface compatibility)
    :param Uin: unused (kept for interface compatibility)
    :param keyword: wechat id used with the sentinel Alias
    :return: a (possibly empty) list of user rows
    """
    matches = []
    if userName:
        matches = service_index.sql_search_user_by_username(user_name=userName)
    if Alias:
        weixin = keyword if (Alias == "not_Alias_to_key_word" and keyword) else Alias
        matches = service_index.sql_search_user_by_alias(weixin=weixin)
    return matches
def q_isat_into(new_instance_b, msg):
    """Handle an @-mention of the robot inside a group chat.

    Refreshes the group's member list, syncs it to the database and
    re-evaluates user roles for the class.  Always returns "".
    """
    print "######msg#####",msg
    info = msg['Content']
    # Strip the leading @robot-name mention from the message text.
    content = info.replace(msg["jiqiren_name"], "", 1)
    print "content", content
    q_username = msg['FromUserName']
    actual_nick_name = msg['ActualNickName']
    print "q_username", q_username
    print "ActualNickName", actual_nick_name  # sender's nickname inside the group
    """获取群成员列表"""
    # Fetch the detailed group member list.
    memberlist = new_instance_b.update_chatroom(q_username, detailedMember=True)
    """更新群成员"""
    # Sync group members into the database; returns the class id.
    school_class_id = service_index.into_q_memberlist(new_instance_b, memberlist)
    print "=======更新群成员=============="
    print "====school_class_id====", school_class_id
    """判断用户角色/修改用户角色"""
    # Determine / update user roles for this class (result unused here).
    user_list = update_user_role.sql_search_user_role(school_class_id)
    return ""
def into_edu_school_notice(content, to_user_name, user_list, content_class_name=None):
    """
    Store outgoing notice content, one record per teacher recipient.

    :param content: notice body
    :param to_user_name: sender
    :param user_list: candidate recipients (rows exposing .displayname)
    :param content_class_name: optional class name attached to the notice
    :return: list of created notice-info records
    """
    notice_info_list = []
    if user_list:
        for user in user_list:
            print "#####user.displayname####",user.displayname
            # Only users whose display name ends with "老师" ("teacher")
            # receive the notice.
            if user.displayname[-2:] == u"老师":
                print "########是老师"
                notice_info = service_index.into_edu_school_notice2(content=content, to_user_name=to_user_name, user=user,
                                                                    content_class_name=content_class_name)
                if notice_info:
                    notice_info_list.append(notice_info)
    return notice_info_list
def q_into_text(new_instance_b, msg):
    """Persist one group-chat message (plus room/sender metadata) to the DB.

    Gathers the chatroom record, the sender's friend record and the
    sender's avatar (uploaded to OSS), then hands a flat dict to
    service_index.into_chat_room_data.
    """
    print "=====FromUserName===ActualUserName======="
    print msg['FromUserName'], msg['ActualUserName']
    q_username = msg['FromUserName']  # when the robot sends, this is ToUserName
    u_username = msg['ActualUserName']  # the actual sender's user name
    qun_class = new_instance_b.search_chatrooms(userName=q_username)
    u_user = new_instance_b.search_friends(userName=u_username)
    print 'QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ'
    print u_user
    print u_user["NickName"]
    print 'QQQQQQQQQQQQQQQQQQQQQQQQQQQ'
    """获取微信头像存入数据库"""
    # Fetch the sender's avatar and store it in OSS (best effort).
    try:
        avatar = new_instance_b.get_head_img(userName=u_username, chatroomUserName=q_username)
    except:
        avatar = ''
    head_img = itchat_upload_images_to_oss(avatar)
    print 'TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'
    print "============qun_class================="
    print qun_class
    print "============qun_class================="
    # These chatroom fields are not always present; default each to ''.
    try:
        room_uin = qun_class['Uin']
    except Exception as e:
        room_uin = ''
    try:
        EncryChatRoomId = qun_class['EncryChatRoomId']
    except Exception as e:
        EncryChatRoomId = ''
    try:
        IsOwner = qun_class['IsOwner']
    except Exception as e:
        IsOwner = ''
    # Flatten everything into the record expected by the service layer.
    data = dict()
    data["content"] = msg['Content']
    data["actual_nick_name"] = msg['ActualNickName']
    data["actual_user_name"] = msg['ActualUserName']
    data["room_uin"] = room_uin
    data["room_user_name"] = q_username
    data["room_nick_name"] = qun_class["NickName"]
    data["msg_type"] = msg['Type']
    data["key_word"] = msg['Url']
    data["city"] = qun_class['City']
    data["province"] = qun_class['Province']
    data["member_count"] = qun_class['MemberCount']
    data["encry_chat_room_id"] = EncryChatRoomId
    data["is_owner"] = IsOwner
    data["robot_uin"] = msg["jiqiren_uin"]
    data["robot_nick_name"] = msg["jiqiren_name"]
    #data["from_uin"] = u_user["Uin"]
    data["from_nick_name"] = u_user["NickName"]
    data["from_user_name"] = msg['ActualNickName']
    data["head_img"] = head_img
    chat_room_data = service_index.into_chat_room_data(data)
from datetime import datetime
# def friend_into_text(msg):
# print "####msg#####", msg
# content = msg['Content']
# from_user_name = msg['FromUserName']
# to_user_name = msg['ToUserName']
# print "FromUserName@@@", from_user_name
# print "ToUserName@@@", to_user_name
# print "content@@@", content
# return service_index.into_friend_data_text(content=content, from_user_name=from_user_name,
# to_user_name=to_user_name)
def friend_into_text2(jqr, content, to_user_name, search_friend, msg_type, created_time=None):
    """Store one private (friend) message; defaults created_time to now."""
    if created_time is None:
        created_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    return service_index.into_friend_data_text2(jqr=jqr, content=content,
                                                to_user_name=to_user_name,
                                                search_friend=search_friend,
                                                msg_type=msg_type,
                                                created_time=created_time)
def handel_class_update_sys(uin, nick_name, user_name,KeyWord):
    """Thin wrapper: push one chatroom identity update to service_index."""
    service_index.index_class_update_sys(uin=uin, nick_name=nick_name, user_name=user_name, KeyWord=KeyWord)
def friend_update_sys(uin, nick_name, alias, user_name):
    """Thin wrapper: push one friend identity update to service_index."""
    service_index.friend_update_sys(uin=uin, nick_name=nick_name, alias=alias, user_name=user_name)
def class_manage(new_instance_b, msg, content_name, content_cmd):
    """Resolve the classes a sender may manage for a given command.

    Looks up the sender's friend record, asks service_index for the
    matching class ids, and returns a dict bundling notice id, sender
    identity, the class lists and any error message.
    """
    content = msg['Content']
    from_user_name = msg['FromUserName']
    to_user_name = msg['ToUserName']
    friends_info = new_instance_b.search_friends(name=None, userName=from_user_name, remarkName=None, nickName=None)
    # print "#######friends_info##############",friends_info
    friend_uin = friends_info["Uin"]
    friend_nick_name = friends_info["NickName"]
    friend_user_name = friends_info["UserName"]
    weixin = friends_info["Alias"]
    q_class_id_list, q_class_list, q_class_user_name_list, notice_id, error_info = service_index.get_class_id(alias=weixin,
                                                                                                             nick_name=friend_nick_name,
                                                                                                             from_user_name=from_user_name,
                                                                                                             content=content,
                                                                                                             content_name=content_name
                                                                                                             )
    # De-duplicate the chatroom user names (order is not preserved).
    q_class_user_name_list = list(set(q_class_user_name_list))
    bjgl_dict = {"notice_id": notice_id, "Alias": weixin, "nick_name": friend_nick_name, "user_name": friend_user_name,
                 "class_id_list": q_class_id_list, "q_class_user_name_list": q_class_user_name_list,
                 "error_info": error_info}
    return bjgl_dict
# """更新数据, 暂时不用"""
# def into_edu_wx_robot_friend(recod):
# itchat_get_friends = recod.get_friends()
# #print "#itchat_get_friends##",itchat_get_friends
# if itchat_get_friends:
# service_index.into_edu_wx_robot_friend(itchat_get_friends)
#
#
# """更新数据, 暂时不用"""
# def into_edu_wx_robot_chat(recod):
# itchat_get_chatrooms = recod.get_chatrooms()
# #print "#itchat_get_chatrooms##",itchat_get_chatrooms
# if itchat_get_chatrooms:
# service_index.into_edu_wx_robot_chat(itchat_get_chatrooms)
# friend_data_text = index.into_friend_data_text(content=content, from_user_name=from_user_name,
# to_user_name=to_user_name)
# content = msg['Content']
# q_username = msg['FromUserName'] # 机器人自己发消息,q_username为ToUserName
# actual_nick_name = msg['ActualNickName']
# actual_user_name = msg['ActualUserName']
# meg_type = msg['Type']
# chat_room_data = index.into_chat_room_data(content=content, actual_nick_name=actual_nick_name,
# actual_user_name=actual_user_name, q_username=q_username)
def find_class_data(school_class_id):
    """Return the class/user rows for the given school class id."""
    return dao_find.find_class_user_by_id(school_class_id)
# @cry.task
# def add_friend_chat():
# return into_edu_wx_robot_friend()
#
#
# @cry.task
# def add_room_chat():
# return into_edu_wx_robot_chat()
@cry.task
def sync_friends_queue(data):
    """Celery task: insert one friend record (built by sync_friends)."""
    print "=======sync_friends_queue=========="
    dao_add.add_edu_friend_data(data)
def sync_friends(recond, friends, jqr):
    """Sync the robot's friend list into the database via celery tasks.

    :param recond: itchat instance used for avatar / friend lookups
    :param friends: iterable of itchat friend dicts
    :param jqr: robot identity dict with jqr_uin / jqr_name keys
    """
    print "=======sync_friends=========="
    dao_sql.insert_robot_friend_history(jqr["jqr_uin"])
    for friend in friends:
        """获取微信头像存入数据库"""
        # Fetch the avatar and upload it to OSS.
        avatar = recond.get_head_img(userName=friend["UserName"])
        head_img = itchat_upload_images_to_oss(avatar)
        if not friend["Alias"]:
            # NOTE(review): s_friend is fetched but never used, and
            # data["user_name"] below stores the (possibly empty) Alias --
            # confirm intended behavior.
            s_friend = recond.search_friends(userName=friend["UserName"])
        data = dict()
        data["robot_uin"] = jqr["jqr_uin"]
        data["robot_nick_name"] = jqr["jqr_name"]
        data["user_name"] = friend["Alias"]
        data["friend_uin"] = friend["Uin"]
        data["nick_name"] = friend["NickName"]
        data["head_img"] = head_img
        data["remark_name"] = friend["DisplayName"]
        data["sex"] = friend["Sex"]
        data["province"] = friend["Province"]
        data["city"] = friend["City"]
        sync_friends_queue.delay(data)
@cry.task
def sync_rooms_member_queue(data):
    """Celery task: insert one chatroom-member record (built by sync_rooms)."""
    print "=======add_edu_robot_room_member_data=========="
    dao_add.add_edu_robot_room_member_data(data)
def sync_rooms(new_instance_b, itchat_get_chatrooms, jqr):
    """Sync chatrooms and their member lists into the database.

    Rooms without an "@@" user-name prefix (i.e. not group chats) or
    missing NickName/Uin are skipped.  Member inserts go through celery.
    """
    dao_sql.insert_robot_chat_room_history(jqr["jqr_uin"])
    dao_sql.insert_robot_room_memeber_history()
    for robot in itchat_get_chatrooms:
        print "=====robot itchat_get_chatrooms=========="
        print robot
        if "ChatRoomOwner" in robot:
            ChatRoomOwner = robot["ChatRoomOwner"]
        else:
            ChatRoomOwner = ""
        # Group-chat user names carry the "@@" marker.
        if "@@" in robot["UserName"] and "NickName" in robot and "Uin" in robot:
            data = dict()
            data["robot_uin"] = jqr["jqr_uin"]
            data["robot_nick_name"] = jqr["jqr_name"]
            data["room_user_name"] = robot["Alias"]
            data["room_uin"] = robot['Uin']
            data["room_nick_name"] = robot["NickName"]
            data["is_owner"] = ChatRoomOwner
            data["encry_chat_room_id"] = robot['EncryChatRoomId']
            data["city"] = robot["City"]
            data["member_count"] = robot["MemberCount"]
            data["province"] = robot["Province"]
            room_id = dao_add.add_edu_robot_room_data(data)
            if robot["MemberList"]:
                print "#####MemberList######", robot["MemberList"]
                for user in robot["MemberList"]:
                    user_info = new_instance_b.search_friends(userName=user["UserName"])
                    if user_info:
                        user_uin = user_info["Uin"]
                    else:
                        # Member not in the friend list: Uin unavailable.
                        user_info = {}
                        user_uin = U'暂无法获取'
                    user_info["NickName"] = user["NickName"]
                    if not user["DisplayName"]:
                        # Fall back to the nickname when no display name set.
                        user["DisplayName"] = user_info["NickName"]
                    data = dict()
                    data["room_id"] = room_id
                    data["room_uin"] = robot['Uin']
                    data["member_uin"] = user_uin
                    data["user_name"] = user_info["NickName"]
                    data["room_nick_name"] = robot["NickName"]
                    data["nick_name"] = user["DisplayName"]
                    data["attr_status"] = user["AttrStatus"]
                    data["key_word"] = user["KeyWord"]
                    sync_rooms_member_queue.delay(data)
@cry.task
def sync_data(n):
    """Celery smoke-test task: print the numbers 0..n-1."""
    for i in range(n):
        print "==================sync_data==msg================"
        print i
@cry.task
def sync_robot_data():
    """Celery task: upsert every configured robot (module-level rts) into the DB."""
    if len(rts) > 0:
        for robot in rts:
            data = dict()
            data["user_name"] = robot["nick_name"]
            data["nick_name"] = robot["nick_name"]
            data["robot_uin"] = robot["robot_uin"]
            r = dao_find.find_edu_robot_by_uin(robot["robot_uin"])
            if r:
                # Existing robot: refresh only its names.
                robot_id = dao_find.update_edu_robot(data, r.id, ["user_name", "nick_name"])
            else:
                robot_id = dao_add.add_edu_robot_data(data)
            print "===sync_robot_data======", robot_id
def add_robot_self(nick_name, uin):
    """Upsert the robot's own identity row and return its database id."""
    payload = dict()
    payload["user_name"] = nick_name
    payload["nick_name"] = nick_name
    payload["robot_uin"] = uin
    existing = dao_find.find_edu_robot_by_uin(uin)
    if existing:
        # Already known: refresh only the name fields.
        return dao_find.update_edu_robot(payload, existing.id, ["user_name", "nick_name"])
    return dao_add.add_edu_robot_data(payload)
def service_search_room_id_by_uin(room_uin):
    """Look up a chatroom row by its Uin."""
    return dao_find.find_room_by_room_uin(room_uin)


def service_search_room_id_by_nick_name(room_nick_name):
    """Look up a chatroom row by its nickname."""
    return dao_find.find_room_by_room_nick_name(room_nick_name)
"""
Scrapes information from the icd9data.com website.
Python 2.7.x
"""
import json
import requests
from bs4 import BeautifulSoup
base_url = "http://www.icd9data.com"
root_path = "/2015/Volume1/default.htm"
icd9_data = requests.get(base_url + root_path)
icd9_bs = BeautifulSoup(icd9_data.text, "lxml")
# first level codes
lvl1 = icd9_bs.ul.find_all('li')
codes = []
def build_node(href, code, descr, depth):
    """Create the dict describing one ICD-9 hierarchy entry."""
    return {
        'href': href,
        'code': code,
        'depth': depth,
        'descr': descr,
    }
def get_children(lvl_doc):
    """
    Extracts the relevant UL object in lvl_root. We assume
    the two list types are mutually exclusive.
    """
    # Try, in order: a div.definitionList's inner <ul>, a bare
    # ul.definitionList, then ul.codeHierarchyUL.
    lst_root = lvl_doc.find('div', class_='definitionList')
    if lst_root is not None:
        lst_root = lst_root.ul
    if lst_root is None:
        lst_root = lvl_doc.find('ul', class_='definitionList')
    if lst_root is None:
        lst_root = lvl_doc.find('ul', class_='codeHierarchyUL')
    # Raises AttributeError when none of the list types is present.
    return list(lst_root.children)
def parse_base_cat(lvl, depth, path_so_far):
    """
    Parses base category icd9data information (ranged categories).

    Recurses through ranged categories until a page of specific
    (3+ digit) codes is reached, then hands off to parse_specific.
    """
    depth += 1
    base_path = list(path_so_far)  # copy path_so_far
    for code_group in lvl:
        path_so_far = list(base_path)
        code = code_group.a.text
        text_gen = code_group.strings
        # The description is the LAST string inside the element.
        descr = None
        for descr in code_group.strings:
            pass
        href = code_group.a['href']
        node = build_node(href, code, descr, depth)
        path_so_far.append(node)
        print base_url + node['href']
        next_lvl_data = requests.get(base_url + node['href'])
        next_lvl_data = BeautifulSoup(next_lvl_data.text, "lxml")
        next_lvl = get_children(next_lvl_data)
        if (next_lvl[0].img is not None
                and (next_lvl[0].img['alt'] == 'Non-specific code'
                     or next_lvl[0].img['alt'] == 'Specific code' )):
            # we're at 3-digit codes now
            parse_specific(next_lvl, depth, list(path_so_far))
        else:
            # then we're still in ranged categories
            parse_base_cat(next_lvl, depth, list(path_so_far))
def build_specific_node(code, depth):
    """
    Builds specific-level (3+ digit) nodes
    """
    descr_span = code.find("span", class_="threeDigitCodeListDescription")
    return build_node(code.a['href'], code.a.text, descr_span.text, depth)
def parse_specific(lvl, depth, path_so_far):
    """
    Parses specific-level (3+ digits) icd9 information

    Walks the flat code list: a "Non-specific code" entry opens a nested
    group whose children share its code prefix; "Specific code" entries
    are leaves.  Completed root-to-leaf paths go into the global `codes`.
    """
    depth += 1
    start = 0
    base_path = list(path_so_far)
    if lvl[0].img['alt'] == 'Non-specific code':
        # we're leading with the previous level code, don't add again
        start = 1
    code_iter = iter(lvl[start:])
    code = code_iter.next()  # Python 2 iterator protocol
    while True:
        try:
            path_so_far = list(base_path)  # make a copy
            if code.find_all('img')[-1]['alt'] == 'Non-specific code':
                # Non-specific parent: consume its specific children until
                # the code prefix no longer matches.
                path_so_far.append(build_specific_node(code, depth))
                specific_path = list(path_so_far)
                non_specific_code = code.a.text
                depth += 1
                code = code_iter.next()
                while (code.find_all('img')[-1]['alt'] != 'Non-specific code'
                       and code.a.text.startswith(non_specific_code)):
                    path_so_far = list(specific_path)
                    path_so_far.append(build_specific_node(code, depth))
                    codes.append(path_so_far)  # add to global list
                    code = code_iter.next()
                depth -= 1
                continue
            elif code.find_all('img')[-1]['alt'] == 'Specific code':
                # at a leaf code
                path_so_far = list(path_so_far)
                path_so_far.append(build_specific_node(code, depth))
                codes.append(path_so_far)
                code = code_iter.next()
        except StopIteration:
            break
if __name__ == '__main__':
    # Crawl every top-level category, then dump all root-to-leaf paths.
    for base in lvl1:
        depth = 0
        path_so_far = []
        parse_base_cat([base], depth, path_so_far)
    print len(codes)
    with open('codes.json', 'w') as outfile:
        json.dump(codes, outfile)
|
import math
from typing import List
import numpy as np
import io
def input_transpose(sents, pad_token):
    """
    This function transforms a list of sentences of shape (batch_size, token_num) into
    a list of shape (token_num, batch_size). You may find this function useful if you
    use pytorch

    Sentences shorter than the longest one are padded with `pad_token`.
    """
    longest = max(len(s) for s in sents)
    return [
        [sent[pos] if len(sent) > pos else pad_token for sent in sents]
        for pos in range(longest)
    ]
def read_corpus(file_path, source):
    """Read a tokenised corpus file, one whitespace-separated sentence per line.

    Args:
        file_path: path to the UTF-8 corpus file.
        source: 'src' or 'tgt'; target sentences are wrapped in <s> ... </s>.

    Returns:
        A list of token lists.
    """
    data = []
    # BUG FIX: open via a context manager; the original handle was never
    # closed.
    with open(file_path, encoding="utf-8") as f:
        for line in f:
            sent = line.strip().split(' ')
            # only append <s> and </s> to the target sentence
            if source == 'tgt':
                sent = ['<s>'] + sent + ['</s>']
            data.append(sent)
    return data
def batch_iter(data, batch_size, shuffle=True):
    """
    Yield (src_sents, tgt_sents) mini-batches from a list of (src, tgt) pairs.
    Pairs are sorted by source-sentence length (longest first) so each batch
    is roughly length-homogeneous; only the batch ORDER is shuffled.
    """
    num_batches = math.ceil(len(data) / batch_size)
    # sort once by the length of the src sentence, descending
    ordered = sorted(data, key=lambda pair: len(pair[0]), reverse=True)
    batch_order = list(range(num_batches))
    if shuffle:
        np.random.shuffle(batch_order)
    for b in batch_order:
        chunk = ordered[b * batch_size:(b + 1) * batch_size]
        yield [pair[0] for pair in chunk], [pair[1] for pair in chunk]
def load_matrix(fname, vocabs, emb_dim):
    """Build an embedding matrix for `vocabs` from a fastText-style vector file.

    The file's first line is "<count> <dim>"; each following line is
    "<word> <v1> <v2> ...".  Words missing from the file get random vectors.

    :param fname: path to the vector file
    :param vocabs: iterable of vocabulary words, in output row order
    :param emb_dim: embedding dimensionality of the output matrix
    :return: numpy array of shape (len(vocabs), emb_dim)
    """
    word2vec = {}
    # `with` closes the handle (the original never closed it)
    with io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        # consume the "<count> <dim>" header line; values themselves are unused
        n, d = map(int, fin.readline().split())
        for line in fin:
            tokens = line.rstrip().split(' ')
            # np.float was removed in numpy >= 1.24; builtin float is equivalent
            word2vec[tokens[0]] = np.array(tokens[1:]).astype(float)
    weights_matrix = np.zeros((len(vocabs), emb_dim))
    for i, word in enumerate(vocabs):
        try:
            weights_matrix[i] = word2vec[word]
        except KeyError:
            # out-of-file word: random initialization, as before
            weights_matrix[i] = np.random.random(size=(emb_dim,))
    return weights_matrix
|
class Pokemon:
    """Record of one pokemon's battle stats plus a selection flag."""

    def __init__(self, attack, defense, health):
        # raw stat values, in the order they are read from input
        self.attack, self.defense, self.health = attack, defense, health
        # flipped to True once this pokemon makes a top-k list
        self.is_picked = False
if __name__ == "__main__":
    # First line: n pokemon follow, pick the top k for each stat.
    n, k = map(int, input().split())
    roster = [Pokemon(*map(int, input().split())) for _ in range(n)]
    # Mark the k best pokemon for each of the three stats; a pokemon picked
    # for several stats is still counted once via its is_picked flag.
    for stat_key in (lambda p: p.attack, lambda p: p.defense, lambda p: p.health):
        roster.sort(key=stat_key, reverse=True)
        for chosen in roster[:k]:
            chosen.is_picked = True
    # Number of distinct pokemon picked at least once.
    print(sum(1 for p in roster if p.is_picked))
|
#!/usr/bin/env python
# Function: demo of basic pymysql usage -- connect, query, scroll, lastrowid
# Filename:
import pymysql

conn = pymysql.connect(host = '127.0.0.1', port = 3306,
                       user = 'root', passwd = 'lemaker',
                       db = 'test_20180122', charset = 'utf8')
# cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)  # rows as dicts
cursor = conn.cursor()  # rows as tuples
effect_row = cursor.execute('desc school;')  # returns the affected row count

cursor.execute('select * from school;')
row_1 = cursor.fetchone()
print(row_1)
row_2 = cursor.fetchone()
print(row_2)
# move the result cursor back two rows, then re-read
cursor.scroll(-2, mode='relative')
# cursor.scroll(0, mode='absolute')
row = cursor.fetchone()
print(row)

# Read the auto-increment id of the last INSERT *before* closing: the
# original read cursor.lastrowid after cursor.close()/conn.close(), when the
# cursor is no longer valid.  (With no INSERT executed above it is 0.)
new_id = cursor.lastrowid
print(new_id)

cursor.close()
conn.close()
#!/usr/bin/python
import csv
import string
import re

# Feature files to load, in output-column order.  Feature 10 is intentionally
# excluded (it was commented out in the original pipeline); the original also
# read feature12 twice -- the duplicate read is removed here.
FEATURE_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12]

# One read per file instead of eleven copy-pasted open/read blocks.
# 'rb' is kept from the original (this script is Python 2 style).
feature_lists = []
for fid in FEATURE_IDS:
    with open('../files/feature%d_train.txt' % fid, 'rb') as f:
        feature_lists.append(f.read().splitlines())

with open('../files/train.csv', 'rb') as f:
    train_file_as_list = list(csv.reader(f))

# Write the feature matrix; each row keeps the original trailing comma.
with open("../files/features_train_x.csv", "w") as csv_file:
    for i in range(0, len(feature_lists[0])):
        csv_file.write(",".join(fl[i] for fl in feature_lists) + "," + "\n")

# Write the labels: column 4 of train.csv, skipping the header row (i + 1).
with open("../files/features_train_y.csv", "w") as csv_file:
    for i in range(0, len(feature_lists[0])):
        csv_file.write(train_file_as_list[i + 1][4] + "\n")

print("Products have been made into csv file")
# then open and save in excel as csv
"""Author Arianna Delgado
Created on May 15, 2020
"""
"""For multiple lines Comments use """
# I can use this three 'symbols' to comments (#, ''' or """)
print('We can use \'single quote\' or \"double quote to print\".')
# Indentation is used as 4 space; it is mandatory in python to recognize a block of code! |
import unittest
from selenium import webdriver
import time
class TestsLessonTen(unittest.TestCase):
    """Smoke tests for the lesson-10 registration forms."""

    def _submit_registration(self, link):
        """Open *link*, fill the three required fields, submit the form and
        return the resulting <h1> text.  Extracted because test_1 and test_2
        were byte-for-byte duplicates apart from the URL and fail message."""
        browser = webdriver.Chrome()
        try:
            browser.get(link)
            input1 = browser.find_element_by_xpath("//input[@class='form-control first' and @required]")
            input1.send_keys("Ivan")
            input2 = browser.find_element_by_xpath("//input[@class='form-control second' and @required]")
            input2.send_keys("Petrov")
            input3 = browser.find_element_by_xpath("//input[@class='form-control third' and @required]")
            input3.send_keys("Smolensk")
            button = browser.find_element_by_css_selector("button.btn")
            button.click()
            # give the success page a moment to render
            time.sleep(1)
            welcome_text_elt = browser.find_element_by_tag_name("h1")
            return welcome_text_elt.text
        finally:
            # the original never closed the browser, leaking a Chrome
            # process per test
            browser.quit()

    def test_1(self):
        welcome_text = self._submit_registration("http://suninjuly.github.io/registration1.html")
        self.assertEqual("Поздравляем! Вы успешно зарегистировались!", welcome_text, "Fail Test 1")

    def test_2(self):
        welcome_text = self._submit_registration("http://suninjuly.github.io/registration2.html")
        self.assertEqual("Поздравляем! Вы успешно зарегистировались!", welcome_text, "Fail Test 2")
if __name__ == "__main__":
unittest.main() |
# redis
# 1 -- direct connection (kept below as a commented-out example)
'''
import redis
r=redis.Redis(host='127.0.0.1',port=6379,db=0)
r.set('name','baby')
print(r.get('name'))
print(r.dbsize())
'''
# 2 -- connection pool: share one pool of sockets between Redis clients
import redis
pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
r = redis.Redis(connection_pool=pool)
r.set('name', 'zhangsan') # set key 'name', overwriting any earlier value
print (r.get('name')) # read it back
|
from functools import cmp_to_key
import numpy as np
class Point(object):
    # Minimal mutable 2-D point; __slots__ restricts instances to x/y and
    # drops the per-instance __dict__.  Note: x and y are UNSET until assigned.
    __slots__ = ('x', 'y')
# Shared pivot read by cmp_by_clockwise; its coordinates are assigned by
# sort_clockwise() before any comparison happens.
center = Point()
def cmp_by_clockwise(a_point, b_point):
    """Three-way comparator that orders two (x, y) points clockwise around the
    module-level `center`, used via functools.cmp_to_key.

    Returns -1 when a_point sorts before b_point, 1 when after, 0 when equal.
    """
    # old-style cmp() replacement for Python 3
    cmp = lambda a,b : (a > b) - (a < b)
    a_x, a_y = a_point
    b_x, b_y = b_point
    # anything in the right half-plane sorts before anything in the left one
    if a_x - center.x >= 0 and b_x - center.x < 0:
        return -1
    if a_x - center.x < 0 and b_x - center.x >= 0:
        return 1
    # both points are vertically aligned with the center: order by y
    if a_x - center.x == 0 and b_x - center.x == 0:
        if a_y - center.y >= 0 or b_y - center.y >= 0:
            return -cmp(a_y, b_y)
        return cmp(a_y, b_y)
    # compute the cross product of vectors (center -> a) x (center -> b)
    det = (a_x - center.x) * (b_y - center.y) - \
        (b_x - center.x) * (a_y - center.y)
    if det < 0:
        return -1
    if det > 0:
        return 1
    # points a and b are on the same line from the center
    # check which point is closer to the center
    d1 = (a_x - center.x) * (a_x - center.x) + \
        (a_y - center.y) * (a_y - center.y)
    d2 = (b_x - center.x) * (b_x - center.x) + \
        (b_y - center.y) * (b_y - center.y)
    if d1 > d2:
        return -1
    elif d1 < d2:
        return 1
    return 0
def sort_clockwise(point_array):
    """Return the points of *point_array* as a numpy array, ordered clockwise
    around their centroid (which is stored in the module-level `center`)."""
    global center
    centroid = np.mean(point_array, axis=0)
    # publish the pivot so cmp_by_clockwise can see it
    center.x, center.y = centroid[0], centroid[1]
    ordered = sorted(point_array, key=cmp_to_key(cmp_by_clockwise))
    return np.array(ordered)
if __name__ == "__main__":
point_array = [(10, 20), (20, 20), (10, 30), (20, 30)]
print(sort_clockwise(point_array))
|
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
import atexit
import os
import re
import sys
import urllib2
from pants.base.build_environment import get_buildroot
from twitter.common.dirutil import Fileset, safe_delete
# TODO(John Sirois): replace this source fetching backdoor with a proper remote fileset once
# pants supports that (on the pants roadmap): https://github.com/twitter/commons/issues/338
class RemotePythonThriftFileset(object):
    """Downloads remote .thrift sources into a staging directory, rewriting
    the Python reserved word 'from' so the generated code stays importable.
    Instances are callable with (base_url, sources) and return a lazy Fileset.
    """
    # TODO(wickman) Use the antlr thrift parser to just walk the thrift AST
    # and replace keywords named by 'from' with 'from_'.
    # Matches a line-final 'from' (optionally trailed by , or ;) anywhere in
    # the file, thanks to re.MULTILINE.
    FROM_RE = re.compile('from[,;]*$', re.MULTILINE)
    FROM_REPLACEMENT = 'from_'
    @classmethod
    def factory(cls, parse_context):
        # Stage fetched files under the directory of the calling BUILD file.
        staging_dir = os.path.join(get_buildroot(), parse_context.rel_path)
        return cls(staging_dir)
    def __init__(self, staging_dir):
        self._staging_dir = staging_dir
        self._fetched = []  # absolute paths downloaded so far; see cleanup()
    def _fetch(self, base_url, sources):
        """Download each source under base_url, filter it, and yield its name."""
        for source in sources:
            if isinstance(source, tuple):
                assert len(source) == 2, 'Expected source, namespace tuple, got %s' % repr(source)
                source_file, namespace = source
            elif isinstance(source, str):
                source_file, namespace = source, None
            # NOTE(review): a source that is neither tuple nor str leaves
            # source_file unbound and raises NameError below -- confirm inputs.
            fetch_path = base_url + '/' + source_file
            print('Fetching %s' % fetch_path, file=sys.stderr)
            target_file = os.path.join(self._staging_dir, source_file)
            url = urllib2.urlopen(fetch_path)
            with open(target_file, 'wb') as fp:
                fp.write(self.prefilter(url.read(), namespace=namespace))
            self._fetched.append(target_file)
            yield source_file
    def prefilter(self, content, namespace=None):
        """Optionally prepend a 'namespace py' header, then rewrite 'from'."""
        return ''.join(['namespace py %s\n' % namespace if namespace else '',
                        re.sub(self.FROM_RE, self.FROM_REPLACEMENT, content)])
    def cleanup(self):
        # Remove everything we downloaded; registered via atexit in __call__.
        for fetched in self._fetched:
            safe_delete(fetched)
    def __call__(self, base_url, sources):
        # Defer the actual network fetch until the Fileset is evaluated.
        def fetch():
            atexit.register(self.cleanup)
            return self._fetch(base_url, sources)
        return Fileset(fetch)
"""
Default SVG cover image generator
@author: GauravManek
"""
import plugins, io;
from PIL import Image, ImageDraw, ImageFont;
from math import floor;
class DefaultPNGCover(plugins.BaseCover):
    """Renders a simple grayscale PNG cover (title, emblem, author, attrib)
    from book metadata."""

    def __init__(self):
        # A short string used to report this name in the style.
        self.name = "Default PNG Cover"
        # A short string used to identify this styling option in the commandline.
        # Does not have to be different than the Style devstr
        self.devstr = "png"

    # Produces a (ext, data) tuple, given:
    #   bookmeta = The book metadata; must contain "title" and "author",
    #              may contain "attrib".
    # The output is:
    #   ext  = File extension of data, supported by the ePub standard.
    #   data = Image Data.
    def cover(self, bookmeta):
        im = Image.new("L", (600, 1000), 0)
        draw = ImageDraw.Draw(im)
        # Background: white page border, dark title box
        draw.rectangle((10, 10, 590, 990), 0xFF)
        draw.rectangle((20, 20, 580, 700), 0x33)
        # Title:
        MAX_CHAR_LINE = 16  # max characters per wrapped line
        MAX_LINE = 5        # max number of title lines actually drawn
        tt = [""]
        # Word wrapping; "_" keeps common phrase pairs together on one line:
        title = bookmeta["title"]
        title = title.replace("and the", "and_the").replace("of the", "of_the")
        for word in title.split():
            line = tt.pop()
            if "_" in word:
                if len(line) > 0:
                    tt.append(line)
                tt.append(word.replace("_", " "))
                tt.append("")
            elif len(line) == 0 or len(line) + len(word) < MAX_CHAR_LINE:
                tt.append(line + word + " ")
            else:
                tt.append(line)
                tt.append(word + " ")
        # Strip all lines:
        tt = [t.strip() for t in tt]
        offset = (floor(580/60) - len(tt))*20
        f36 = ImageFont.truetype("verdana.ttf", 56)
        # FIX: the original capped this loop with MAX_CHAR_LINE (16), leaving
        # MAX_LINE unused -- 16 lines at 60px spacing overflow the 680px-high
        # title box, so cap with MAX_LINE instead.
        for l in range(min(len(tt), MAX_LINE)):
            sz = f36.getsize(tt[l])
            draw.text(((600 - sz[0])/2, 60*l + offset), tt[l], fill=0xFF, font=f36)
        # Hexagonal emblem, centered at (300, 700), drawn twice for a border:
        poly = [(74,91.57), (26,91.57), (2,50), (26,8.43), (74,8.43), (98,50), (74,91.57)]
        poly = [(x-50, y-50) for x, y in poly]
        draw.polygon([(300 + x*1.1, 700 + y*1.1) for x, y in poly], fill=0xFF)
        draw.polygon([(300 + x, 700 + y) for x, y in poly], fill=0x99)
        # Author line, centered:
        f20i = ImageFont.truetype("verdanai.ttf", 40)
        sz = f20i.getsize(bookmeta["author"])
        draw.text(((600-sz[0])/2, 750), bookmeta["author"], fill=0x00, font=f20i)
        # Attrib
        if "attrib" in bookmeta:
            f24 = ImageFont.truetype("verdana.ttf", 40)
            sz = f24.getsize(bookmeta["attrib"])
            draw.text(((600-sz[0])/2, 1000 - 15 - sz[1]), bookmeta["attrib"], fill=0x99, font=f24)
        b = io.BytesIO()
        im.save(b, "PNG")
        return (".png", b.getvalue())
if __name__ == "plugins":
plugins.register(DefaultPNGCover());
|
#!/usr/bin/env python
# ==================================================================================== #
#
# Copyright (c) 2017 Raffaele Bua (buele)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==================================================================================== #
import datetime
from sqlalchemy import *
from sqlalchemy.orm import *
import configparser
from src.data.database.entities.product import Product
from src.data.database.entities.product import ProductStatus
__author__ = "Raffaele Bua (buele)"
__copyright__ = "Copyright 2017, Raffaele Bua"
__credits__ = ["Raffaele Bua"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Raffaele Bua"
__contact__ = "info@raffaelebua.eu"
__status__ = "Development"
class ProductsService():
    """ This class is the service to manage products inside the database. It is a wrapper to manage products,
        exploiting SqlAlchemy ORM.
    """
    def __init__(self):
        config = configparser.ConfigParser()
        config.read("src/data/database/config/db.cfg")
        self.engine = create_engine(config['database']['connection_string'], pool_recycle=3600)
        # Build the session factory once instead of on every method call.
        self._Session = sessionmaker(bind=self.engine)

    def _query_by_status(self, status, first_only=False):
        """Return products with *status*: one row (or None) when first_only,
        otherwise a list.  Centralizes the query that five getters repeated."""
        session = self._Session()
        try:
            query = session.query(Product).filter(Product.status == status)
            return query.first() if first_only else query.all()
        finally:
            # always release the connection, even when the query raises
            session.close()

    def add_new_product(self, product):
        """ Add a new product in the database
        :param product: Product entity to add in the database
        :type product: Product
        :return: True when inserted; False when a product with the same name
                 already exists or the insert failed.
        """
        result = False
        session = self._Session()
        try:
            already_present_product = session.query(Product). \
                filter(Product.name == product.name).all()
            if len(already_present_product) == 0:
                session.add(product)
                session.commit()
                result = True
        except Exception:
            # keep the original best-effort behaviour (swallow and return
            # False), but roll back so the session is left in a clean state
            session.rollback()
        finally:
            # the original leaked the session whenever an exception fired
            session.close()
        return result

    def get_products_to_process(self):
        """Products downloaded and waiting to be processed."""
        return self._query_by_status(ProductStatus.downloaded)

    def update_product_status(self, product_name, status):
        """Set *status* (and refresh last_modify) on the named product, if any."""
        session = self._Session()
        try:
            product = session.query(Product). \
                filter(Product.name == product_name).first()
            if product is not None:
                product.status = status
                product.last_modify = datetime.datetime.utcnow()
                session.commit()
        finally:
            session.close()

    def get_pending_products(self):
        """All products in 'pending' status."""
        return self._query_by_status(ProductStatus.pending)

    def get_downloading_products(self):
        """All products in 'downloading' status."""
        return self._query_by_status(ProductStatus.downloading)

    def get_a_downloaded_product(self):
        """A single 'downloaded' product, or None when there is none."""
        return self._query_by_status(ProductStatus.downloaded, first_only=True)

    def get_downloaded_products(self):
        """All products in 'downloaded' status."""
        return self._query_by_status(ProductStatus.downloaded)
|
import os
from testconfig import config
from gocdapi_utils.go_launcher import GoAgentLauncher
from gocdapi_utils.go_launcher import GoServerLauncher
# Launcher registry shared with the tests; populated by setUpPackage().
go_instances = {}
# When the test config declares static instances, we manage no processes here.
static_instances = config.get('static_instances', False)
def setUpPackage():
    """Package-level fixture: launch a Go server and agent unless the test
    config points at externally managed (static) instances."""
    if not static_instances:
        version = "16.5.0-3305"
        systests_dir, _ = os.path.split(__file__)
        go_instances['server'] = GoServerLauncher(systests_dir, version)
        go_instances['agent'] = GoAgentLauncher(systests_dir, version)
        # server first, so the agent has something to register against
        go_instances['server'].start()
        go_instances['agent'].start()
def tearDownPackage():
    """Package-level fixture: stop the launched processes in reverse order."""
    if not static_instances:
        go_instances['agent'].stop()
        go_instances['server'].stop()
|
import os
class SignExpiries:
    """Expirations members of module"""
    # Lifetime of the registration-confirmation e-mail link, in seconds.
    REGISTRATION_EMAIL = 60 * 60 * 24 # 1 day
class ErrorCodes:
    """User errors return codes"""
    # Stable integer codes returned to clients; do not renumber.
    INVALID_PASSWORD = 1
    INVALID_SIGN = 2
    INVALID_TOKEN = 3
    PASSWORDS_DO_NOT_MATCH = 4
    USER_EXIST = 5
    USER_NOT_CONFIRMED = 6
    USER_NOT_FOUND = 7
    WRONG_PASSWORD = 8
class MessageTypes:
    """Type of available user messages"""
    # NOTE(review): these are read at import time; int(os.getenv(...)) raises
    # TypeError when a variable is unset -- confirm the environment always
    # provides MESSAGE_CONFIRM / MESSAGE_WELCOME / MESSAGE_RESTORE.
    CONFIRM = int(os.getenv('MESSAGE_CONFIRM'))
    WELCOME = int(os.getenv('MESSAGE_WELCOME'))
    RESTORE = int(os.getenv('MESSAGE_RESTORE'))
|
from datetime import date
from datetime import timedelta
from freezegun import freeze_time
from onegov.agency.collections import ExtendedAgencyCollection
from onegov.agency.collections import ExtendedPersonCollection
from onegov.agency.pdf import AgencyPdfAr
from onegov.agency.pdf import AgencyPdfDefault
from onegov.agency.pdf import AgencyPdfZg
from onegov.pdf.utils import extract_pdf_info
from sedate import utcnow
def test_pdf_page_break_on_level(session):
    # Placeholder kept to reserve the test name; page-break-on-level behaviour
    # appears to be exercised inside test_agency_pdf_default below.
    pass
def test_agency_pdf_default(session):
    """Render default agency PDFs and verify TOC entries, page-break levels
    and the per-agency export_fields selection."""
    # Fixture data: two people, two root agencies, two sub-agencies of Bund
    # with different export_fields.
    people = ExtendedPersonCollection(session)
    aeschi = people.add(
        last_name="Aeschi",
        first_name="Thomas",
        political_party="SVP"
    )
    eder = people.add(
        last_name="Eder",
        first_name="Joachim",
        political_party="FDP"
    )
    agencies = ExtendedAgencyCollection(session)
    bund = agencies.add_root(title="Bundesbehörden")
    canton = agencies.add_root(title="Kanton")
    nr = agencies.add(
        parent=bund,
        title="Nationalrat",
        portrait="Portrait NR",
        export_fields=[
            'membership.title',
            'person.title'
        ]
    )
    sr = agencies.add(
        parent=bund,
        title="Ständerat",
        export_fields=[
            'person.first_name',
            'person.last_name',
            'person.political_party',
        ]
    )
    nr.add_person(aeschi.id, "Mitglied von Zug")
    sr.add_person(eder.id, "Ständerat für Zug")
    # test single agency with toc break on level 1
    file = AgencyPdfDefault.from_agencies(
        agencies=[bund],
        title="Staatskalender",
        toc=True,
        exclude=[],
        page_break_on_level=1
    )
    pages, pdf = extract_pdf_info(file)
    assert pages == 2
    assert "Staatskalender" in pdf
    # headings are expected twice: once in the TOC, once in the body
    assert "1 Bundesbehörden" in pdf
    assert "1.1 Nationalrat" in pdf
    assert "1.2 Ständerat" in pdf
    assert "1 Bundesbehörden" in pdf
    assert "1.1 Nationalrat" in pdf
    assert "Portrait NR" in pdf
    assert "Mitglied von Zug" in pdf
    assert "Aeschi Thomas" in pdf
    # nr does not export political_party
    assert "SVP" not in pdf
    assert "1.2 Ständerat" in pdf
    # sr does not export membership.title
    assert "Ständerat für Zug" not in pdf
    assert "Joachim" in pdf
    assert "Eder, FDP" in pdf
    # test page break on level 2
    file = AgencyPdfDefault.from_agencies(
        agencies=[bund],
        title="Staatskalender",
        toc=True,
        exclude=[],
        page_break_on_level=2
    )
    pages, pdf = extract_pdf_info(file)
    assert pages == 3
    assert "1.2 Ständerat" in pdf
    assert "Ständerat für Zug" not in pdf
    assert "Joachim" in pdf
    assert "Eder, FDP" in pdf
    # test page break on level 1 with succeeding headers
    file = AgencyPdfDefault.from_agencies(
        agencies=[bund, canton],
        title="Staatskalender",
        toc=True,
        exclude=[],
        page_break_on_level=1
    )
    pages, pdf = extract_pdf_info(file)
    assert pages == 3
    assert "2 Kanton" in pdf
    assert "2 Kanton" in pdf
    # without TOC and with an excluded field
    file = AgencyPdfDefault.from_agencies(
        agencies=[nr, sr],
        title="Staatskalender",
        toc=False,
        exclude=['political_party']
    )
    pages, pdf = extract_pdf_info(file)
    assert "Staatskalender" in pdf
    assert "Bundesbehörden" not in pdf
    assert "FDP" not in pdf
    assert "SVP" not in pdf
    # single sub-agency, no TOC
    file = AgencyPdfDefault.from_agencies(
        agencies=[nr],
        title="Nationalrat",
        toc=False,
        exclude=[]
    )
    pages, pdf = extract_pdf_info(file)
    assert pages == 1
    assert 'Nationalrat' in pdf
    assert 'Portrait NR' in pdf
    assert 'Mitglied von Zug' in pdf
    assert 'Aeschi Thomas' in pdf
def test_agency_pdf_default_hidden_by_access(session):
    """People, agencies and memberships with access='private' must be left
    out of the rendered PDF entirely."""
    people = ExtendedPersonCollection(session)
    aeschi = people.add(
        last_name="Aeschi",
        first_name="Thomas",
        access='private'
    )
    eder = people.add(
        last_name="Eder",
        first_name="Joachim"
    )
    agencies = ExtendedAgencyCollection(session)
    bund = agencies.add_root(title="Bundesbehörden")
    # private agency: should never appear
    agencies.add(
        parent=bund,
        title="Bundesrat",
        access='private'
    )
    nr = agencies.add(
        parent=bund,
        title="Nationalrat",
        export_fields=['membership.title', 'person.title']
    )
    sr = agencies.add(
        parent=bund,
        title="Ständerat",
        export_fields=['membership.title', 'person.title']
    )
    nr.add_person(aeschi.id, "Mitglied von Zug")
    # private membership of a public person
    sr.add_person(eder.id, "Ständerat für Zug", access='private')
    file = AgencyPdfDefault.from_agencies(
        agencies=[bund],
        title="Staatskalender",
        toc=False,
        exclude=[]
    )
    _, pdf = extract_pdf_info(file)
    assert "Bundesrat" not in pdf
    assert "Nationalrat" in pdf
    assert "Ständerat" in pdf
    # private person hides their membership too
    assert "Mitglied von Zug" not in pdf
    assert "Aeschi" not in pdf
    # private membership hides the person in this context
    assert "Ständerat für Zug" not in pdf
    assert "Eder" not in pdf
def test_agency_pdf_default_hidden_by_publication(session):
    """Entities whose publication_start lies in the future must be left out
    of the rendered PDF, mirroring the access-based test above."""
    # publication window opens a week from now
    then = utcnow() + timedelta(days=7)
    people = ExtendedPersonCollection(session)
    aeschi = people.add(
        last_name="Aeschi",
        first_name="Thomas",
        publication_start=then
    )
    eder = people.add(
        last_name="Eder",
        first_name="Joachim"
    )
    agencies = ExtendedAgencyCollection(session)
    bund = agencies.add_root(title="Bundesbehörden")
    # not-yet-published agency: should never appear
    agencies.add(
        parent=bund,
        title="Bundesrat",
        publication_start=utcnow() + timedelta(days=7)
    )
    nr = agencies.add(
        parent=bund,
        title="Nationalrat",
        export_fields=['membership.title', 'person.title']
    )
    sr = agencies.add(
        parent=bund,
        title="Ständerat",
        export_fields=['membership.title', 'person.title']
    )
    nr.add_person(aeschi.id, "Mitglied von Zug")
    # not-yet-published membership of a published person
    sr.add_person(eder.id, "Ständerat für Zug", publication_start=then)
    file = AgencyPdfDefault.from_agencies(
        agencies=[bund],
        title="Staatskalender",
        toc=False,
        exclude=[]
    )
    _, pdf = extract_pdf_info(file)
    assert "Bundesrat" not in pdf
    assert "Nationalrat" in pdf
    assert "Ständerat" in pdf
    assert "Mitglied von Zug" not in pdf
    assert "Aeschi" not in pdf
    assert "Ständerat für Zug" not in pdf
    assert "Eder" not in pdf
@freeze_time("2018-01-01")
def test_agency_pdf_ar(session):
people = ExtendedPersonCollection(session)
aeschi = people.add(
last_name="Aeschi",
first_name="Thomas",
political_party="SVP"
)
eder = people.add(
last_name="Eder",
first_name="Joachim",
political_party="FDP"
)
agencies = ExtendedAgencyCollection(session)
bund = agencies.add_root(title="Bundesbehörden")
nr = agencies.add(
parent=bund,
title="Nationalrat",
portrait="Portrait NR",
export_fields=[
'membership.title',
'person.title'
]
)
sr = agencies.add(
parent=bund,
title="Ständerat",
export_fields=[
'person.first_name',
'person.last_name',
'person.political_party',
]
)
nr.add_person(aeschi.id, "Mitglied von AR")
sr.add_person(eder.id, "Ständerat für AR")
file = AgencyPdfAr.from_agencies(
agencies=[bund],
title="Staatskalender",
toc=True,
exclude=[]
)
pages, pdf = extract_pdf_info(file)
assert pages == 2
assert 'Staatskalender' in pdf
assert '1 Bundesbehörden' in pdf
assert '1.1 Nationalrat' in pdf
assert '1.2 Ständerat' in pdf
assert 'Staatskalender Kanton Appenzell Ausserrhoden' in pdf
assert '1 Bundesbehörden' in pdf
assert '1.1 Nationalrat' in pdf
assert 'Portrait NR' in pdf
assert 'Mitglied von AR' in pdf
assert 'Aeschi Thomas' in pdf
assert '1.2 Ständerat' in pdf
assert 'Joachim' in pdf
assert 'Eder, FDP' in pdf
assert f'Druckdatum: {date.today():%d.%m.%Y}' in pdf
assert '2' in pdf
def test_agency_pdf_zg(session):
    """Render the Zug PDF variant and check content plus footer decorations."""
    people = ExtendedPersonCollection(session)
    aeschi = people.add(
        last_name="Aeschi",
        first_name="Thomas",
        political_party="SVP"
    )
    eder = people.add(
        last_name="Eder",
        first_name="Joachim",
        political_party="FDP"
    )
    agencies = ExtendedAgencyCollection(session)
    bund = agencies.add_root(title="Bundesbehörden")
    nr = agencies.add(
        parent=bund,
        title="Nationalrat",
        portrait="Portrait NR",
        export_fields=[
            'membership.title',
            'person.title'
        ]
    )
    sr = agencies.add(
        parent=bund,
        title="Ständerat",
        export_fields=[
            'person.first_name',
            'person.last_name',
            'person.political_party',
        ]
    )
    nr.add_person(aeschi.id, "Mitglied von Zug")
    sr.add_person(eder.id, "Ständerat für Zug")
    file = AgencyPdfZg.from_agencies(
        agencies=[bund],
        title="Staatskalender",
        toc=True,
        exclude=[]
    )
    pages, pdf = extract_pdf_info(file)
    # normalize whitespace extracted from the PDF (presumably non-breaking
    # spaces -- confirm against the extractor) before substring checks
    pdf = pdf.replace(' ', ' ')
    assert pages == 2
    assert 'Staatskalender' in pdf
    assert '1 Bundesbehörden' in pdf
    assert '1.1 Nationalrat' in pdf
    assert '1.2 Ständerat' in pdf
    assert '1 Bundesbehörden' in pdf
    assert '1.1 Nationalrat' in pdf
    assert 'Portrait NR' in pdf
    assert 'Mitglied von Zug' in pdf
    assert 'Aeschi Thomas' in pdf
    assert '1.2 Ständerat' in pdf
    assert 'Joachim' in pdf
    assert 'Eder, FDP' in pdf
    assert 'Staatskalender' in pdf
    assert f'Druckdatum: {date.today():%d.%m.%Y}' in pdf
    assert '2' in pdf
|
from __future__ import print_function
import boto3
import json
import logging
# Root logger configured once at cold start; Lambda reuses it across warm invocations.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
    """Greet a person, lazily creating their DynamoDB record on first visit.

    Expected event shape:
        {"operation": ..., "tableName": ...,
         "payload": {"Key": {"personId": "<id>"}}}
    Returns "Welcome Back, <id>" for a known person, "Hello <id>" after
    creating an unknown one, or "Hello <payload>" when no tableName is given.
    """
    logger.info(event)
    # kept from the original: a missing 'operation' key raises KeyError early
    operation = event['operation']
    if 'tableName' in event:
        # NOTE(review): the table name is hard-coded to "persons" rather than
        # taken from event['tableName'] -- confirm this is intentional.
        table = boto3.resource('dynamodb').Table("persons")
        payload = event.get('payload')
        # get_item(**payload) returns a dict containing 'Item' iff the key exists.
        response = table.get_item(**payload)
        if "Item" in response:
            # Known person.  (The original round-tripped the response through
            # json.dumps/json.loads; direct dict access is equivalent and does
            # not choke on DynamoDB Decimal values.)
            return "Welcome Back, " + response["Item"]["personId"]
        # Unknown person: create a minimal record, then greet them.
        person_id = payload['Key']['personId']
        table.put_item(Item={"personId": person_id})
        return "Hello " + person_id
    else:
        return "Hello " + event.get('payload')
#!/usr/bin/env python3
#
# Format a spec
#
import jsontemplate
import pscheduler
import sys
from validate import spec_is_valid
# Output format comes from the first CLI argument; default to plain text.
try:
    format = sys.argv[1]
except IndexError:
    format = 'text/plain'

json = pscheduler.json_load(exit_on_error=True)

valid, message = spec_is_valid(json)

if not valid:
    pscheduler.fail(message)

# Collapse the OID argument list into one comma-separated display string
# (equivalent to the original manual accumulation loop).
json['oidargs'] = ', '.join(json['oidargs'])

if format == 'text/plain':
    template = """
Host ...................... {.section host}{host}{.or}Not Specified{.end}
Host Node ................. {.section host_node}{host_node}{.or}Not Specified{.end}
Destination .................... {.section dest}{dest}{.or}Not Specified{.end}
Version ..................... {.section version}{version}{.or}Not Specified{.end}
OID ........................ {.section oid}{oid}{.or}Not Specified{.end}
Vartype ........................ {.section vartype}{vartype}{.or}Not Specified{.end}
Varvalue ........................ {.section varvalue}{varvalue}{.or}Not Specified{.end}
Protocol ................... {.section protocol}{protocol}{.or}UDP{.end}
Timeout ................... {.section timeout}{timeout}{.or}Not Specified{.end}"""

    # SNMPv3 specs carry extra security fields.
    if json['version'] == '3':
        template = template + """
Security Name .............. {.section security_name}{security_name}{.or}Not Specified{.end}
Authentication Protocol ...... {.section auth_protocol}{auth_protocol}{.or}Not Specified{.end}
Privacy Protocol ............ {.section priv_protocol}{priv_protocol}{.or}Not Specified{.end}
Security Level ............. {.section security_level}{security_level}{.or}Not Specified{.end}
Context .................... {.section context}{context}{.or}Not Specified{.end}
"""

elif format == 'text/html':
    # FIX: the section name was '{.section host-node}', which cannot match the
    # 'host_node' key used everywhere else.
    template = """
<table>
<tr><td>Host</td><td>{.section host}{host}{.or}Not Specified{.end}</td></tr>
<tr><td>Host Node</td><td>{.section host_node}{host_node}{.or}Not Specified{.end}</td></tr>
<tr><td>Destination</td><td>{.section dest}{dest}{.or}Not Specified{.end}</td></tr>
<tr><td>Version</td><td>{.section version}{version}{.or}Not Specified{.end}</td></tr>
<tr><td>OID</td><td>{.section oid}{oid}{.or}Not Specified{.end}</td></tr>
<tr><td>Vartype</td><td>{.section vartype}{vartype}{.or}Not Specified{.end}</td></tr>
<tr><td>Varvalue</td><td>{.section varvalue}{varvalue}{.or}Not Specified{.end}</td></tr>
<tr><td>Protocol</td><td>{.section protocol}{protocol}{.or}Not Specified{.end}</td></tr>
<tr><td>Timeout</td><td>{.section timeout}{timeout}{.or}Not Specified{.end}</td></tr>"""

    if json['version'] == '3':
        # FIX: '{security_levelsl}' was a typo for '{security_level}'.
        template = template + """
<tr><td>Security Name</td><td>{.section security_name}{security_name}{.or}Not Specified{.end}</td></tr>
<tr><td>Authentication Protocol</td><td>{.section auth_protocol}{auth_protocol}{.or}Not Specified{.end}</td></tr>
<tr><td>Privacy Protocol</td><td>{.section priv_protocol}{priv_protocol}{.or}Not Specified{.end}</td></tr>
<tr><td>Security Level</td><td>{.section security_level}{security_level}{.or}Not Specified{.end}</td></tr>
<tr><td>Context</td><td>{.section context}{context}{.or}Not Specified{.end}</td></tr>
</table>
"""
    else:
        template = template + """
</table>
"""

else:
    pscheduler.fail("Unsupported format '%s'" % format)

# TODO: Should probably handle exceptions in a nicer way.
print(jsontemplate.expand(template, json).strip())
|
"""
Author: Dan Zelenak
Date: 3/8/2017
Purpose: Take a color table exported from ArcMap and convert it
into XML as a new .txt file to be used by GDAL.
"""
from argparse import ArgumentParser
def main_work(ffile, newfile):
    """Convert an ArcMap .clr color table into GDAL ColorTable XML.

    :param ffile: path to the input .clr file; each line is
        "<value> <red> <green> <blue>" separated by whitespace.
    :param newfile: path to the output .txt file receiving the XML.
    :return: None
    """
    with open(ffile, 'r') as x, open(newfile, 'w') as y:
        y.write(' <ColorInterp>Palette</ColorInterp>\n'
                ' <ColorTable>\n')
        for line in x:
            # split() with no argument tolerates runs of spaces/tabs,
            # unlike the original split(' ') which produced empty fields
            fields = line.split()
            if not fields:
                continue  # skip blank lines instead of crashing on them
            # first field (the raster value) is dropped; alpha is fixed at 255
            r, g, b = fields[1], fields[2], fields[3]
            y.write(' <Entry c1="{0}" c2="{1}" c3="{2}" c4="255"/>\n'.format(r, g, b))
        # close the color table element
        y.write(' </ColorTable>\n')
    return None
if __name__ == "__main__":
    # CLI entry point: two required paths, the input .clr and the output .txt.
    arg_parser = ArgumentParser(
        description=(
            "Take a color table exported from ArcMap, convert it into XML, and write to a new .txt file\n"
            "which can be used by GDAL"
        )
    )
    arg_parser.add_argument("-c", "--color", dest="ffile", type=str, required=True,
                            help="The full path to the .clr table")
    arg_parser.add_argument("-n", "--new", dest="newfile", type=str, required=True,
                            help="The full path to the output .txt file")
    cli_args = arg_parser.parse_args()
    main_work(**vars(cli_args))
|
from django.urls import path
from . import views
# URLConf Module (URL Configurations). Remember to include this module in the main config ventureinsight_prj/urls.py
# Route table for this app (included from the project-level urls.py).
urlpatterns = [
    path('', views.home),          # site root / landing page
    path('signup/', views.signup),
    path('login/', views.login),
    path('profile/', views.profile),
    path('dashboard/', views.dashboard, name="dashboard"),  # only named route; usable with reverse()/{% url %}
    path('learningpath/', views.learningpath),
    path('logout/', views.logout),
    path('lesson/', views.lesson),
    path('quiz/', views.quiz)
]
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Copyright (c) 2016 Shota Shimazu
# This program is freely distributed under the MIT, see LICENSE for detail.
# Import required library
import os
import sys
import subprocess, shutil
# Variables
# Destination directory of the cloned repository; reassigned by getGitUrl().
target_path = "Default"
# Decorative separators written around the build-log header line.
st_qo = "##############################"
ed_qo = "##############################\n"
# Functions
def log_init():
    """Append a BUILD LOG header line to build-log.log.

    Uses the module-level separator strings st_qo / ed_qo.
    """
    # 'with' guarantees the handle is closed even if the write fails;
    # the old 'global' statements were no-ops (the names are only read).
    with open("build-log.log", "a") as f:
        f.write(st_qo + "BUILD LOG" + ed_qo)
def Init():
    """Initialise build-log.log: append a header if it exists, else create it."""
    if os.path.exists("build-log.log"):
        log_init()
    else:
        # First run: create the file with a creation marker line.
        # (write() replaces the old writelines() call, which only worked by
        # accident because a str iterates as characters.)
        with open("build-log.log", "a") as f:
            f.write("------ Created Log File " + "------\n")
def Msg(msg):
    """Print *msg* to stdout prefixed with the script's name tag."""
    print("AutoSwift: {0}".format(msg))
def Log(log_msg, log = "null"):
    """Append one tagged line to build-log.log.

    :param log_msg: message text.
    :param log: optional tag; the literal string "null" (the default) means
        "no tag supplied", in which case the line is tagged "Script".
    """
    # 'with' replaces the manual open/close pair so the handle is always closed.
    with open("build-log.log", "a") as f:
        if log == "null":
            f.write("Script: " + log_msg + "\n")
        else:
            f.write(log + ": " + log_msg + "\n")
def getGitUrl():
    """Interactively ask for a git host/URL and return the clone URL.

    Side effect: sets the module-level target_path to the repo directory name.
    Typing 'clean' wipes the working directory and exits the script.
    (Python 2 script: print statements and raw_input are intentional.)
    """
    global target_path
    print "###################################################################"
    print "## Type your git host or url. (MODE) ##"
    print "###################################################################"
    urlType = raw_input()
    if "http" in urlType:
        # A full URL was pasted in; ask for the directory the clone creates.
        Msg("AutoSwift: You selected costom url.")
        Log("Selected target as URL.")
        clone_url = urlType
        Msg("Please type expected target source's path.")
        target_path = raw_input()
    elif "clean" in urlType:
        # Maintenance mode: delete everything in the working directory and quit.
        clean_cmd = "rm -rf *"
        subprocess.call(clean_cmd, shell=True)
        exit(0)
    else:
        if "github" in urlType:
            # Build https://github.com/<user>/<repo>.git from two prompts.
            Log("Selected target as Github host.")
            base_url = "https://github.com"
            Msg("Type Github user name or organization that is hosted target sources.")
            Log("Remembered github user name.")
            github_un = raw_input()
            Msg("Type repository name that is hosted target sources.")
            Log("Remembered repository name.")
            git_repo = raw_input()
            target_path = git_repo
            clone_url = base_url + '/' + github_un + '/' + git_repo + '.git'
            Log("Generated url " + clone_url)
        elif "gitlab" in urlType:
            # Same flow for GitLab-hosted repositories.
            base_url = "https://gitlab.com"
            Msg("Type Gitlab user name or organization that is hosted target sources.")
            Log("Remembered gitlab user name.")
            gitlab_un = raw_input()
            Msg("Type repository name that is hosted target sources.")
            Log("Remembered repository name.")
            git_repo = raw_input()
            target_path = git_repo
            clone_url = base_url + '/' + gitlab_un + '/' + git_repo + '.git'
            Log("Generated url " + clone_url)
        else:
            Msg("Sorry, this host is not supported.")
            Log("INPUTED HOST IS NOT SUPPORTED. " + urlType, "ERROR")
            sys.exit(1)
    return clone_url
def getTargetSource(git_url):
    """Clone *git_url* into the working directory, replacing any previous clone.

    Exits the script with status 1 when `git clone` fails.
    """
    global target_path
    if os.path.exists(target_path):
        # A previous clone exists: delete it so the clone below fetches fresh sources.
        Msg("This repository is already cloned...")
        Log("Target repository found.")
        Msg("Automatically update this sources.")
        Log("Updated to latest source automatically.")  # fixed typo: "spurce"
        shutil.rmtree(target_path)
    get_cmd = "git clone " + git_url
    get_src = subprocess.call(get_cmd, shell=True)
    if get_src != 0:
        Msg("Failed to get target sources.")
        Log("FAILED TO CLONE REPOSITORY. CHECK REPO NAME AGAIN.", "ERROR")
        Msg("Check hosted url or git username or repository name.")
        Log("EXIT SCRIPT WITH 1.", "EXIT STATUS")
        sys.exit(1)
    else:
        Msg("Cloning target sources is completed.")
        Log("Completed to clone repository.")
def compileSwift(module_name):
os.chdir(module_name)
if os.path.isfile(".swift-version"):
print "####################################################################"
print "## Would you compile this project on latest snapshot? (yes or no) ##"
print "####################################################################"
rc = raw_input()
if rc == "yes" or rc == "y":
os.remove(".swift-version")
elif rc == "no" or rc == "n":
print "Using Swift snapshot version is:"
# subprocess.call(" ", shell=True)
print ""
else:
Msg("Please type (yes or no) or (y or n).", "ERROR")
Log("SOMETHING WRONG.", "ERROR")
cmd_swift_v = 'swift --version'
cmd_swift_build = 'swift build'
subprocess.call(cmd_swift_v, shell=True)
subprocess.call(cmd_swift_build, shell=True)
def main():
    """Entry point: init the log, ask for a repo, clone it and build it."""
    Init()
    print "####################################################################"
    print "## Autoswift | v0.0.4-a ##"
    print "####################################################################"
    url = getGitUrl()
    Msg("Cloning target sources...")
    getTargetSource(url)
    # target_path was set as a side effect of getGitUrl()
    compileSwift(target_path)
# Run unconditionally (the original script has no __main__ guard).
main()
|
class Node:
    """Singly linked list node holding a payload and a successor reference."""

    def __init__(self, data=None, next=None):
        self.data = data  # payload
        self.next = next  # following node, or None at the tail
class Linkedlist:
    """Minimal singly linked list built from Node objects."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def insert_at_begining(self, data):
        """Push *data* onto the front of the list."""
        self.head = Node(data, self.head)

    def print(self):
        """Print the list as 'a-->b-->', or a message when empty."""
        if self.head is None:
            print("list is empty")
            return
        node = self.head
        rendered = ''
        while node:
            rendered += str(node.data) + '-->'
            node = node.next
        print(rendered)

    def insert_at_end(self, data):
        """Append *data* after the current tail."""
        if self.head is None:
            self.head = Node(data, None)
            return
        node = self.head
        while node.next:
            node = node.next
        node.next = Node(data, None)

    def insert_values(self, data_list):
        """Replace the list contents with the items of *data_list*, in order."""
        self.head = None
        for item in data_list:
            self.insert_at_end(item)

    def get_len(self):
        """Return the number of nodes."""
        total = 0
        node = self.head
        while node:
            total += 1
            node = node.next
        return total
if __name__ == '__main__':
    # Quick manual demo of the list operations.
    ll = Linkedlist()
    ll.insert_at_begining(5)
    ll.insert_at_begining(54)
    ll.insert_at_end(33)
    ll.print()
    ll.insert_values("a b c d e".split(" "))
    ll.print()
    print(ll.get_len())
|
# Task 1. Variant 6. (translated from Russian)
# Write a program that reports the occupation and the pen name used by
# Johannes Brofeldt, then waits for the user to press Enter before exiting.
# Ivanov S. E.
# 15.09.2016
print('Йоханнес Бруфельдт - финский писатель, журналист, переводчик. Сменил имя на Ахо Юхани.')
input('Нажмите enter для выхода')
# 環
# 積についてモノイド
# 加法についてアーベル群
from Algebra.Monoid import Monoid
from Algebra.AdditiveGroup import AdditiveGroup
from typing import TypeVar
RingType = TypeVar('RingType', bound='Ring')
class Ring(Monoid, AdditiveGroup):
    """A ring: a monoid under multiplication and an abelian group under addition."""

    # Distributive law (translated from the original Japanese comment)
    def testDistributive(self: RingType, a: RingType, b: RingType) -> bool:
        """Check left distributivity: self * (a + b) == self * a + self * b."""
        return self * (a + b) == self * a + self * b
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def links(request):
    """Return a plain-text 'links' response.

    The parameter was renamed from the misspelled ``requst`` to Django's
    conventional ``request``; Django passes it positionally, so callers
    are unaffected.
    """
    return HttpResponse('links')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-04-24 16:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the articles app. Avoid hand-editing;
    # create follow-up migrations for any schema change instead.

    initial = True

    dependencies = [
    ]

    operations = [
        # Article: the post itself, with draft/published/archived workflow.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(db_index=True, editable=False)),
                ('updated_at', models.DateTimeField(db_index=True, editable=False)),
                ('title', models.CharField(max_length=128)),
                ('body', models.TextField()),
                ('slug', models.SlugField(max_length=130)),
                ('published_status', models.PositiveSmallIntegerField(choices=[(1, 'draft'), (2, 'published'), (3, 'archived')], default=1)),
                ('published_at', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'article',
                'verbose_name_plural': 'articles',
            },
        ),
        # ArticleCategory: optional grouping with a unique slug.
        migrations.CreateModel(
            name='ArticleCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(db_index=True, editable=False)),
                ('updated_at', models.DateTimeField(db_index=True, editable=False)),
                ('title', models.CharField(max_length=128)),
                ('slug', models.SlugField(max_length=130, unique=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('is_published', models.BooleanField(default=True)),
            ],
            options={
                'verbose_name': 'Article Category',
                'verbose_name_plural': 'Article Categories',
            },
        ),
        # Wire Article.category -> ArticleCategory (nullable; cascade on delete).
        migrations.AddField(
            model_name='article',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='articles', to='articles.ArticleCategory'),
        ),
    ]
|
import requests
from lxml import etree
from bs4 import BeautifulSoup as bs
import queue as Queue
import threading
import time,re
from func import gene_headers
# write proxy
def writeproxy(porxyinfo):
    """Append one validated proxy record to today's file under proxy/."""
    day_stamp = time.strftime('%Y%m%d')
    with open('proxy/proxyinfo_%s.txt' % day_stamp, 'a+') as out:
        out.write(porxyinfo + '\n')
# return page code
def GetPageText(url):
    """Fetch *url* with randomized headers and return the response body text."""
    response = requests.get(url, headers=gene_headers())
    return response.text
# return post urllist
def GetPostUrl(source):
    """Parse the xicidaili proxy table HTML into 'scheme#ip:port' strings."""
    records = []
    # First <tr> is the header row, hence the [1:] slice.
    rows = bs(source, 'lxml').find("table", {"id": "ip_list"}).findAll("tr")[1:]
    for row in rows:
        cells = row.findAll("td")
        ip = cells[1].get_text(strip='\r\n')
        port = cells[2].get_text(strip='\r\n')
        address = cells[3].get_text(strip='\r\n')  # extracted but unused downstream
        scheme = cells[5].get_text(strip='\r\n')
        records.append(scheme.lower() + '#' + ip + ':' + port)
    return records
def Checkproxy(porxyinfo):
    """Validate 'http#ip:port'; record the proxy if a probe succeeds through it."""
    # Reject anything that is not exactly http#<ipv4>:<port>.
    if not re.match(r'^http#\d+\.\d+\.\d+\.\d+\:\d+$', porxyinfo):
        return
    candidate = {'http': porxyinfo.split('#')[1]}
    try:
        r = requests.get("http://ip.chinaz.com/", proxies=candidate, timeout=3, headers=gene_headers())
    except:
        print('%s,不可用' % porxyinfo)
    else:
        writeproxy(porxyinfo)
        print('%s,可用' % porxyinfo)
def getproxyid():
    """Scrape xicidaili proxy listings and check each candidate on 5 worker threads."""
    start = time.time()
    queue = Queue.Queue()
    class ThreadUrl(threading.Thread):
        """Threaded Url Grab"""
        def __init__(self, queue):
            threading.Thread.__init__(self)
            self.queue = queue
            # NOTE(review): this 'global mutex' has no effect — the mutex the
            # workers actually use is getproxyid's local, reached via closure.
            global mutex
        def run(self):
            # Worker loop: pull a proxy string, check it under the shared lock.
            while True:
                porxyinfo = self.queue.get()
                try:
                    mutex.acquire(5)
                    try:
                        Checkproxy(porxyinfo)
                    except:
                        # Check failed: release the lock and move on.
                        time.sleep(0.15)
                        mutex.release()
                        self.queue.task_done()
                        continue
                    time.sleep(0.15)
                    mutex.release()
                    self.queue.task_done()
                except Exception as e:
                    time.sleep(0.15)
                    self.queue.task_done()
    pagenum = 5
    # High-anonymity (nn) and HTTPS (wn) listing pages.
    targets = ['http://www.xicidaili.com/nn/%d' % page for page in range(1, pagenum+1)]
    targets += ['http://www.xicidaili.com/wn/%d' % page for page in range(1, pagenum+1)]
    for proxyurl in targets:
        try:
            PageText = GetPageText(proxyurl)
        except Exception as e:
            print(e)
            break
        PostUrlList = GetPostUrl(PageText)
        # multithreaded checking (translated comment)
        mutex = threading.Lock()
        for i in range(5):
            t = ThreadUrl(queue)
            t.setDaemon(True)
            try:
                t.start()
            except:
                pass
        for host in PostUrlList:
            queue.put(host)
        queue.join()
    print("Elapsed Time: %s" % (time.time() - start) )
# Script entry point: scrape and validate proxies.
if __name__ == '__main__':
    getproxyid()
|
from flask import Flask, redirect, render_template, request, session, url_for, flash
from flask_dropzone import Dropzone
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
import os
from functools import wraps
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine, Date
from database.tabledef import User, UserImage, ClassImage
from PIL import Image
import requests
from io import BytesIO
import datetime
from multiprocessing import Queue
import image_processing
import imagehash
# Queue feeding uploaded images to the background processing worker.
image_queue = Queue()
# Handle to the worker; set in the __main__ block at the bottom of the file.
image_processer = None
engine = create_engine('sqlite:///ap.db', echo=True)
app = Flask(__name__)
# Uploads settings
app.config['UPLOADED_PHOTOS_DEST'] = os.getcwd() + '/uploads'
app.config['SECRET_KEY'] = 'supersecretkeygoeshere'
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)  # flask-uploads helper; presumably caps upload request size — confirm
dropzone = Dropzone(app)
# Dropzone: accept multiple image files, then redirect to the gallery view.
app.config['DROPZONE_UPLOAD_MULTIPLE'] = True
app.config['DROPZONE_ALLOWED_FILE_CUSTOM'] = True
app.config['DROPZONE_ALLOWED_FILE_TYPE'] = 'image/*'
app.config['DROPZONE_REDIRECT_VIEW'] = 'view_images'
def login_required(func):
    """Decorator: redirect to the home/login page unless a user is logged in."""
    @wraps(func)
    def wrap(*args, **kwargs):
        if not session.get('user_id'):
            flash('You need to login first')
            return redirect(url_for('home'))
        return func(*args, **kwargs)
    return wrap
@app.route('/')
def home(status=None):
    """Landing page: login form for anonymous users, image grid otherwise.

    :param status: login-error flag forwarded by do_admin_login(); shown on
        the login template.
    """
    if not session.get('user_id'):
        # BUG FIX: the status argument was previously dropped (always None),
        # so the login template never saw the failed-login flag.
        return render_template('login.html', status=status)
    Session = scoped_session(sessionmaker(bind=engine))
    sess = Session()
    query = sess.query(UserImage).filter(UserImage.userid.in_([session['user_id']]))
    result = query.all()
    file_urls = []
    for userimg in result:
        photo_info = {'url': userimg.imgurl, 'width': userimg.imgw, 'height': userimg.imgh, 'id': userimg.id}
        file_urls.append(photo_info)
    # cache the urls so other views can render without re-querying
    session['file_urls'] = file_urls
    sess.close()
    return render_template('view_images.html', file_urls=file_urls)
@app.route('/signup', methods=['GET', 'POST'])
def signup(status=None):
    """Render the signup form (GET) or create a new user account (POST)."""
    if request.method == 'GET':
        if not session.get('user_id'):
            return render_template('signup.html', status=status)
        else:
            return redirect(url_for('home'))
    elif request.method == 'POST':
        try:
            email = request.form['email']
            username = request.form['username']
            password = request.form['password']
            confirmedpassword = request.form['confirmpassword']
            if password != confirmedpassword:
                # status=True tells the template the passwords did not match
                return render_template('signup.html', status=True)
            # SECURITY FIX: never log credentials; log only non-secret fields.
            print(email + '\t' + username)
            Session = scoped_session(sessionmaker(bind=engine))
            sess = Session()
            # NOTE(review): the password appears to be stored in plaintext —
            # it should be hashed (e.g. werkzeug.security) before persisting.
            user = User(username, password, email)
            sess.add(user)
            sess.commit()
            sess.close()
            return redirect(url_for('home'))
        except Exception as ex:
            print("error in insert operation", ex)
            return "Register error"
@app.route('/view_similar_images', methods=['GET', 'POST'])
@login_required
def view_similar_images():
    """Show the current user's images bucketed by their similarity group id."""
    Session = scoped_session(sessionmaker(bind=engine))
    sess = Session()
    user_images = sess.query(UserImage).filter(UserImage.userid.in_([session['user_id']])).all()
    groups = {}
    for img in user_images:
        if img.groupid is None:
            continue  # ungrouped images are not shown on this page
        info = {'url': img.imgurl, 'width': img.imgw, 'height': img.imgh, 'id': img.id}
        groups.setdefault(img.groupid, []).append(info)
    sess.close()
    print(groups)
    return render_template('view_similar_images.html', groups=groups)
@app.route('/delete_images', methods=['GET', 'POST'])
@login_required
def delete_images():
    """Delete the user-image rows whose ids are POSTed as JSON {'data': [...]}."""
    print("enter delete")
    Session = scoped_session(sessionmaker(bind=engine))
    sess = Session()
    # BUG FIX: list_ids used to be bound only on POST, so a GET request
    # crashed with NameError at the len(list_ids) check below.
    list_ids = []
    if request.method == "POST":
        data = request.get_json()
        list_ids = data['data']
        for did in list_ids:
            print("delete id: " + did)
            sess.query(UserImage).filter(UserImage.id == did).delete()
        sess.commit()
    sess.close()
    if len(list_ids):
        # invalidate the cached url list so the next page load re-queries
        session.pop('file_urls', None)
    return redirect(url_for("view_similar_images"))
@app.route('/login', methods=['POST'])
def do_admin_login():
    """Validate the posted credentials and populate the login session."""
    POST_USERNAME = str(request.form['username'])
    POST_PASSWORD = str(request.form['password'])
    Session = scoped_session(sessionmaker(bind=engine))
    sess = Session()
    # NOTE(review): this compares the password in plaintext against the DB —
    # passwords should be stored and checked as salted hashes.
    query = sess.query(User).filter(User.username.in_([POST_USERNAME]), User.password.in_([POST_PASSWORD]))
    # NOTE(review): credentials are printed to stdout — remove in production.
    print(POST_USERNAME + '\t' + POST_PASSWORD)
    result = query.first()
    sess.close()
    if result:
        status = None
        session['user_name'] = POST_USERNAME
        session['user_id'] = result.userid
    else:
        status = True
        flash('wrong password!')
    # Render via home(); a truthy status signals the login error to the template.
    return home(status)
@app.route("/logout")
@login_required
def logout():
    """Clear the login session and return to the landing page."""
    session.pop('file_urls', None)
    session['user_id'] = None
    return home()
@app.route('/upload_image', methods=['GET', 'POST'])
@login_required
def upload_image():
    """Dropzone endpoint: save posted images, record them, queue processing."""
    global image_queue
    # set session for image results
    if "file_urls" not in session:
        session['file_urls'] = []
    # list to hold our uploaded image urls
    file_urls = session['file_urls']
    # handle image upload from Dropzone
    if request.method == 'POST':
        Session = scoped_session(sessionmaker(bind=engine))
        sess = Session()
        file_obj = request.files
        for f in file_obj:
            file = request.files.get(f)
            # save the file with to our photos folder
            filename = photos.save(
                file,
                name=file.filename.lower()
            )
            # append image urls
            photo_url = photos.url(filename)
            # Re-download the stored file to read its pixel dimensions.
            response = requests.get(photo_url)
            img = Image.open(BytesIO(response.content))
            (w, h) = img.size
            photo_info = {'url': photo_url, 'width': w, 'height': h}
            file_urls.insert(0, photo_info)
            # Timestamp-derived id; NOTE(review): uploads formatted to the same
            # instant would collide — confirm acceptable for this app.
            id = int(datetime.datetime.now().strftime("%Y%m%d%H%M%f"))
            user_img = UserImage(id, session['user_id'], photo_url, img.size, None, uploaddate=datetime.date.today())
            sess.add(user_img)
            sess.commit()
            # Hand the image to the background worker via the shared queue.
            image_queue.put((id, img))
        session['file_urls'] = file_urls
        return "uploading..."
    # return dropzone template on GET request
    return render_template('upload_image.html')
@app.route('/view_images')
@login_required
def view_images():
    """Render the gallery from the url list cached in the session."""
    if "file_urls" not in session:
        # nothing uploaded or cached yet in this session
        return redirect(url_for('home'))
    cached_urls = session['file_urls']
    print(cached_urls)
    return render_template('view_images.html', file_urls=cached_urls)
@app.route('/view_albums')
@login_required
def view_albums():
    """Show one representative image per classification album."""
    Session = scoped_session(sessionmaker(bind=engine))
    sess = Session()
    query = sess.query(UserImage, ClassImage).filter(UserImage.classid == ClassImage.id).distinct(ClassImage.id)
    result = query.all()
    list_class = []
    classid = []
    for userimg in result:
        if userimg[1].id not in classid:
            # first image seen for this class becomes the album cover
            photo_info = {'url': userimg[0].imgurl, 'width': userimg[0].imgw, 'height': userimg[0].imgh, 'image_id': userimg[0].id, 'classid': userimg[1].id, 'classname': userimg[1].name}
            classid.append(userimg[1].id)
            list_class.append(photo_info)
    # FIX: the session was previously leaked (never closed in this view)
    sess.close()
    return render_template('image_album.html', list_class = list_class)
@app.route('/view_images_of_album/<int:id>')
@login_required
def view_images_of_album(id):
    """List the current user's images belonging to album/class *id*."""
    Session = scoped_session(sessionmaker(bind=engine))
    sess = Session()
    rows = (sess.query(UserImage)
            .filter(UserImage.userid.in_([session['user_id']]))
            .filter(UserImage.classid == id)
            .all())
    file_urls = [{'url': r.imgurl, 'width': r.imgw, 'height': r.imgh, 'id': r.id} for r in rows]
    sess.close()
    return render_template('view_images.html', file_urls=file_urls)
@app.route('/show_duplicated_images')
@login_required
def show_duplicated_images():
    """Run similarity clustering, then show the grouped results."""
    # NOTE(review): image_processer is the module global set only in the
    # __main__ block; if the app is served by a WSGI server instead, it stays
    # None and this call raises AttributeError — confirm deployment path.
    image_processer.image_clustering()
    return redirect(url_for('view_similar_images'))
def start_processes():
    """Start the background image-processing worker fed by image_queue."""
    return image_processing.start_process(image_queue)
# Dev entry point: launch the background worker, then the Flask dev server.
if __name__ == "__main__":
    image_processer = start_processes()
    app.run(host='0.0.0.0', port=4000, debug=True)
|
import random
class Card:
    """A playing card; comparisons rank by value first, suit as tie-break."""

    HEARTS = "Hearts"
    DIAMONDS = "Diamonds"
    SPADES = "Spades"
    CLUBS = "Clubs"
    # Values in ascending order of strength.
    VALUES = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
    # Suit tie-break order, weakest first.
    SUITS_RANK = ["Spades", "Clubs", "Diamonds", "Hearts"]

    def __init__(self, value, suit):
        self.value = value
        self.suit = suit

    def to_str(self):
        """Return e.g. '10♥' — the value followed by the suit symbol.

        BUG FIX: the spade and club symbols were swapped; U+2660 is the
        BLACK SPADE SUIT and U+2663 the BLACK CLUB SUIT.
        """
        icons = {
            "Hearts": '\u2665',
            "Diamonds": '\u2666',
            "Spades": '\u2660',
            "Clubs": '\u2663',
        }
        return f"{self.value}{icons[self.suit]}"

    def equal_suit(self, other_card):
        """True when both cards share a suit."""
        return self.suit == other_card.suit

    def more(self, other_card):
        """True when self outranks other_card (value first, then suit)."""
        if self.value != other_card.value:
            return Card.VALUES.index(self.value) > Card.VALUES.index(other_card.value)
        else:
            return Card.SUITS_RANK.index(self.suit) > Card.SUITS_RANK.index(other_card.suit)

    def less(self, other_card):
        """True when self ranks below other_card (value first, then suit)."""
        if self.value != other_card.value:
            return Card.VALUES.index(self.value) < Card.VALUES.index(other_card.value)
        else:
            return Card.SUITS_RANK.index(self.suit) < Card.SUITS_RANK.index(other_card.suit)
class Deck:
    """A 52-card deck built from Card, supporting show, draw and shuffle."""

    def __init__(self):
        self.cards = []
        self.create_deck()

    def create_deck(self):
        """(Re)fill the deck, ordered by suit then by ascending value."""
        self.cards = [
            Card(value, suit)
            for suit in (Card.HEARTS, Card.DIAMONDS, Card.SPADES, Card.CLUBS)
            for value in Card.VALUES
        ]

    def show(self):
        """Return a one-line human-readable dump of the remaining cards."""
        rendered = ", ".join(card.to_str() for card in self.cards)
        return f'deck[{len(self.cards)}]: {rendered}'

    def draw(self, number):
        """Remove and return the top *number* cards."""
        drawn, self.cards = self.cards[:number], self.cards[number:]
        return drawn

    def shuffle(self):
        """Shuffle the remaining cards in place."""
        random.shuffle(self.cards)
# Create a deck (comments translated from Russian)
deck = Deck()
# Print the deck in the format required by the main task
print(deck.show())
# Shuffle the deck
deck.shuffle()
print(deck.show())
# Draw 5 cards into a "hand"
hand = deck.draw(5)
# Print the deck to confirm the top 5 cards are gone
print(deck.show())
# Print the cards in the hand
print(' '.join([card.to_str() for card in hand]))
print(Card('10', Card.DIAMONDS).more(Card('10', Card.HEARTS)))
deck = Deck()
deck.shuffle()
print(deck.show())
# Take two cards from the deck
card1, card2 = deck.draw(2)
# Exercise the .less() and .more() methods
if card1.more(card2):
    print(f"{card1.to_str()} больше {card2.to_str()}")
if card1.less(card2):
    print(f"{card1.to_str()} меньше {card2.to_str()}")
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Title :demo_summary.py
# Description :
# Author :Devon
# Date :2018/6/1
# Version :1.0
# Platform : windows
# Usage :python test4.py
# python_version :2.7.14
#==============================================================================
import MySQLdb
import sys,os
# 脚本来源网站:https://www.jb51.net/list/list_97_1.htm
def get_data(sql):
db = MySQLdb.connect("10.9.2.72", "test_kill_user", "123123")
cursor = db.cursor()
cursor.execute(sql)
d = cursor.fetchall()
print d[0]
print len(d)
for row in xrange(len(d)):
id = d[row][0]
user = d[row][1]
cmd = d[row][4]
time = d[row][5]
state = d[row][6]
# print type(cmd),cmd
try:
if cmd.index('Sleep') > -1 and time > 100:
if user.index('baymax_php') > -1:
print id,user,time,cmd,state
db1 = MySQLdb.connect("10.9.2.72", "test_kill_user", "123123")
cursor1 = db1.cursor()
kill_sql = 'kill %d;'%(id)
print kill_sql
cursor.execute(kill_sql)
db1.close()
except Exception,o:
pass
print type(d)
print d
db.close()
return d
def test_sql_data(sql):
    """Execute *sql* and return every row; `with db` commits/cleans up."""
    connection = MySQLdb.connect("10.9.2.72", "test_kill_user", "123123")
    with connection:
        cur = connection.cursor()
        cur.execute(sql)
        return cur.fetchall()
# Iterate over a dict (translated from Chinese)
def traversal_dict():
    """Demo: print each key/value pair of a small dict (Python 2)."""
    a = {'a': '1', 'b': '2', 'c': '3'}
    for key in a:
        print key + " :" + a[key]
# 获取当前时间对应unix时间戳
import datetime
import time
class GetNowTimestamp(object):
    """Helper that reports the current time as a Unix timestamp."""

    def get_now_timestamp(self):
        """Return the current local time as seconds since the epoch (float)."""
        now = datetime.datetime.now()
        return time.mktime(now.timetuple())
def common_code_seg():
    """Demo: deduplicate a list by round-tripping through a set (Python 2)."""
    # Use a set to drop duplicate elements (order is not preserved).
    targetList = [1,1,2,22,2,2,2,12]
    targetList = list(set(targetList))
    print targetList
def main():
    """Entry point: exercise the processlist query helper."""
    # get_data('show full processlist;')
    test_sql_data('show full processlist;')
# Script entry point; currently only the set-dedup demo is enabled.
if __name__ == '__main__':
    # main()
    # traversal_dict()
    # a1 = GetNowTimestamp()
    # print a1.get_now_timestamp()
    common_code_seg()
|
import inspect
from Dto import Student
from Dto import Classroom
from Dto import Course
class _Courses:
    """Data-access object for the `courses` table."""

    def __init__(self, conn):
        self._conn = conn  # shared DB-API connection

    def insert(self, course):
        """Persist one course row."""
        self._conn.execute("""
            INSERT INTO courses (id, course_name, student, number_of_students, class_id, course_length) VALUES (?, ?, ?, ?, ?, ?)
        """, [course.id, course.course_name, course.student, course.number_of_student, course.class_id, course.course_length])

    def find(self, id):
        """Return the Course with the given id (raises if missing)."""
        cur = self._conn.cursor()
        cur.execute("""
            SELECT * FROM courses WHERE id=(?)
        """, [id])
        return Course(*cur.fetchone())

    def num_of_courses(self):
        """Return the number of course rows."""
        cur = self._conn.cursor()
        cur.execute("""
            SELECT COUNT(id) FROM Courses
        """)
        return cur.fetchone()[0]

    def find_all(self):
        """Return every course as a Course object."""
        cur = self._conn.cursor()
        rows = cur.execute("""
            SELECT * FROM Courses
        """).fetchall()
        return [Course(*row) for row in rows]

    def delete_course(self, id):
        """Delete the course row with the given id."""
        self._conn.cursor().execute("""
            DELETE FROM courses WHERE id=(?)""", [id])

    def find_by_class_id(self, classroom_id):
        """Return the course held in classroom *classroom_id*, or None."""
        cur = self._conn.cursor()
        cur.execute("""
            SELECT * FROM courses WHERE class_id = (?)""", [classroom_id])
        row = cur.fetchone()
        return Course(*row) if row is not None else None

    def find_all_by_tuple(self):
        """Return every course as a raw column tuple."""
        cur = self._conn.cursor()
        return cur.execute("""
            SELECT id, course_name, student, number_of_students, class_id, course_length FROM courses
        """).fetchall()
class _Students:
    """Data-access object for the `students` table (grade -> headcount)."""

    def __init__(self, conn):
        self._conn = conn  # shared DB-API connection

    def insert(self, grade, count):
        """Insert one (grade, count) row."""
        self._conn.execute("""
            INSERT INTO students VALUES (?, ?)
        """, [grade, count])

    def find(self, grade):
        """Return the Student for *grade* (raises if missing)."""
        cur = self._conn.cursor()
        cur.execute("""
            SELECT grade, count FROM students WHERE grade = ?
        """, [grade])
        return Student(*cur.fetchone())

    def deduct_amount_by_type(self, student, num_to_deduct):
        """Decrease *student*'s stored count by *num_to_deduct*."""
        self._conn.execute("""
            UPDATE students SET count = (?) WHERE grade = (?) """, [student.count - num_to_deduct, student.grade])

    def find_all_by_tuple(self):
        """Return every student row as a raw tuple."""
        cur = self._conn.cursor()
        return cur.execute("""
            SELECT grade, count FROM students""").fetchall()
class _Classrooms:
    """Data-access object for the `classrooms` table."""

    def __init__(self, conn):
        self._conn = conn  # shared DB-API connection

    def insert(self, classroom):
        """Persist one classroom row."""
        self._conn.execute("""
            INSERT INTO classrooms (id, location, current_course_id, current_course_time_left) VALUES (?, ?, ?, ?)
        """, [classroom.id, classroom.location, classroom.current_course_id, classroom.current_course_time_left])

    def find(self, id):
        """Return the Classroom with the given id (raises if missing)."""
        cur = self._conn.cursor()
        cur.execute("""
            SELECT id, location, current_course_id, current_course_time_left FROM classrooms WHERE id=(?)
        """, [id])
        return Classroom(*cur.fetchone())

    def update_course(self, id, course_id, course_length):
        """Assign course *course_id* (running *course_length*) to classroom *id*."""
        self._conn.execute("""
            UPDATE classrooms SET current_course_id = (?), current_course_time_left = (?) WHERE id = (?)
        """, [course_id, course_length, id])

    def decrease_time(self, classroom):
        """Tick the remaining course time down by one."""
        self._conn.execute("""
            UPDATE classrooms SET current_course_time_left=(?) WHERE id=(?)""", [classroom.current_course_time_left - 1, classroom.id])

    def reset_classroom(self, classroom):
        """Mark the classroom as free (course id 0)."""
        self._conn.execute("""
            UPDATE classrooms SET current_course_id = 0 WHERE id = (?)""", [classroom.id])

    def find_all(self):
        """Return every classroom as a Classroom object."""
        cur = self._conn.cursor()
        rows = cur.execute("""
            SELECT id, location, current_course_id, current_course_time_left FROM classrooms
        """).fetchall()
        return [Classroom(*row) for row in rows]

    def find_all_by_tuple(self):
        """Return every classroom row as a raw tuple."""
        cur = self._conn.cursor()
        return cur.execute("""
            SELECT id, location, current_course_id, current_course_time_left FROM classrooms
        """).fetchall()
import matplotlib.pyplot as plt
from os import chdir
from iris import load_cube, Constraint
from iris.analysis import MEAN, STD_DEV
from iris.analysis.maths import abs
from iris.time import PartialDateTime
import cartopy.crs as ccrs
from cartopy.feature import LAND
from numpy import sqrt
from numpy.ma import masked
from sys import argv
def scatter_field(cube, significant, label='', symmetric_cbar=False):
    """Scatter-plot the unmasked, significant points of *cube* on a world map.

    :param cube: 2-D iris cube (latitude x longitude) of innovation statistics.
    :param significant: boolean array, same shape; only True cells are plotted.
    :param label: colorbar label.
    :param symmetric_cbar: use a diverging palette centred on zero.
    """
    # Convert innovations into a form that can be scatter plotted
    lats = []; lons = []; innovs = []
    # Loop over latitude
    for lat_i in range(len(cube.data[:, 1])):
        # Loop over longitude
        for lon_i in range(len(cube.data[1, :])):
            # Don't plot missing values
            if cube.data[lat_i, lon_i] is not masked and significant[lat_i, lon_i]:
                lats.append(cube.coord('latitude').points[lat_i])
                lons.append(cube.coord('longitude').points[lon_i])
                innovs.append(cube.data[lat_i, lon_i])
    # Diverging palette for signed fields, sequential otherwise.
    color_map = plt.get_cmap('RdYlBu') if symmetric_cbar else plt.get_cmap('inferno')
    fig = plt.figure(figsize=(8, 5))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.add_feature(LAND, facecolor='0.5')
    ax.set_global()
    cont = plt.scatter(lons, lats, s=22, c=innovs, transform=ccrs.PlateCarree(), cmap=color_map, zorder=2)
    cb = plt.colorbar(cont, orientation='horizontal', fraction=0.0375, pad=0.02, aspect=50)
    cb.set_label(label)
    plt.tight_layout()
    if symmetric_cbar:
        # Widen the colour limits symmetrically about zero.
        mean_innov = 0.0
        width = max(max(innovs) - mean_innov, mean_innov - min(innovs))
        plt.clim(mean_innov - width, mean_innov + width)
# Get experiment name
experiment = argv[1]
# Set precision (subdirectory holding the background files)
prec = argv[2]
# Choose field to plot
field = argv[3]
# Set sigma level if present
if len(argv) > 4:
    sigma = float(argv[4])
# Change to relevant experiment directory
chdir(f'../experiments/{experiment}')
# Get background ensemble mean
background = load_cube(f'{prec}/gues_mean.nc', field)
# Get observations
observations = load_cube('obs.nc', field)
# Compute innovations
innovations = background - observations
# Extract level if not computing surface pressure innovations
if field != 'Surface Pressure [Pa]':
    innovations = innovations.extract(Constraint(atmosphere_sigma_coordinate=sigma))
# Compute time statistics after March 1st 00:00
innovations = innovations.extract(Constraint(time=lambda t: t > PartialDateTime(month=3, day=1)))
# Compute mean and standard deviation of innovations
innov_mean = innovations.collapsed('time', MEAN)
innov_std = innovations.collapsed('time', STD_DEV)
# Compute mask for statistical significance (> 1.96σ from 0.0)
significant = abs(innov_mean/(innov_std/sqrt(innovations.coord('time').points.shape[0]))).data > 1.96
# Plot innovation mean and standard deviation on a map
scatter_field(innov_mean, significant, label=field, symmetric_cbar=True)
filename = f'innovs_{"".join([c for c in field if c.isalnum()]).rstrip()}_{prec}'
if len(argv) > 4:
    filename += f'_{sigma}'
# BUG FIX: the computed filename was built above but never used — the output
# was saved under the literal name '(unknown).pdf'.
plt.savefig(f'{filename}.pdf', bbox_inches='tight')
plt.show()
|
class Timer:
    """Stopwatch-style timer storing zero-padded 'h'/'m'/'s'/'ms' strings in `value`."""

    def __init__(self, font: object, init_value: dict = None) -> None:
        self.font = font      # font object used by render()
        self.active = False   # external code toggles this to run the timer
        if init_value:
            self.value = init_value
        else:
            self.value = {'h': '00', 'm': '00', 's': '00', 'ms': '000'}

    def reset(self) -> None:
        """Zero the timer."""
        self.value = {'h': '00', 'm': '00', 's': '00', 'ms': '000'}

    def increment_ms(self, incr_value: float) -> None:
        """Advance the timer by *incr_value* milliseconds.

        Fixes over the original implementation:
        * overflow used to discard the remainder (e.g. +2000 ms advanced the
          seconds by only one and reset ms to '000'); carries now propagate
          exactly through ms -> s -> m -> h via divmod;
        * fractional increments used to corrupt the stored string and crash
          the next int() conversion; fractions are now truncated up front.
        """
        ms = int(self.value['ms']) + int(incr_value)
        s = int(self.value['s'])
        m = int(self.value['m'])
        h = int(self.value['h'])
        carry, ms = divmod(ms, 1000)
        s += carry
        carry, s = divmod(s, 60)
        m += carry
        carry, m = divmod(m, 60)
        h += carry
        # store back as zero-padded strings, the format render() expects
        self.value = {'h': f'{h:02d}', 'm': f'{m:02d}', 's': f'{s:02d}', 'ms': f'{ms:03d}'}

    def render(self, display: object, pos: tuple, color: tuple = (237, 224, 212)) -> None:
        """Blit 'hh:mm:ss:mmm' at *pos* using the timer's font."""
        text = f"{self.value['h']}:{self.value['m']}:{self.value['s']}:{self.value['ms']}"
        display.blit(self.font.render(text, 0, color), pos)
|
import socket
import time

# Simple demo TCP client: send a greeting, wait, print the server's reply.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('localhost', 8000))
sent = client_socket.send('From client_1'.encode('utf-8'))
print(sent)  # number of bytes actually queued for sending
time.sleep(10)
data = client_socket.recv(100)
print(data)
# FIX: removed the `while True: b = 1` busy loop that spun at 100% CPU and
# made the close() below unreachable.
client_socket.close()
# -*- coding: utf-8 -*-
#############
#
# Copyright - Nirlendu Saha
#
# author - nirlendu@gmail.com
#
#############
import inspect
import sys
from app_core import core_interface as core
from libs.logger import app_logger as log
def new_person(
        user_name,
        person_name,
):
    """Create a new person record through the core interface.

    :param user_name: name of the user owning the record
    :param person_name: name of the person to insert
    :return: result of core.new_person
    """
    # Trace the insert, then delegate straight to the core layer.
    log.debug('New Person INSERT')
    return core.new_person(user_name=user_name, person_name=person_name)
import plyvel
from tqdm import tqdm

# Scan the whole artist LevelDB and collect (decoded) keys whose value is
# the byte string b"Japan".
db = plyvel.DB("artist.ldb", create_if_missing=False)
try:
    r = [k.decode("utf-8") for k, v in tqdm(db) if v == b"Japan"]
finally:
    db.close()  # the original leaked the DB handle
for k in r:
    print(k)
print("Total: {}".format(len(r)))
|
from brownie import *
from time import sleep
import itertools
from variables import *
import itertools
import json
import os
import concurrent.futures
# Module-wide mutable state shared by the swarm helpers below.
COUNTER = itertools.count()  # round counter (shadowed by a local in _initSwarm)
ACC_LIST = []  # dicts of {id, address, pk} for every account created this run
ACC_INDEX = itertools.count()  # monotonically increasing account ids
# Expectations
def _bnb_price():
    """Return the BNB/USD price implied by the BUSD/WBNB pair reserves.

    NOTE(review): computes reserveBNB / reserveUSD from getReserves();
    whether the pair's slot0/slot1 really map to (USD, BNB) depends on
    the token ordering of `busd_wbnb_addr` — confirm on-chain.
    """
    # Hard requirement: BSC mainnet only (chain id 56).
    assert chain.id == 56, "_bnbPrice: WRONG NETWORK. This function only works on bsc mainnet"
    pair_busd = Contract.from_explorer(busd_wbnb_addr)
    (reserveUSD, reserveBNB, _) = pair_busd.getReserves()
    price_busd = reserveBNB / reserveUSD
    return round(price_busd, 2)
def _quote(amin, reserve_in, reserve_out):
if reserve_in == 0 and reserve_out == 0:
return 'empty reserves, no quotation'
amount_in_with_fee = amin * 998
num = amount_in_with_fee * reserve_out
den = reserve_in * 1000 + amount_in_with_fee
return round(num / den)
def _expectations(my_buy, external_buy, reserve_in, reserve_out, queue_number):
    """Simulate `queue_number - 1` external buys landing before ours and
    quote our own buy against the shifted reserves.

    :param my_buy: our buy size (input-token units)
    :param external_buy: size of each competing buy ahead of us
    :param reserve_in: starting input-token reserve
    :param reserve_out: starting output-token reserve
    :param queue_number: our position in the buy queue (1 = first)
    :return: (tokens received, price paid per token, capital entered before us)
    """
    consumed_in = 0
    consumed_out = 0
    # Replay each buy ahead of us, updating the virtual reserves.
    for _ in range(1, queue_number):
        out = _quote(external_buy, reserve_in + consumed_in,
                     reserve_out - consumed_out)
        consumed_in += external_buy
        consumed_out += out
    bought_tokens = _quote(my_buy, reserve_in + consumed_in,
                           reserve_out - consumed_out)
    price_per_token = my_buy / bought_tokens
    return bought_tokens, price_per_token, consumed_in
def expectations(my_buy, external_buy, reserve_in, reserve_out, base_asset = "BNB"):
    """Print a table of expected buy outcomes for queue positions 1..29.

    For each assumed number of competing buys landing before ours, prints
    the tokens received, price per token (plus a USD column when
    base_asset == "BNB", using the live BUSD/WBNB pair), and the capital
    entered ahead of us.  Blocks at the end for operator confirmation.

    :param my_buy: our buy size in BNB
    :param external_buy: assumed size of each competing buy in BNB
    :param reserve_in: initial BNB reserve of the pool
    :param reserve_out: initial token reserve of the pool
    :param base_asset: "BNB" adds the $/token column; anything else omits it
    """
    bnb_p = _bnb_price()  # live price lookup — requires BSC mainnet
    print(
        f'--> if the liq added is {reserve_in} BNB / {reserve_out} tokens and I want to buy with {my_buy} BNB : \n')
    for i in range(1, 30, 1):
        (bought_tokens, price_per_token, add_in) = _expectations(
            my_buy, external_buy, reserve_in, reserve_out, i)
        if base_asset == "BNB":
            print(
                f'amount bought: {bought_tokens} | {round(price_per_token, 5)} BNB/tkn | {round(price_per_token * bnb_p, 7) } $/tkn | , capital entered before me: {add_in} BNB')
        else:
            print(
                f'amount bought: {bought_tokens} | {round(price_per_token, 5)} BNB/tkn| , capital entered before me: {add_in} BNB')
    print(f'\n--> BNB price: {bnb_p} $')
    print("WARNING: exit and restart brownie to be sure variables corrections are taken into account!\n")
    # Deliberate pause so the operator can sanity-check before continuing.
    input("Press any key to continue, or ctrl+c to stop and try other expectation parameters")
# Swarmer
def create_temp_address_book(tmp_path):
    """(Re)create the temporary address-book file as an empty file.

    :param tmp_path: path of the temporary file to remove and recreate
    """
    try:
        os.remove(tmp_path)
    except OSError:
        # Narrowed from the original bare `except:`: the file may simply
        # not exist yet; any real problem (e.g. a directory at that path)
        # will surface when we reopen it below.
        pass
    # Opening in "w" mode creates the file empty.
    with open(tmp_path, "w"):
        pass
def save_address_book(tmp_path, path):
    """Load the temporary address book, refresh each entry's on-chain
    balance, and write the enriched JSON to its final location.

    :param tmp_path: temp file written by _initSwarm (JSON list of accounts)
    :param path: destination path of the final address book
    """
    print("---> Saving address book...")
    with open(tmp_path, "r") as address_book:
        data = json.load(address_book)
    for account in data:
        addr = account["address"]
        balance = accounts.at(addr).balance() / 10**18  # wei -> BNB
        account["balance"] = balance
    with open(path, "w") as final_address_book:
        json.dump(data, final_address_book, indent=2)
    print("Done!")
def create_account():
    """Generate a fresh keypair, register it with brownie, and record it.

    Appends a {id, address, pk} dict to the module-level ACC_LIST and
    returns the brownie account object.
    """
    acct_id = next(ACC_INDEX)
    raw = web3.eth.account.create()
    acct = accounts.add(raw.key.hex())
    ACC_LIST.append({
        "id": acct_id,
        "address": acct.address,
        "pk": acct.private_key,
    })
    return acct
def swarming(acc):
    """One swarm step: create a brand-new account and fund it with half
    of `acc`'s balance.

    :param acc: address-book dict with at least "id" and "pk"
    :return: human-readable summary of the transfer
    """
    sleep(10)  # NOTE(review): presumably paces RPC/node load — confirm
    new_account = create_account()
    pk = acc["pk"]
    bee = accounts.add(pk)
    tx = bee.transfer(
        to=new_account.address,
        amount=bee.balance() // 2,  # half stays behind for gas / later rounds
        silent=True,
        gas_limit=22000,
        allow_revert=True)
    return f'bee{acc["id"]} --> paid {tx.value / 10**18} BNB to new_account'
def _initSwarm(tmp_path, path, rounds, bnb_amount):
    """Build the account swarm: seed one account from the admin wallet,
    then run `rounds` rounds where every existing bee funds one newly
    created account (so the swarm roughly doubles each round), and
    finally persist the address book.

    :param tmp_path: temp JSON path for the raw account list
    :param path: final address-book path
    :param rounds: number of doubling rounds
    :param bnb_amount: BNB sent from the admin wallet to account 0
    """
    create_temp_address_book(tmp_path)
    print("(admin account)")
    me = accounts.load(DISPERSER_ACCOUNT)
    old_balance = me.balance()
    print(f'\n--> seed account balance: {old_balance/10**18} BNB\n')
    account0 = create_account().address
    print("\nCREATING ACCOUNTS SWARM...\n")
    tx = me.transfer(to=account0, amount=f'{bnb_amount} ether', silent=True)
    print(f'seed --> paid {tx.value / 10**18} BNB to new_account')
    # spreading bnb among the swarm
    COUNTER = itertools.count()  # local counter shadows the module-level one
    for _ in range(rounds):
        n = next(COUNTER)
        print(f'\nROUND n°{n}\n')
        # Snapshot: swarming() appends to ACC_LIST while we iterate.
        tmp_acc_list = ACC_LIST.copy()
        with concurrent.futures.ThreadPoolExecutor() as executor:
            results = [executor.submit(swarming, acc)
                       for acc in tmp_acc_list]
            for f in concurrent.futures.as_completed(results):
                print(f.result())
    # NOTE(review): opened in append mode — reruns over a non-empty temp
    # file would produce concatenated (invalid) JSON; verify callers
    # always pass a freshly truncated tmp_path.
    with open(tmp_path, "a") as address_book:
        json.dump(ACC_LIST, address_book, indent=2)
    print('\nSWARM CREATED!\n')
    print(f'Total accounts created: {len(ACC_LIST)}\n')
    save_address_book(tmp_path, path)
def _refund(entry, me):
    """Sweep one bee's balance (minus a gas allowance) back to `me`.

    :param entry: address-book dict with "id" and "pk"
    :param me: destination (seed) account
    :return: summary string, or "empty balance" when nothing to sweep
    """
    pk = entry["pk"]
    acc = accounts.add(pk)
    if acc.balance() > 0:
        # 21000 * 10**10 wei is reserved for gas.  NOTE(review): a
        # positive balance below that makes the amount negative —
        # confirm brownie rejects such a transfer cleanly.
        tx = acc.transfer(me, amount=acc.balance() -
                          21000 * 10**10, required_confs=0, silent=True)
        return f'bee{entry["id"]} --> paid {tx.value/10**18} to seed address'
    else:
        return "empty balance"
def refund(path):
    """Sweep every address-book balance back to the seed account and wait
    until no transaction remains pending.

    :param path: path of the final address-book JSON
    """
    print("---> loading seed account")
    me = accounts.load('press1')
    with open(path, "r") as book:
        data = json.load(book)
    with concurrent.futures.ThreadPoolExecutor() as executor:
        results = [executor.submit(_refund, acc, me)
                   for acc in data]
        for f in concurrent.futures.as_completed(results):
            print(f.result())
    # Poll brownie's global tx history until nothing reports status -1
    # (brownie uses -1 for still-pending transactions).
    pending = [True]
    while True in pending:
        pending.clear()
        for tx in history:
            pending.append(tx.status == -1)
        print(f'remaining pending tx: {pending.count(True)}')
        sleep(1)
    print(f'\nREFUND DONE! --> seed balance : {me.balance()/10**18} BNB')
def _checkBalances(entry):
    """Report the on-chain balance of one address-book entry.

    :param entry: address-book dict with "id" and "pk"
    :return: (balance_in_wei, 1) when the balance exceeds the dust
             threshold of 0.0002 BNB, else (0, 0)
    """
    bee = accounts.add(entry["pk"])
    wei = bee.balance()
    if wei / 10**18 <= 0.0002:
        return 0, 0
    print(f'bee{entry["id"]} : non empty balance: {wei/10**18} BNB')
    return wei, 1
def swarmer(tmp_path, path, rounds, bnb_amount):
    """Interactive entry point: offer to refund leftovers from a previous
    swarm, then (optionally) initialise a new one.

    :param tmp_path: temp JSON path used while building the swarm
    :param path: final address-book path (must already exist)
    :param rounds: doubling rounds for the new swarm
    :param bnb_amount: BNB seeded into the swarm's first account
    """
    print("Checking for existing, non empty address book...")
    with open(path, "r") as book:
        data = json.load(book)
    total_dust = 0
    total_nonempty_bee = 0
    # Tally leftover funds across every previously created account.
    for entry in data:
        (balance, bee) = _checkBalances(entry)
        total_dust += balance
        total_nonempty_bee += bee
    print(
        f'\nFound an already existing address book with {total_nonempty_bee} non empty balance addresses')
    print(f'Total BNB to claim: {total_dust/10**18}\n')
    if total_dust > 0:
        ipt = input("Launch refund? ('y' for yes, any other key for no)")
        if ipt.lower() == "y":
            refund(path)
        else:
            return
    print(
        f'\nReady to launch new swarm. Parameters:\n\t- Rounds: {rounds} ({2**rounds} addresses)\n\t- Number of BNB to spread: {bnb_amount}\n')
    ipt = input("Initialise new swarm? ('y' for yes, any other key for no)")
    if ipt.lower() == "y":
        _initSwarm(tmp_path, path,rounds, bnb_amount )
    else:
        return
def createBeeBook():
    """Run the swarmer with the BEE_* constants from variables.py."""
    swarmer(BEEBOOK_TMP_PATH, BEEBOOK_PATH, BEE_ROUNDS, BEE_BNB_AMOUNT )
# Trigger
def configureTrigger():
    """Interactively configure the on-chain TRIGGER contract for a snipe:
    show the current parameters, call configureSnipe as the owner, top up
    the contract's WBNB if needed, and verify the stored configuration.
    """
    tokenToBuy = Contract.from_explorer(ttb_addr)
    print(
        f'\nCURRENT CONFIGURATION:\n\nWANT TO BUY AT LEAST {AMOUNT_OUT_MIN_TKN/10**18} {tokenToBuy.name()} (${tokenToBuy.symbol()})\nWITH {AMOUNT_IN_WBNB / 10**18} WBNB\n')
    ipt = input(
        "---> If this is ok, press 'y' to call configureSnipe, any other key to skip")
    if ipt.lower() == 'y':
        print("\n---> loading TRIGGER owner and admin wallet:")
        print("(owner pwd)")
        me = accounts.load(TRIGGER_OWNER)
        print("(admin pwd)")
        admin = accounts.load(TRIGGER_ADMIN)
        tkn_balance_old = tokenToBuy.balanceOf(admin)  # snapshot before snipe
        print("\n---> configuring TRIGGER for sniping")
        trigger = Contract.from_explorer(trigger_addr)
        trigger.configureSnipe(PAIRED_TOKEN, AMOUNT_IN_WBNB,
                               ttb_addr, AMOUNT_OUT_MIN_TKN, {'from': me, "gas_price": "10 gwei"})
        # Top up the contract if it holds less WBNB than the snipe needs.
        triggerBalance = Contract.from_explorer(wbnb_addr).balanceOf(trigger)
        if triggerBalance < AMOUNT_IN_WBNB:
            amountToSendToTrigger = AMOUNT_IN_WBNB - triggerBalance + 1
            # Keep at least 1 BNB in the owner wallet for gas.
            assert me.balance() >= amountToSendToTrigger + 10**18 , "STOPING EXECUTION: TRIGGER DOESNT HAVE THE REQUIRED WBNB AND OWNER BNB BALANCE INSUFFICIENT!"
            print(f'---> transfering {amountToSendToTrigger / 10**18} BNB to TRIGGER')
            me.transfer(trigger, amountToSendToTrigger)
        # Read back and verify every stored parameter.
        config = trigger.getSnipeConfiguration({'from': me})
        assert config[0] == PAIRED_TOKEN
        assert config[1] == AMOUNT_IN_WBNB
        assert config[2] == ttb_addr
        assert config[3] == AMOUNT_OUT_MIN_TKN
        print("\nTRIGGER CONFIGURATION READY\n")
        print(
            f'---> Wbnb balance of trigger: {Contract.from_explorer(wbnb_addr).balanceOf(trigger)/10**18}')
        print(
            f'---> Token balance of admin: {tkn_balance_old/10**18 if tkn_balance_old != 0 else 0}\n\n')
def main():
    """Brownie script entry point: expectation preview, bee-book
    creation, then sniper-trigger configuration.

    NOTE(review): mbuy / ext_buy / reserve_in / reserve_out are not
    defined in this file — presumably star-imported from `variables`;
    confirm before running.
    """
    print("\n///////////// EXPECTATION PHASE //////////////////////////\n")
    expectations(mbuy, ext_buy, reserve_in, reserve_out)
    print("\n///////////// BEE BOOK CREATION PHASE //////////////////////////////\n")
    createBeeBook()
    print("\n///////////// TRIGGER CONFIGURATION PHASE /////////////////////\n")
    configureTrigger()
|
#import sys
#input = sys.stdin.readline
def main():
    """Read H, W, N from stdin and print ceil(N / max(H, W)) — the number
    of sweeps of the larger dimension needed to reach N."""
    height = int( input())
    width = int( input())
    target = int( input())
    larger = max(height, width)
    # Ceiling division without floats: -(-a // b) == ceil(a / b).
    print(-(-target // larger))


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Funtion:
# Filename:
#########################################################################################
'''
购物车程序:
1、启动程序后,输入用户名密码后,如果是第一次登录,让用户输入工资,然后打印商品列表
2、允许用户根据商品编号购买商品
3、用户选择商品后,检测余额是否够,够就直接扣款,不够就提醒
4、可随时退出,退出时,打印已购买商品和余额
5、在用户使用过程中, 关键输出,如余额,商品已加入购物车等消息,需高亮显示
6、用户下一次登录后,输入用户名密码,直接回到上次的状态,即上次消费的余额什么的还是那些,再次登录可继续购买
7、允许查询之前的消费记录
'''
#########################################################################################
import time, sys
def welcome():
    """Print a short animated dot sequence, then the login banner."""
    for _ in range(6):
        # Flush each dot immediately so the animation is visible.
        print('.', end='', flush=True)
        time.sleep(0.2)
    print("\nwelcome! you are login...")  # greeting after the animation
def file_2_dict(file_handle, split_str):
    """Parse "key<split_str>value" lines from a file into a dict.

    :param file_handle: readable text-file-like object positioned at the
        start of the data
    :param split_str: separator between key and value on each line
    :return: dict of stripped key -> stripped value, or -1 when the file
        is empty (sentinel kept for backward compatibility)
    """
    # Bug fix: the original called file_handle.read() to test for
    # emptiness and THEN file_handle.readlines(); read() had already
    # consumed the stream, so readlines() always returned [] and the
    # function returned {} for every non-empty file.  Read once instead.
    content = file_handle.read()
    if content == '':
        return -1
    _dict = {}
    for line in content.splitlines():
        _key = line.split(split_str)[0].strip()
        _val = line.split(split_str)[1].strip()
        _dict[_key] = _val
        print(_dict)  # debug trace preserved from the original
    return _dict
# --- user login bootstrap ---
# (was: def login_fun(file_handle1, file_handle2): returns 1 on success)
username_passwd = {}  # username -> password
err_info = {}  # failed-login bookkeeping (not yet populated)

user_file = open('username_passwd.txt', 'r+', encoding='utf-8')
print(user_file.read())
# Bug fix: the read() above left the handle at EOF, so file_2_dict()
# always saw an empty stream and returned -1; rewind before parsing.
user_file.seek(0)
err_file = open('err.txt', 'r+', encoding='utf-8')

username_passwd = file_2_dict(user_file, ',')
print(username_passwd)
# file_2_dict(err_file, ':')
from ED6ScenarioHelper import *
def main():
    """Scena (map-script) setup for map T2400, 'Ruan': declares the
    scenario file header, the NPC string table, the map entry point,
    character sprite chips and NPC placements, one trigger region, and
    the function table the engine dispatches into (Function_0..15 below).
    Generated/decompiled data — values are engine-specific."""
    # Ruan
    CreateScenaFile(
        FileName = 'T2400 ._SN',
        MapName = 'Ruan',
        Location = 'T2400.x',
        MapIndex = 1,
        MapDefaultBGM = "ed60015",
        Flags = 0,
        EntryFunctionIndex = 0xFFFF,
        Reserved = 0,
        IncludedScenario = [
            '',
            '',
            '',
            '',
            '',
            '',
            '',
            ''
        ],
    )

    # String table; indices start at 8 (see trailing comments).
    BuildStringList(
        '@FileName', # 8
        '特蕾莎院长', # 9
        '达尼艾尔', # 10
        '玛丽', # 11
        '克拉姆', # 12
        '基库', # 13
        '目标用摄像机', # 14
        '鸡', # 15
        '鸡', # 16
        '鸡', # 17
        '梅威海道方向', # 18
    )

    # Map entry point: spawn position, camera, and init/entry functions.
    DeclEntryPoint(
        Unknown_00 = 0,
        Unknown_04 = 0,
        Unknown_08 = 6000,
        Unknown_0C = 4,
        Unknown_0E = 0,
        Unknown_10 = 0,
        Unknown_14 = 9500,
        Unknown_18 = -10000,
        Unknown_1C = 0,
        Unknown_20 = 0,
        Unknown_24 = 0,
        Unknown_28 = 2800,
        Unknown_2C = 262,
        Unknown_30 = 45,
        Unknown_32 = 0,
        Unknown_34 = 360,
        Unknown_36 = 0,
        Unknown_38 = 0,
        Unknown_3A = 0,
        InitScenaIndex = 0,
        InitFunctionIndex = 0,
        EntryScenaIndex = 0,
        EntryFunctionIndex = 1,
    )

    # Sprite sheets (chips) and their pattern files, indexed 00-08.
    AddCharChip(
        'ED6_DT07/CH02590 ._CH', # 00
        'ED6_DT07/CH02640 ._CH', # 01
        'ED6_DT07/CH02630 ._CH', # 02
        'ED6_DT07/CH02570 ._CH', # 03
        'ED6_DT07/CH02320 ._CH', # 04
        'ED6_DT06/CH20051 ._CH', # 05
        'ED6_DT07/CH00040 ._CH', # 06
        'ED6_DT07/CH00041 ._CH', # 07
        'ED6_DT07/CH01720 ._CH', # 08
    )

    AddCharChipPat(
        'ED6_DT07/CH02590P._CP', # 00
        'ED6_DT07/CH02640P._CP', # 01
        'ED6_DT07/CH02630P._CP', # 02
        'ED6_DT07/CH02570P._CP', # 03
        'ED6_DT07/CH02320P._CP', # 04
        'ED6_DT06/CH20051P._CP', # 05
        'ED6_DT07/CH00040P._CP', # 06
        'ED6_DT07/CH00041P._CP', # 07
        'ED6_DT07/CH01720P._CP', # 08
    )

    # NPC placements — presumably in BuildStringList order (entries 9+);
    # confirm against the engine's scena format documentation.
    DeclNpc(
        X = 0,
        Z = 0,
        Y = 33500,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 3,
        ChipIndex = 0x3,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 6000,
        Z = 200,
        Y = 22200,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 1,
        ChipIndex = 0x1,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 5800,
        Z = 0,
        Y = 23600,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 2,
        ChipIndex = 0x2,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 4300,
        Z = 200,
        Y = 22900,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0x181,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 800,
        Z = 6000,
        Y = -13810,
        Direction = 180,
        Unknown2 = 0,
        Unknown3 = 4,
        ChipIndex = 0x4,
        NpcIndex = 0x1C5,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    DeclNpc(
        X = 0,
        Z = 0,
        Y = 0,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0x80,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    # Three identical declarations (chickens): init function 3 drives
    # wandering, talk function 4 yields the egg event.
    DeclNpc(
        X = 44200,
        Z = 240,
        Y = 18540,
        Direction = 45,
        Unknown2 = 0,
        Unknown3 = 8,
        ChipIndex = 0x8,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 4,
    )

    DeclNpc(
        X = 44200,
        Z = 240,
        Y = 18540,
        Direction = 45,
        Unknown2 = 0,
        Unknown3 = 8,
        ChipIndex = 0x8,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 4,
    )

    DeclNpc(
        X = 44200,
        Z = 240,
        Y = 18540,
        Direction = 45,
        Unknown2 = 0,
        Unknown3 = 8,
        ChipIndex = 0x8,
        NpcIndex = 0x101,
        InitFunctionIndex = 0,
        InitScenaIndex = 3,
        TalkFunctionIndex = 0,
        TalkScenaIndex = 4,
    )

    DeclNpc(
        X = 1060,
        Z = 0,
        Y = -23220,
        Direction = 0,
        Unknown2 = 0,
        Unknown3 = 0,
        ChipIndex = 0x0,
        NpcIndex = 0xFF,
        InitFunctionIndex = -1,
        InitScenaIndex = -1,
        TalkFunctionIndex = -1,
        TalkScenaIndex = -1,
    )

    # Trigger region: fires scena function 6 when entered.
    DeclEvent(
        X = -1880,
        Y = 2000,
        Z = 4450,
        Range = 2800,
        Unknown_10 = 0xFFFFFC18,
        Unknown_14 = 0x14B4,
        Unknown_18 = 0x0,
        Unknown_1C = 6,
    )

    # Function table: index -> label in the decompiled code below.
    ScpFunction(
        "Function_0_252",  # 00, 0
        "Function_1_2D9",  # 01, 1
        "Function_2_2EC",  # 02, 2
        "Function_3_302",  # 03, 3
        "Function_4_455",  # 04, 4
        "Function_5_4E1",  # 05, 5
        "Function_6_507",  # 06, 6
        "Function_7_1CE4",  # 07, 7
        "Function_8_1D31",  # 08, 8
        "Function_9_1DE5",  # 09, 9
        "Function_10_1E91",  # 0A, 10
        "Function_11_287F",  # 0B, 11
        "Function_12_28AA",  # 0C, 12
        "Function_13_28EE",  # 0D, 13
        "Function_14_2965",  # 0E, 14
        "Function_15_29DA",  # 0F, 15
    )
# Map init function (index 0): checks story/scenario flags, queues
# one-shot events, and sets a character work flag.  The `def ...: pass`
# line is a decompiler marker; the real body is the label stream below.
def Function_0_252(): pass

label("Function_0_252")
# Chain of scenario-flag tests; every taken branch jumps to loc_28B, so
# in effect any of these flags short-circuits straight to the event
# checks (decompiled fallthrough structure).
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x85, 5)), scpexpr(EXPR_END)), "loc_25C")
Jump("loc_28B")
label("loc_25C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x85, 0)), scpexpr(EXPR_END)), "loc_266")
Jump("loc_28B")
label("loc_266")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x84, 0)), scpexpr(EXPR_END)), "loc_270")
Jump("loc_28B")
label("loc_270")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x83, 6)), scpexpr(EXPR_END)), "loc_27A")
Jump("loc_28B")
label("loc_27A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x82, 1)), scpexpr(EXPR_END)), "loc_284")
Jump("loc_28B")
label("loc_284")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x81, 5)), scpexpr(EXPR_END)), "loc_28B")
label("loc_28B")
# One-shot events: each flag gates an Event(), with OP_A3 clearing the
# gate so the event runs only once.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 2)), scpexpr(EXPR_END)), "loc_299")
OP_A3(0x3FA)
Event(0, 10)
label("loc_299")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 3)), scpexpr(EXPR_END)), "loc_2B0")
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x54), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_A3(0x3FB)
Event(0, 14)
label("loc_2B0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x7F, 4)), scpexpr(EXPR_END)), "loc_2C7")
OP_4F(0x1, (scpexpr(EXPR_PUSH_LONG, 0x56), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_A3(0x3FC)
Event(0, 15)
label("loc_2C7")
OP_51(0xC, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_OR), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Return()
# Function_0_252 end
# Map entry function (index 1): a single OP_16 call — presumably
# environment/map setup; exact semantics are engine-defined.
def Function_1_2D9(): pass

label("Function_1_2D9")
OP_16(0x2, 0xFA0, 0xFFFE0C00, 0xFFFE5A20, 0x30067)
Return()
# Function_1_2D9 end
# NPC idle loop (index 2): repeats OP_99 forever — the EXPR_PUSH_LONG 1
# condition is always true, so the Jc/Jump pair forms an infinite loop.
def Function_2_2EC(): pass

label("Function_2_2EC")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_301")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("Function_2_2EC")
label("loc_301")
Return()
# Function_2_2EC end
# Chicken wander AI (index 3, init function of the three chicken NPCs):
# random facing, then an endless loop that either turns toward the
# player when close, wanders within an OP_8D-bounded rectangle, or idles.
def Function_3_302(): pass

label("Function_3_302")
SetChrFlags(0xFE, 0x40)
SetChrFlags(0xFE, 0x4)
# Constrain wandering to the rectangle (-8760,13210)-(8700,24630).
OP_8D(0xFE, -8760, 13210, 8700, 24630, 0)
# Random initial facing: RAND mod 360.
OP_51(0xFE, 0x4, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0x168), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
label("loc_330")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_454")
# Player within +/-0xBB8 (3000) units on both axes?
Jc((scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x1), scpexpr(EXPR_PUSH_LONG, 0xBB8), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0x0, 0x1), scpexpr(EXPR_GTR), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x1), scpexpr(EXPR_PUSH_LONG, 0xBB8), scpexpr(EXPR_SUB), scpexpr(EXPR_GET_CHR_WORK, 0x0, 0x1), scpexpr(EXPR_LSS), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x3), scpexpr(EXPR_PUSH_LONG, 0xBB8), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0x0, 0x3), scpexpr(EXPR_GTR), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x3), scpexpr(EXPR_PUSH_LONG, 0xBB8), scpexpr(EXPR_SUB), scpexpr(EXPR_GET_CHR_WORK, 0x0, 0x3), scpexpr(EXPR_LSS), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_419")
# Still inside the (padded) wander bounds: face the player and back off.
Jc((scpexpr(EXPR_PUSH_LONG, 0x2238), scpexpr(EXPR_NEG), scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x1), scpexpr(EXPR_LSS), scpexpr(EXPR_PUSH_LONG, 0x339A), scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_ADD), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x3), scpexpr(EXPR_LSS), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_PUSH_LONG, 0x21FC), scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_SUB), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x1), scpexpr(EXPR_GTR), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_PUSH_LONG, 0x6036), scpexpr(EXPR_PUSH_LONG, 0x3E8), scpexpr(EXPR_SUB), scpexpr(EXPR_GET_CHR_WORK, 0xFE, 0x3), scpexpr(EXPR_GTR), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_3EE")
SetChrFlags(0xFE, 0x20)
TurnDirection(0xFE, 0x0, 0)
ClearChrFlags(0xFE, 0x20)
def lambda_3DB():
    OP_94(0x0, 0xFE, 0xB4, 0x12C, 0x1770, 0x0)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_3DB)
Jump("loc_411")
label("loc_3EE")
# Out of bounds: wander back inside the rectangle at speed 6000.
def lambda_3F4():
    OP_8D(0xFE, -8760, 13210, 8700, 24630, 6000)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_3F4)
Sleep(200)
label("loc_411")
Sleep(30)
Jump("loc_451")
label("loc_419")
# Player far away: occasionally (1 in 40) start a slow wander.
Sleep(50)
Jc((scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0x28), scpexpr(EXPR_IMOD), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_451")
OP_44(0xFE, 0x2)
def lambda_439():
    OP_8D(0xFE, -8760, 13210, 8700, 24630, 1500)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_439)
label("loc_451")
Jump("loc_330")
label("loc_454")
Return()
# Function_3_302 end
# Chicken talk function (index 4): when the player interacts, there is a
# random (1 in 10, gated by scena flag 0x0 bit 0) chance of receiving a
# fresh egg (item check OP_3E 0x38B).
def Function_4_455(): pass

label("Function_4_455")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_4E0")
OP_43(0xFE, 0x2, 0x0, 0x5)
OP_22(0x191, 0x0, 0x64)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_IMOD), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_4E0")
Jc((scpexpr(EXPR_EXEC_OP, "OP_3E(0x38B, 1)"), scpexpr(EXPR_END)), "loc_4E0")
TalkBegin(0xFE)
OP_A2(0x0)
SetMessageWindowPos(-1, -1, -1, -1)
FadeToDark(300, 0, 100)
SetChrName("")
OP_22(0x11, 0x0, 0x64)
# "Obtained a fresh egg." system message (runtime strings kept verbatim).
AnonymousTalk(
    (
        scpstr(SCPSTR_CODE_COLOR, 0x0),
        "得到了\x07\x02",
        "新鲜鸡蛋\x07\x00",
        "。\x02",
    )
)
CloseMessageWindow()
OP_56(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(72, 320, 56, 3)
TalkEnd(0xFE)
label("loc_4E0")
Return()
# Function_4_455 end
# Helper counter loop (index 5): increments result register 0 up to 7,
# yielding each iteration (OP_48), then resets it to 0.
def Function_5_4E1(): pass

label("Function_5_4E1")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_LSS), scpexpr(EXPR_END)), "loc_4FC")
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_ADD_SAVE), scpexpr(EXPR_END)))
OP_48()
Jump("Function_5_4E1")
label("loc_4FC")
RunExpression(0x0, (scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Return()
# Function_5_4E1 end
def Function_6_507(): pass
label("Function_6_507")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x82, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_1CE3")
OP_A2(0x410)
EventBegin(0x0)
TurnDirection(0xB, 0x9, 0)
TurnDirection(0x9, 0xB, 0)
TurnDirection(0xA, 0xB, 0)
ClearChrFlags(0xB, 0x80)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0xA, 0x80)
Jc((scpexpr(EXPR_PUSH_VALUE_INDEX, 0xA), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_563")
OP_62(0x0, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
Jump("loc_57A")
label("loc_563")
OP_62(0x0, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
label("loc_57A")
def lambda_580():
OP_6C(45000, 2000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_580)
OP_6D(5200, 0, 22840, 2000)
AddParty(0x35, 0xFF)
SetChrFlags(0x101, 0x1)
SetChrFlags(0x102, 0x1)
SetChrFlags(0x136, 0x1)
SetChrPos(0x101, -690, 0, 17260, 45)
SetChrPos(0x102, -130, 0, 16010, 45)
SetChrPos(0x136, 0, 0, 31800, 90)
SetChrFlags(0x136, 0x80)
ChrTalk(
0xA,
(
"克拉姆,\x01",
"你刚才到哪儿去了嘛!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"科洛丝姐姐担心死了,\x01",
"到处去找你呢!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#770F嘿嘿,用不着担心。\x02\x03",
"今天我可是有大收获哦~\x01",
"弄到了一个超~棒的东西呢~\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"是什么啊?给我们看看吧。\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#771F嘿嘿嘿,看了可别吃惊哦~\x02\x03",
"这东西是我从一个没头脑的\x01",
"大姐头身上弄过来的……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"……你说谁没头脑啊?\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
"#774F啊……\x02",
)
CloseMessageWindow()
def lambda_749():
label("loc_749")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_749")
QueueWorkItem2(0x101, 2, lambda_749)
def lambda_75A():
label("loc_75A")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_75A")
QueueWorkItem2(0x102, 2, lambda_75A)
def lambda_76B():
label("loc_76B")
TurnDirection(0xFE, 0x101, 0)
OP_48()
Jump("loc_76B")
QueueWorkItem2(0xB, 1, lambda_76B)
def lambda_77C():
label("loc_77C")
TurnDirection(0xFE, 0x101, 0)
OP_48()
Jump("loc_77C")
QueueWorkItem2(0x9, 1, lambda_77C)
def lambda_78D():
label("loc_78D")
TurnDirection(0xFE, 0x101, 0)
OP_48()
Jump("loc_78D")
QueueWorkItem2(0xA, 1, lambda_78D)
def lambda_79E():
OP_8E(0xFE, 0x654, 0x0, 0x56E0, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_79E)
def lambda_7B9():
OP_8E(0xFE, 0x618, 0x0, 0x5276, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x102, 1, lambda_7B9)
TurnDirection(0xB, 0x101, 400)
OP_6D(3750, 0, 22850, 1000)
WaitChrThread(0x101, 0x1)
OP_95(0xB, 0x0, 0x0, 0x0, 0x3E8, 0x2710)
OP_62(0xB, 0x0, 1700, 0x28, 0x2B, 0x64, 0x3)
OP_94(0x1, 0xB, 0xB4, 0x1F4, 0xBB8, 0x0)
ChrTalk(
0xB,
"#774F你、你们怎么会来这里……\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#006F哼哼哼。\x01",
"你也太小看我们游击士了吧?\x02\x03",
"像你这种淘气的调皮蛋一翘起尾巴,\x01",
"姐姐我就知道你有什么坏主意!\x01",
"更何况是找到你住在哪里!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#772F可、可恶……\x01",
"乖乖等你捉的是小狗!\x02",
)
)
CloseMessageWindow()
OP_62(0xB, 0x0, 1700, 0x28, 0x2B, 0x64, 0x3)
OP_8E(0xB, 0x15F4, 0x64, 0x52F8, 0x1B58, 0x0)
OP_8E(0xB, 0x2468, 0xC8, 0x529E, 0x1B58, 0x0)
OP_8E(0xB, 0x21CA, 0xFFFFFF38, 0x4AA6, 0x1B58, 0x0)
ChrTalk(
0x101,
"#005F喂!给我站住!\x02",
)
CloseMessageWindow()
def lambda_96D():
OP_6D(5748, -175, 18851, 1000)
ExitThread()
QueueWorkItem(0xC, 1, lambda_96D)
def lambda_985():
OP_8E(0xFE, 0x166C, 0x0, 0x50B4, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_985)
def lambda_9A0():
OP_96(0xFE, 0x18EC, 0xFFFFFF38, 0x42A4, 0x5DC, 0x1B58)
ExitThread()
QueueWorkItem(0xB, 1, lambda_9A0)
WaitChrThread(0x101, 0x1)
WaitChrThread(0xB, 0x1)
OP_8E(0x102, 0x10CC, 0xC8, 0x5974, 0xBB8, 0x0)
OP_51(0xD, 0x1, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x1), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x1), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xD, 0x2, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x2), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x2), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xD, 0x3, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x3), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x3), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_69(0xD, 0x3E8)
OP_6A(0xD)
OP_43(0x101, 0x1, 0x0, 0x9)
OP_43(0xB, 0x1, 0x0, 0x8)
OP_43(0x102, 0x1, 0x0, 0x7)
WaitChrThread(0x102, 0x1)
Sleep(1800)
Fade(1000)
OP_44(0xA, 0xFF)
OP_44(0x9, 0xFF)
TurnDirection(0x9, 0x102, 0)
TurnDirection(0xA, 0x102, 0)
OP_6A(0x0)
ClearMapFlags(0x1)
OP_6D(4800, 185, 22555, 0)
OP_0D()
ChrTalk(
0xA,
(
"那个,大哥哥……\x01",
"这是怎么回事呀?\x02",
)
)
CloseMessageWindow()
SetChrFlags(0xB, 0x40)
SetChrPos(0x101, -4998, 0, 29194, 0)
SetChrPos(0xB, -1998, 0, 29194, 0)
def lambda_AC4():
OP_8E(0xFE, 0x2818, 0x0, 0x7D00, 0x1770, 0x0)
ExitThread()
QueueWorkItem(0xB, 1, lambda_AC4)
def lambda_ADF():
OP_8E(0xFE, 0x256C, 0x0, 0x7C06, 0x1B58, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_ADF)
ChrTalk(
0x9,
(
"难道克拉姆\x01",
"又恶作剧了吗~?\x02",
)
)
CloseMessageWindow()
OP_8C(0x102, 90, 400)
ChrTalk(
0x102,
(
"#019F啊,那个……\x01",
"来打扰你们真是不好意思了。\x02",
)
)
CloseMessageWindow()
WaitChrThread(0xB, 0x1)
WaitChrThread(0x101, 0x1)
OP_44(0x101, 0xFF)
OP_44(0xB, 0xFF)
SetChrFlags(0xB, 0x4)
SetChrPos(0xB, 8300, 200, 31590, 90)
SetChrPos(0x101, 7700, 0, 31590, 90)
def lambda_B8E():
label("loc_B8E")
OP_99(0xFE, 0x0, 0x7, 0xBB8)
OP_48()
Jump("loc_B8E")
QueueWorkItem2(0xB, 1, lambda_B8E)
ChrTalk(
0xB,
(
"#776F#4P我不要~!\x01",
"放开我!快点放开我~!\x02",
)
)
CloseMessageWindow()
Sleep(100)
Fade(1000)
def lambda_BD2():
TurnDirection(0xFE, 0x101, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_BD2)
def lambda_BE0():
TurnDirection(0xFE, 0x101, 400)
ExitThread()
QueueWorkItem(0x102, 1, lambda_BE0)
def lambda_BEE():
TurnDirection(0xFE, 0x101, 400)
ExitThread()
QueueWorkItem(0xA, 1, lambda_BEE)
OP_6D(8880, 0, 32490, 0)
OP_67(0, 9500, -10000, 0)
OP_6B(2640, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
OP_0D()
OP_62(0xB, 0x0, 1700, 0x28, 0x2B, 0x64, 0x3)
ChrTalk(
0xB,
"#776F#2P我要去告发你虐待儿童!\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#009F#3P什~什么虐待儿童啊?\x01",
"小小年纪竟然说出这种话来!\x02\x03",
"我的徽章呢?\x01",
"马上还给我!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#776F#2P说我拿了你的东西,\x01",
"你有证据吗!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#006F#3P证据倒是没有……\x01",
"不过,让我调查一下就知道了!\x02",
)
)
CloseMessageWindow()
def lambda_D49():
OP_8C(0xFE, 270, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_D49)
OP_97(0xB, 0x1E5A, 0x7B66, 0x2BF20, 0xFA0, 0x3)
OP_44(0xB, 0xFF)
SetChrSubChip(0xB, 0)
OP_9E(0xB, 0x1E, 0x0, 0x190, 0x1388)
ChrTalk(
0xB,
(
"#778F#3P哎呀呀……!\x02\x03",
"你、你在摸哪里啊!?\x01",
"好~痒~痒~啊!\x02\x03",
"大变态!粗暴女!\x02",
)
)
CloseMessageWindow()
OP_9E(0xB, 0x1E, 0x0, 0x258, 0x1388)
ChrTalk(
0x101,
(
"#006F#2P行啦行啦,反抗是没有用的,\x01",
"还是乖乖地交出来吧……\x02",
)
)
CloseMessageWindow()
OP_72(0x0, 0x10)
OP_6F(0x0, 20)
SetChrPos(0x136, 10, 0, 30720, 90)
ClearChrFlags(0x136, 0x80)
NpcTalk(
0x136,
"女孩的声音",
"#2P基库!\x02",
)
CloseMessageWindow()
OP_62(0x101, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
OP_6D(9390, 0, 34800, 800)
Sleep(500)
ClearChrFlags(0xC, 0x80)
SetChrFlags(0xC, 0x40)
SetChrPos(0xC, 0, 6000, 31900, 0)
OP_22(0x8C, 0x0, 0x64)
OP_8E(0xC, 0x1E14, 0x2BC, 0x7B66, 0x4E20, 0x0)
OP_62(0x101, 0x0, 2000, 0x28, 0x2B, 0x64, 0x3)
def lambda_EDE():
OP_95(0xFE, 0x12C, 0x0, 0x0, 0x1F4, 0x1388)
ExitThread()
QueueWorkItem(0x101, 2, lambda_EDE)
def lambda_EFC():
OP_95(0xFE, 0x12C, 0x0, 0x0, 0x1F4, 0x1388)
ExitThread()
QueueWorkItem(0xB, 2, lambda_EFC)
OP_8E(0xC, 0x36A6, 0x1770, 0x8278, 0x4E20, 0x0)
ChrTalk(
0x101,
(
"#004F#2P哇哇~!?\x02\x03",
"刚、刚才那个东西是……\x02",
)
)
CloseMessageWindow()
def lambda_F5D():
TurnDirection(0xFE, 0x136, 400)
ExitThread()
QueueWorkItem(0xB, 1, lambda_F5D)
def lambda_F6B():
TurnDirection(0xFE, 0x136, 400)
ExitThread()
QueueWorkItem(0x9, 1, lambda_F6B)
def lambda_F79():
TurnDirection(0xFE, 0x136, 400)
ExitThread()
QueueWorkItem(0xA, 1, lambda_F79)
def lambda_F87():
TurnDirection(0xFE, 0x136, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_F87)
def lambda_F95():
TurnDirection(0xFE, 0x136, 400)
ExitThread()
QueueWorkItem(0x102, 1, lambda_F95)
def lambda_FA3():
OP_6D(3420, 0, 32210, 3000)
ExitThread()
QueueWorkItem(0x0, 2, lambda_FA3)
def lambda_FBB():
OP_6C(315000, 3000)
ExitThread()
QueueWorkItem(0x0, 3, lambda_FBB)
def lambda_FCB():
OP_6B(2800, 3000)
ExitThread()
QueueWorkItem(0x1, 3, lambda_FCB)
WaitChrThread(0x0, 0x2)
OP_92(0xC, 0x136, 0x1388, 0x2710, 0x0)
OP_92(0xC, 0x136, 0xFA0, 0x1F40, 0x0)
OP_92(0xC, 0x136, 0xBB8, 0x1770, 0x0)
OP_92(0xC, 0x136, 0x7D0, 0xBB8, 0x0)
OP_8E(0xC, 0xA, 0x3E8, 0x7B0C, 0x5DC, 0x0)
def lambda_102C():
OP_8C(0xFE, 135, 200)
ExitThread()
QueueWorkItem(0xC, 3, lambda_102C)
SetChrChipByIndex(0x136, 5)
SetChrSubChip(0x136, 3)
OP_8F(0xC, 0xFFFFFFCE, 0xC8, 0x7ADA, 0x3E8, 0x0)
WaitChrThread(0xC, 0x3)
Sleep(100)
Fade(250)
SetChrFlags(0xC, 0x80)
SetChrSubChip(0x136, 1)
SetChrFlags(0x136, 0x20)
OP_0D()
Sleep(500)
NpcTalk(
0x136,
"穿制服的少女",
(
"#046F请放了那个孩子!\x02\x03",
"如果再对他动粗的话,\x01",
"就别怪我不客……\x02\x03",
"#044F………………哎呀?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#004F啊,你不就是……\x02",
)
CloseMessageWindow()
NpcTalk(
0x136,
"穿制服的少女",
"#044F在玛诺利亚村见过的……\x02",
)
CloseMessageWindow()
NpcTalk(
0xC,
"白隼",
"#310F#1P啾?\x02",
)
CloseMessageWindow()
OP_96(0xB, 0x1A40, 0x0, 0x7B0C, 0x1F4, 0x1388)
ClearChrFlags(0xB, 0x4)
OP_8E(0xB, 0x15EA, 0x0, 0x7BD4, 0x1B58, 0x0)
ChrTalk(
0xB,
(
"#775F救救我……科洛丝姐姐!\x02\x03",
"我、我什么事情也没干,\x01",
"这姐姐就无缘无故地把我抓住了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#005F什、什么事情也没干?\x01",
"明明就是你把我的徽章给偷走的!\x02",
)
)
CloseMessageWindow()
TurnDirection(0xB, 0x101, 400)
ChrTalk(
0xB,
(
"#770F#1P嘿嘿,证据呢?\x01",
"有本事就拿出证据来!\x02",
)
)
CloseMessageWindow()
def lambda_1254():
OP_8E(0xFE, 0x17B6, 0x0, 0x7C9C, 0x1388, 0x0)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1254)
Sleep(200)
OP_8F(0xB, 0x1194, 0x0, 0x78D2, 0x1388, 0x0)
TurnDirection(0x101, 0xB, 400)
ChrTalk(
0xB,
"#774F#1P啊,可不要再挠我痒痒了……\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
"#009F你这个调皮蛋……\x02",
)
CloseMessageWindow()
def lambda_12CD():
OP_6D(2050, 0, 30810, 2000)
ExitThread()
QueueWorkItem(0x101, 1, lambda_12CD)
def lambda_12E5():
OP_8E(0xFE, 0xADC, 0x0, 0x6EC8, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x102, 1, lambda_12E5)
Sleep(300)
def lambda_1305():
OP_8E(0xFE, 0x1040, 0x0, 0x6F7C, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0xA, 1, lambda_1305)
Sleep(400)
def lambda_1325():
OP_8E(0xFE, 0x384, 0x0, 0x71D4, 0xBB8, 0x0)
ExitThread()
QueueWorkItem(0x9, 1, lambda_1325)
Sleep(300)
WaitChrThread(0x102, 0x1)
TurnDirection(0x102, 0x136, 0)
ChrTalk(
0x102,
"#010F你好,我们又见面了。\x02",
)
CloseMessageWindow()
OP_8C(0x136, 135, 400)
ChrTalk(
0x136,
(
"#045F啊,上次给你们添麻烦了……\x02\x03",
"刚才真不好意思,\x01",
"我还以为是强盗……\x02\x03",
"#043F啊,对了……\x01",
"究竟发生了什么事呢?\x02",
)
)
CloseMessageWindow()
WaitChrThread(0xA, 0x1)
ChrTalk(
0xA,
(
"科洛丝姐姐,\x01",
"不用问也能猜到啦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"肯定是克拉姆\x01",
"又惹出什么祸来了。\x02",
)
)
CloseMessageWindow()
WaitChrThread(0x9, 0x1)
ChrTalk(
0x9,
(
"嗯……姐姐~\x01",
"苹果派做好了吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x136,
(
"#041F啊,再等一下好吗,\x01",
"苹果派要烤一下才能吃的哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#005F这个臭小鬼!\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
"#776F#1P粗暴女!\x02",
)
CloseMessageWindow()
ChrTalk(
0xA,
(
"克拉姆你也真是的。\x01",
"什么时候才能不这么孩子气啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x9,
"苹果派,好了没有啊~\x02",
)
CloseMessageWindow()
OP_62(0x102, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
OP_62(0x136, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0x102,
(
"#019F……总觉得\x01",
"情况好像变得越来越复杂了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x136,
(
"#045F啊,呵呵……\x01",
"我也觉得是呢……\x02",
)
)
CloseMessageWindow()
NpcTalk(
0xC,
"白隼",
"#311F#2P啾。\x02",
)
CloseMessageWindow()
NpcTalk(
0x8,
"女性的声音",
(
"#3P哎呀哎呀~\x01",
"怎么外面这么吵呢……\x02",
)
)
CloseMessageWindow()
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x8, 0x4)
OP_8E(0x8, 0x0, 0x0, 0x7C38, 0x5DC, 0x0)
OP_8C(0x8, 90, 400)
def lambda_1635():
label("loc_1635")
TurnDirection(0xFE, 0x8, 400)
OP_48()
Jump("loc_1635")
QueueWorkItem2(0xB, 1, lambda_1635)
def lambda_1646():
label("loc_1646")
TurnDirection(0xFE, 0x8, 400)
OP_48()
Jump("loc_1646")
QueueWorkItem2(0x9, 1, lambda_1646)
def lambda_1657():
label("loc_1657")
TurnDirection(0xFE, 0x8, 400)
OP_48()
Jump("loc_1657")
QueueWorkItem2(0xA, 1, lambda_1657)
def lambda_1668():
label("loc_1668")
TurnDirection(0xFE, 0x8, 400)
OP_48()
Jump("loc_1668")
QueueWorkItem2(0x101, 1, lambda_1668)
def lambda_1679():
label("loc_1679")
TurnDirection(0xFE, 0x8, 400)
OP_48()
Jump("loc_1679")
QueueWorkItem2(0x102, 1, lambda_1679)
def lambda_168A():
label("loc_168A")
TurnDirection(0xFE, 0x8, 400)
OP_48()
Jump("loc_168A")
QueueWorkItem2(0x136, 1, lambda_168A)
ChrTalk(
0x136,
"#044F特蕾莎老师!\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#750F#1P虽然详细情况我不太清楚……\x02\x03",
"不过看起来,\x01",
"又是克拉姆做了什么恶作剧吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#772F才、才不会呢。\x01",
"我可是什么都没干哦。\x02\x03",
"老师你可不要听\x01",
"这个粗暴的姐姐乱说话哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#009F谁、谁是粗暴的姐姐啊!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"#750F#1P哎呀哎呀~真是伤脑筋。\x02",
)
CloseMessageWindow()
OP_8E(0x8, 0xA14, 0x0, 0x771A, 0x7D0, 0x0)
OP_8C(0x8, 90, 400)
ChrTalk(
0x8,
(
"#750F#1P克拉姆……\x01",
"你真的什么都没有做吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
"#771F嗯,那当然啦!\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
"#750F#1P你敢向空之女神发誓吗?\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
"#775F当、当然敢啦!\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#754F#1P是吗……\x02\x03",
"#750F刚才我在你们的房间里\x01",
"捡到了一枚徽章之类的东西……\x02\x03",
"那不是你的东西吧?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#772F咦,不可能……\x01",
"我明明塞进自己裤袋里面的……\x02",
)
)
CloseMessageWindow()
OP_62(0xB, 0x0, 1700, 0x2, 0x7, 0x50, 0x1)
OP_22(0x27, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0xB,
"#774F啊……!\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
"#005F果然是你~!\x02",
)
CloseMessageWindow()
ChrTalk(
0x136,
"#044F是吗……\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F老师套话还真是有一手……\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#752F#1P克拉姆……\x01",
"这下你无话可说了吧。\x02\x03",
"马上把你拿走的东西\x01",
"还给这位姐姐。\x02",
)
)
CloseMessageWindow()
OP_62(0xB, 0x0, 1700, 0xE, 0xF, 0xFA, 0x2)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0xB,
"#773F呜呜呜呜呜呜呜……\x02",
)
CloseMessageWindow()
OP_44(0xB, 0x1)
TurnDirection(0xB, 0x101, 400)
ChrTalk(
0xB,
(
"#776F#1P算我倒霉!\x01",
"还你就还你!\x02",
)
)
CloseMessageWindow()
OP_44(0x101, 0x1)
OP_44(0x102, 0x1)
OP_44(0xA, 0x1)
OP_44(0x9, 0x1)
OP_44(0x136, 0x1)
def lambda_1A47():
label("loc_1A47")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_1A47")
QueueWorkItem2(0x101, 1, lambda_1A47)
def lambda_1A58():
label("loc_1A58")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_1A58")
QueueWorkItem2(0x8, 1, lambda_1A58)
def lambda_1A69():
label("loc_1A69")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_1A69")
QueueWorkItem2(0x102, 1, lambda_1A69)
def lambda_1A7A():
label("loc_1A7A")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_1A7A")
QueueWorkItem2(0xA, 1, lambda_1A7A)
def lambda_1A8B():
label("loc_1A8B")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_1A8B")
QueueWorkItem2(0x9, 1, lambda_1A8B)
def lambda_1A9C():
label("loc_1A9C")
TurnDirection(0xFE, 0xB, 0)
OP_48()
Jump("loc_1A9C")
QueueWorkItem2(0x136, 1, lambda_1A9C)
OP_92(0xB, 0x101, 0x4B0, 0xFA0, 0x0)
FadeToDark(300, 0, 100)
SetMessageWindowPos(72, 320, 56, 3)
SetChrName("")
AnonymousTalk(
(
scpstr(SCPSTR_CODE_COLOR, 0x5),
"克拉姆将徽章扔在地上。\x02",
)
)
CloseMessageWindow()
OP_56(0x0)
SetMessageWindowPos(72, 320, 56, 3)
FadeToBright(300, 0)
OP_3E(0x35C, 1)
OP_8F(0xB, 0x1194, 0x0, 0x78D2, 0xFA0, 0x0)
ChrTalk(
0x101,
"#004F啊……\x02",
)
CloseMessageWindow()
OP_95(0xB, 0x0, 0x0, 0x0, 0x320, 0x1770)
ChrTalk(
0xB,
"#772F#1P哼,你好样的!\x02",
)
CloseMessageWindow()
OP_8C(0xB, 180, 400)
OP_8E(0xB, 0x1405, 0x0, 0x709E, 0x1770, 0x0)
OP_8E(0xB, 0x1107, 0x0, 0x50A0, 0x1B58, 0x0)
ChrTalk(
0x136,
"#043F啊,克拉姆!\x02",
)
CloseMessageWindow()
ChrTalk(
0x8,
(
"#750F#1P不要紧,让他自己清醒一下也好。\x01",
"一会儿就会回来的。\x02\x03",
"#750F啊,对了……\x01",
"大家都不要站在这里说话了。\x02",
)
)
CloseMessageWindow()
OP_44(0x8, 0xFF)
TurnDirection(0x8, 0x101, 400)
Sleep(400)
OP_44(0x101, 0xFF)
OP_44(0x102, 0xFF)
OP_44(0x136, 0xFF)
OP_44(0xA, 0x1)
OP_44(0x9, 0x1)
def lambda_1C5D():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x101, 1, lambda_1C5D)
def lambda_1C6B():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x102, 1, lambda_1C6B)
def lambda_1C79():
TurnDirection(0xFE, 0x8, 400)
ExitThread()
QueueWorkItem(0x136, 1, lambda_1C79)
Sleep(400)
ChrTalk(
0x8,
(
"#750F#1P详细的情况,\x01",
"我们到屋子里边喝茶边谈吧?\x02",
)
)
CloseMessageWindow()
FadeToDark(1500, 0, -1)
OP_0D()
OP_A2(0x3FA)
NewScene("ED6_DT01/T2410 ._SN", 100, 0, 0)
IdleLoop()
label("loc_1CE3")
Return()
# Function_6_507 end
def Function_7_1CE4(): pass
label("Function_7_1CE4")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_1D30")
OP_51(0xD, 0x1, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x1), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x1), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xD, 0x2, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x2), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x2), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_51(0xD, 0x3, (scpexpr(EXPR_GET_CHR_WORK, 0x101, 0x3), scpexpr(EXPR_GET_CHR_WORK, 0xB, 0x3), scpexpr(EXPR_ADD), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_IDIV), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
OP_48()
Jump("Function_7_1CE4")
label("loc_1D30")
Return()
# Function_7_1CE4 end
def Function_8_1D31(): pass

label("Function_8_1D31")

    # Entrance choreography for whichever character this thread runs on (0xFE):
    # move through a fixed series of map coordinates (OP_8E/OP_96 movement ops).
    OP_8E(0xFE, 0xE9C, 0xFFFFFF38, 0x425E, 0x1B58, 0x0)
    OP_96(0xFE, 0x3F2, 0xC8, 0x425E, 0x7D0, 0x1B58)
    OP_8E(0xFE, 0xFFFFF7F4, 0x64, 0x3764, 0x1B58, 0x0)
    OP_8E(0xFE, 0xFFFFEE6C, 0xFFFFFF38, 0x3912, 0x1B58, 0x0)

    # Keep turning to face character 0x101 on sub-thread 2 while the timed
    # moves below play out (OP_48 yields one frame per loop iteration).
    def lambda_1D8A():
        label("loc_1D8A")

        TurnDirection(0xFE, 0x101, 0)
        OP_48()
        Jump("loc_1D8A")

    QueueWorkItem2(0xFE, 2, lambda_1D8A)
    Sleep(1600)
    OP_8F(0xFE, 0xFFFFEDC2, 0xFFFFFF9C, 0x4880, 0x1B58, 0x0)
    Sleep(1200)
    OP_8F(0xFE, 0xFFFFED54, 0xFFFFFF38, 0x4F2E, 0x1B58, 0x0)
    Sleep(500)
    # Kill the face-0x101 loop on thread 2, then move to the final position.
    OP_44(0xFE, 0x2)
    OP_8E(0xFE, 0xFFFFEC6E, 0x0, 0x7C06, 0x2AF8, 0x0)
    Return()

    # Function_8_1D31 end
def Function_9_1DE5(): pass
label("Function_9_1DE5")
Sleep(500)
OP_8E(0xFE, 0x4D8, 0x0, 0x532A, 0x1B58, 0x0)
OP_8E(0xFE, 0xFFFFF984, 0xC8, 0x4B28, 0x1B58, 0x0)
OP_96(0xFE, 0xFFFFF38A, 0xFFFFFF38, 0x4902, 0x7D0, 0x1B58)
Sleep(1000)
OP_8F(0xFE, 0xFFFFF3D0, 0xFFFFFF9C, 0x3796, 0x1B58, 0x0)
Sleep(1000)
OP_8F(0xFE, 0xFFFFF3D0, 0xFFFFFF9C, 0x3796, 0x1B58, 0x0)
OP_8F(0xFE, 0xFFFFEE6C, 0xFFFFFF38, 0x3912, 0x1B58, 0x0)
Sleep(500)
OP_44(0x102, 0xFF)
OP_44(0xFE, 0x2)
OP_8E(0xFE, 0xFFFFEC6E, 0x0, 0x7C06, 0x2AF8, 0x0)
Return()
# Function_9_1DE5 end
def Function_10_1E91(): pass

label("Function_10_1E91")

    # Cutscene: the party chats after meeting the matron; the white falcon
    # (0xC, named "基库"/Kiku in the dialogue below) flies in, is introduced,
    # snubs 0x101, and the party agrees to head for Ruan.
    # --- Scene setup: camera, hide/position party members 0x101/0x102/0x136 ---
    EventBegin(0x0)
    OP_6D(-1130, 80, 31130, 0)
    OP_67(0, 9500, -10000, 0)
    OP_6B(2800, 0)
    OP_6C(315000, 0)
    OP_6E(262, 0)
    SetChrFlags(0x101, 0x80)
    SetChrFlags(0x102, 0x80)
    SetChrFlags(0x136, 0x80)
    SetChrPos(0x101, 0, 0, 32900, 0)
    SetChrPos(0x102, 0, 0, 32900, 0)
    SetChrPos(0x136, 0, 0, 32900, 0)
    SetChrFlags(0x8, 0x80)
    FadeToBright(1000, 0)
    OP_0D()
    OP_70(0x0, 0x14)
    OP_73(0x0)
    # Spawn the entrance choreography (Functions 11/12/13 of this scenario)
    # on each party member's thread 1, then wait for 0x136 to finish.
    OP_43(0x101, 0x1, 0x0, 0xB)
    OP_43(0x102, 0x1, 0x0, 0xC)
    OP_43(0x136, 0x1, 0x0, 0xD)
    WaitChrThread(0x136, 0x1)
    ChrTalk(
        0x101,
        (
            "#000F嗯……特蕾莎院长\x01",
            "真是一位非常和蔼可亲的人呢。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x102,
        (
            "#010F是啊……\x01",
            "感觉就像是母亲那样的亲切。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        (
            "#041F#2P呵呵,那些孩子一直都\x01",
            "把老师看成是自己的母亲呢。\x02",
        )
    )
    CloseMessageWindow()
    # --- The falcon (0xC) appears overhead and spirals down to 0x136 ---
    OP_22(0x197, 0x0, 0x64)
    OP_62(0x101, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
    OP_62(0x102, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
    OP_62(0x136, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
    Sleep(1000)
    ClearChrFlags(0xC, 0x80)
    SetChrPos(0xC, -5000, 8000, 13000, 0)
    OP_44(0x101, 0xFF)
    OP_44(0x102, 0xFF)
    # 0x101 and 0x102 keep tracking the bird while it descends.
    def lambda_204F():
        label("loc_204F")

        TurnDirection(0xFE, 0xC, 400)
        OP_48()
        Jump("loc_204F")

    QueueWorkItem2(0x101, 1, lambda_204F)

    def lambda_2060():
        label("loc_2060")

        TurnDirection(0xFE, 0xC, 400)
        OP_48()
        Jump("loc_2060")

    QueueWorkItem2(0x102, 1, lambda_2060)
    OP_8C(0x136, 180, 0)
    OP_92(0xC, 0x136, 0x1388, 0x2710, 0x0)
    OP_22(0x8C, 0x0, 0x64)
    OP_92(0xC, 0x136, 0xFA0, 0x1F40, 0x0)
    OP_92(0xC, 0x136, 0xBB8, 0x1770, 0x0)
    OP_92(0xC, 0x136, 0x7D0, 0xBB8, 0x0)
    OP_8E(0xC, 0xFFFFFD8A, 0x3E8, 0x765C, 0x5DC, 0x0)

    def lambda_20C9():
        OP_8C(0xFE, 90, 200)
        ExitThread()

    QueueWorkItem(0xC, 3, lambda_20C9)
    # Swap 0x136 to the sprite set where the bird perches on her arm, then
    # hide the separate bird model (0xC) during the fade.
    SetChrChipByIndex(0x136, 5)
    SetChrSubChip(0x136, 2)
    OP_8C(0x136, 135, 0)
    OP_8F(0xC, 0xFFFFFEB6, 0xC8, 0x7788, 0x3E8, 0x0)
    Fade(250)
    SetChrSubChip(0x136, 0)
    SetChrFlags(0xC, 0x80)
    SetChrFlags(0x136, 0x20)
    OP_0D()
    OP_44(0x101, 0xFF)
    OP_44(0x102, 0xFF)
    # --- Introduction dialogue with the falcon ---
    ChrTalk(
        0x136,
        (
            "#040F#2P基库。\x01",
            "让你久等了吧。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0xC,
        "#310F#1P啾。\x02",
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        (
            "#040F#2P嗯,是的。\x01",
            "他们并不是坏人哦。\x02\x03",
            "他们是我的新朋友,\x01",
            "艾丝蒂尔和约修亚。\x02\x03",
            "你记住他们的名字了吗?\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0xC,
        "#311F#1P啾!\x02",
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        "#041F#2P呵呵,乖孩子。\x02",
    )
    CloseMessageWindow()
    ChrTalk(
        0x101,
        (
            "#004F厉、厉害啊。\x01",
            "你在和它说话吗?\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        (
            "#040F#2P也不能算是说话,\x01",
            "不过我能知道它想表达什么。\x02\x03",
            "也许是因为\x01",
            "大家能够感受到彼此的心情……\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x101,
        "#501F哇~……\x02",
    )
    CloseMessageWindow()
    ChrTalk(
        0x102,
        "#019F这就是所谓的心灵相通吧。\x02",
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        "#041F#2P是啊。\x02",
    )
    CloseMessageWindow()
    # 0x101 steps up to greet the bird.
    OP_92(0x101, 0xC, 0x320, 0x5DC, 0x0)
    Sleep(500)
    ChrTalk(
        0x101,
        (
            "#501F你好啊,基库。\x02\x03",
            "#001F我叫艾丝蒂尔,多多指教哦㈱\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0xC,
        "#310F#1P啾?\x02",
    )
    CloseMessageWindow()
    OP_62(0xC, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
    Sleep(1200)
    OP_63(0xC)
    ChrTalk(
        0xC,
        "#310F#1P啾——\x02",
    )
    CloseMessageWindow()
    # Everyone keeps facing the bird while it takes off.
    def lambda_23F6():
        label("loc_23F6")

        TurnDirection(0xFE, 0xC, 0)
        OP_48()
        Jump("loc_23F6")

    QueueWorkItem2(0x101, 1, lambda_23F6)

    def lambda_2407():
        label("loc_2407")

        TurnDirection(0xFE, 0xC, 0)
        OP_48()
        Jump("loc_2407")

    QueueWorkItem2(0x102, 1, lambda_2407)

    def lambda_2418():
        label("loc_2418")

        TurnDirection(0xFE, 0xC, 0)
        OP_48()
        Jump("loc_2418")

    QueueWorkItem2(0x136, 1, lambda_2418)
    OP_22(0x8C, 0x0, 0x64)
    Fade(250)
    # Re-show the bird model, restore 0x136's pose, and fly the bird away in
    # accelerating stages (same target, increasing speed each step).
    ClearChrFlags(0xC, 0x80)
    ClearChrFlags(0x136, 0x20)
    SetChrSubChip(0x136, 2)
    OP_0D()

    def lambda_2443():
        OP_8E(0xFE, 0xFFFFD260, 0x1B58, 0x6EFA, 0xBB8, 0x0)
        ExitThread()

    QueueWorkItem(0xC, 1, lambda_2443)
    Sleep(400)

    def lambda_2463():
        OP_8E(0xFE, 0xFFFFD260, 0x1B58, 0x6EFA, 0x1770, 0x0)
        ExitThread()

    QueueWorkItem(0xC, 1, lambda_2463)
    Sleep(400)

    def lambda_2483():
        OP_8E(0xFE, 0xFFFFD260, 0x1B58, 0x6EFA, 0x2EE0, 0x0)
        ExitThread()

    QueueWorkItem(0xC, 1, lambda_2483)
    Sleep(400)

    def lambda_24A3():
        OP_8E(0xFE, 0xFFFFD260, 0x1B58, 0x6EFA, 0x4E20, 0x0)
        ExitThread()

    QueueWorkItem(0xC, 1, lambda_24A3)
    ChrTalk(
        0x101,
        (
            "#004F啊啊……\x02\x03",
            "#007F呜呜……看来我被甩了。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x102,
        "#019F哈哈,真是可惜呢。\x02",
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        "#045F#2P呵呵……\x02",
    )
    CloseMessageWindow()
    # --- Travel plans: heading to the Ruan guild branch ---
    OP_44(0x136, 0xFF)
    SetChrChipByIndex(0x136, 6)
    SetChrSubChip(0x136, 0)
    OP_8C(0x136, 135, 400)
    ChrTalk(
        0x136,
        (
            "#040F#2P话说回来,\x01",
            "艾丝蒂尔你们等一下要去卢安市吧。\x02",
        )
    )
    CloseMessageWindow()
    OP_44(0x102, 0xFF)
    TurnDirection(0x102, 0x136, 200)
    OP_44(0x101, 0xFF)
    TurnDirection(0x101, 0x136, 400)
    ChrTalk(
        0x101,
        (
            "#006F嗯,我们要到那里的\x01",
            "协会支部办理转属手续。\x02\x03",
            "要不然就不能接受工作了。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        (
            "#040F#2P卢安的协会支部啊,\x01",
            "我去过很那里多次呢。\x02\x03",
            "不介意的话,可以让我带你们去吗?\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x101,
        (
            "#501F哇~这真是太好了。\x01",
            "我们可是求之不得呢。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x102,
        (
            "#010F那样没关系吗?\x01",
            "你不赶快回学院的话……\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x136,
        (
            "#040F#2P没关系的。\x01",
            "今天我向学院请了一天的假。\x02\x03",
            "在天黑之前回去就没事了。\x02",
        )
    )
    CloseMessageWindow()
    ChrTalk(
        0x101,
        (
            "#001F那就这样决定啦⊙\x02\x03",
            "目的地卢安,出发~!\x02",
        )
    )
    CloseMessageWindow()
    # --- Cleanup: hide bird, party bookkeeping (OP_31 over all 8 slots),
    # reposition everyone at the exit and restore the field camera. ---
    SetChrFlags(0xC, 0x80)
    OP_31(0x0, 0xFE, 0x0)
    OP_31(0x1, 0xFE, 0x0)
    OP_31(0x2, 0xFE, 0x0)
    OP_31(0x3, 0xFE, 0x0)
    OP_31(0x4, 0xFE, 0x0)
    OP_31(0x5, 0xFE, 0x0)
    OP_31(0x6, 0xFE, 0x0)
    OP_31(0x7, 0xFE, 0x0)
    FadeToDark(500, 0, -1)
    OP_0D()
    SetChrPos(0x101, -120, 10, 29740, 180)
    SetChrPos(0x102, -120, 10, 29740, 180)
    SetChrPos(0x136, -120, 10, 29740, 180)
    OP_6D(-120, 10, 29740, 0)
    OP_67(0, 9500, -10000, 0)
    OP_6B(2800, 0)
    OP_6C(45000, 0)
    OP_6E(262, 0)
    FadeToBright(500, 0)
    EventEnd(0x0)
    Return()

    # Function_10_1E91 end
def Function_11_287F(): pass

label("Function_11_287F")

    # Entrance helper for 0x101: un-hide, walk to the group, then keep facing
    # character 0x136 on sub-thread 2 until the loop is killed by the caller.
    ClearChrFlags(0x101, 0x80)
    OP_8E(0x101, 0x0, 0x0, 0x724C, 0x7D0, 0x0)

    def lambda_289E():
        label("loc_289E")

        TurnDirection(0xFE, 0x136, 400)
        OP_48()
        Jump("loc_289E")

    QueueWorkItem2(0xFE, 2, lambda_289E)
    Return()

    # Function_11_287F end
def Function_12_28AA(): pass

label("Function_12_28AA")

    # Entrance helper for 0x102: staggered 800 ms behind 0x101, un-hide, walk
    # two waypoints in, then keep facing 0x136 on sub-thread 2.
    Sleep(800)
    ClearChrFlags(0x102, 0x80)
    OP_8E(0xFE, 0x0, 0x0, 0x7BB6, 0x7D0, 0x0)
    OP_8E(0xFE, 0x5DC, 0x0, 0x7698, 0x7D0, 0x0)

    def lambda_28E2():
        label("loc_28E2")

        TurnDirection(0xFE, 0x136, 400)
        OP_48()
        Jump("loc_28E2")

    QueueWorkItem2(0xFE, 2, lambda_28E2)
    Return()

    # Function_12_28AA end
def Function_13_28EE(): pass
label("Function_13_28EE")
Sleep(800)
Sleep(800)
ClearChrFlags(0x136, 0x80)
OP_8E(0xFE, 0x0, 0x0, 0x7BB6, 0x7D0, 0x0)
OP_8C(0xFE, 0, 400)
Sleep(500)
OP_72(0x0, 0x800)
OP_22(0x7, 0x0, 0x64)
OP_6F(0x0, 20)
OP_70(0x0, 0x0)
OP_73(0x0)
OP_71(0x0, 0x800)
Sleep(500)
OP_8C(0xFE, 180, 400)
OP_8E(0x136, 0xFFFFFF7E, 0xA, 0x7850, 0x7D0, 0x0)
OP_8C(0xFE, 135, 400)
Return()
# Function_13_28EE end
def Function_14_2965(): pass
label("Function_14_2965")
EventBegin(0x0)
OP_6D(310, 0, -160, 0)
OP_67(0, 9500, -10000, 0)
OP_6B(3840, 0)
OP_6C(45000, 0)
OP_6E(262, 0)
SetChrFlags(0x101, 0x80)
SetChrFlags(0x102, 0x80)
FadeToBright(2000, 0)
OP_6D(1900, 0, 36890, 10000)
SetMapFlags(0x2000000)
OP_A2(0x3FB)
NewScene("ED6_DT01/T2411 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_14_2965 end
def Function_15_29DA(): pass
label("Function_15_29DA")
EventBegin(0x0)
OP_6D(3610, 0, 34400, 0)
OP_6C(57000, 0)
LoadEffect(0x0, "map\\\\mpfire0.eff")
PlayEffect(0x0, 0xFF, 0xFF, 3450, 2000, 34330, 0, 0, 0, 4000, 4000, 4000, 0xFF, 0, 0, 0, 0)
PlayEffect(0x0, 0xFF, 0xFF, 3650, 1000, 33330, 0, 0, 0, 4000, 4000, 4000, 0xFF, 0, 0, 0, 0)
FadeToBright(2000, 0)
OP_6C(351000, 4000)
SetMapFlags(0x2000000)
OP_A2(0x3FA)
NewScene("ED6_DT01/T2411 ._SN", 100, 0, 0)
IdleLoop()
Return()
# Function_15_29DA end
# Decompiler footer: presumably writes the reassembled scenario binary back to
# disk and runs main() inside the tool's error-trapping wrapper — confirm
# against the decompiler's runtime library.
SaveToFile()

Try(main)
|
from django.contrib import admin
from Auth.models import Company
# Register your models here.
# Expose the Company model in the Django admin site with the default ModelAdmin.
admin.site.register(Company)
|
# Generated by Django 3.0.1 on 2020-01-11 04:14
# Auto-generated migration — adds an optional thumbnail image to Album.
# Do not hand-edit applied migrations; create a follow-up migration instead.

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('library_app', '0005_remove_album_public'),
    ]

    operations = [
        migrations.AddField(
            model_name='album',
            name='my_image',
            # NOTE(review): upload_to 'album_thumnbails' is misspelled
            # ("thumnbails"); fixing it would require a new migration plus a
            # file move, so it is left as-is here.
            field=models.ImageField(blank=True, null=True, upload_to='album_thumnbails'),
        ),
    ]
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select

cities = ['Delhi-Noida', 'Gurgaon', 'Kolkata', 'Mumbai', 'Pune', 'Ahmedabad-Gandhinagar', 'Bangalore', 'Hyderabad', 'Chennai']


def get_cookie(city):
    """Open bigbasket.com, select *city*, and print its `_bb_vid` cookie.

    :param city: visible city name in the site's city selector
    :raises: any selenium exception (element not found, timeout, ...)
    """
    driver = webdriver.Firefox()
    try:
        driver.get("http://www.bigbasket.com/choose-city/?next=/cl/fruits-vegetables/")
        select = Select(driver.find_element_by_id("ftv-city-selectboxdiv"))
        select.select_by_visible_text(city)
        # Bug fix: the old code called driver.add_cookie(driver.get_cookies()),
        # but add_cookie() takes a single cookie dict, not a list — the call
        # could never succeed and served no purpose, so it was removed.
        driver.find_element_by_id("skip_explore").click()
        for cookie in driver.get_cookies():
            if cookie['name'] == '_bb_vid':
                print("cookie['%s'] = {'_bb_vid' : %s,}" % (city, cookie['value']))
    finally:
        # Always release the browser, even when the scrape fails mid-way.
        driver.quit()


for city in cities:
    try:
        get_cookie(city)
    except Exception as e:
        # Best-effort batch: report the failing city and keep going.
        # (Python-2 print statements were also converted to print() calls.)
        print("[%s]: %s" % (city, e))
|
import argparse
import os
import shlex

import matplotlib.pyplot as plt
import numpy
import pandas
# @author: Gursimran Singh
# directories
DIR_NAME = os.path.dirname(os.path.abspath(__file__))
CSVs_DIR_NAME = 'converted_pcaps'
CSV_DIR = os.path.join(DIR_NAME, CSVs_DIR_NAME)
PLOTs_DIR_NAME = 'plotted_graphs'
PLOT_DIR = os.path.join(DIR_NAME, PLOTs_DIR_NAME)
def create_argument_parser():
    """
    Creates and sets up a fairly simple argument parser for the script.
    :return: arg-parser
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p', '--pcap',
        action='store', dest='pcap', required=True,
        help='Path to `pcap` file.',
    )
    parser.add_argument(
        '-c', '--clients',
        action='store', dest='clients', required=True,
        help='MAC addresses of clients to filter out. aa:bb:cc:dd:ee:ff [, gg:hh:ii:jj:kk:ll ...]',
    )
    parser.add_argument(
        '--save_plots',
        action='store_true', dest='should_save_plots',
        help='include to save plots',
    )
    return parser
def convert_pcap_to_csv(filepath: str):
    """
    Converts the given pcap file to a csv format file using `tshark`.
    Stores only the probe requests.
    :param filepath: pcap filepath
    :return: path of the written csv file
    :raises ValueError: if the pcap file does not exist
    """
    full_path = os.path.abspath(filepath)
    if not os.path.exists(full_path):
        raise ValueError('Provided pcap does not exist!')
    outfile = os.path.join(CSV_DIR, os.path.splitext(os.path.basename(filepath))[0] + '.csv')
    # Quote both paths so spaces or shell metacharacters in a user-supplied
    # filename can neither break the command nor inject extra shell commands.
    sys_command = (
        'tshark -r ' + shlex.quote(full_path) + ' '
        '-2 -R "wlan.fc.type_subtype == 4" '
        '-E separator=, '
        '-T fields -e frame.number -e frame.time_epoch -e wlan.sa '
        '>> ' + shlex.quote(outfile)
    )
    # create out-dir (idempotent, no race between check and create)
    os.makedirs(CSV_DIR, exist_ok=True)
    # write header; tshark then appends the rows via '>>'
    with open(outfile, 'w') as file:
        file.write('frame number,frame epoch time,source address' '\n')
    # execute command
    os.system(sys_command)
    return outfile
def plot_graphs_from_csv_for_clients(filepath, clients: str, should_save_plots: bool):
    """
    Reads a csv file and plots a graph of epoch time differences per client.
    :param filepath: csv filepath produced by `convert_pcap_to_csv`
    :param clients: comma-separated MAC addresses to filter on
    :param should_save_plots: when True, also write each figure as a PNG
    """
    def strip(value: str):
        # pandas converter: trim stray spaces, pass non-strings through untouched
        if not isinstance(value, str):
            return value
        return value.strip(' ')

    file_df = pandas.read_csv(filepath, sep = ',', header = 0, names = [
        'f_number', 'epoch', 'sa',
    ], converters = {
        'f_number': strip,
        'epoch': strip,
        'sa': strip
    })
    clients = clients.strip(' \n').split(',')
    mac_addrs = [mac_addr.strip(' ').lower() for mac_addr in clients]
    for mac_addr in mac_addrs:
        boolean_filter = [addr.lower() == mac_addr for addr in file_df.sa]
        filtered_df = file_df[boolean_filter]
        y = [float(epoch) for epoch in filtered_df.epoch]
        y_diff = numpy.diff(y)
        figure = plt.figure()
        plt.plot(y_diff, label = mac_addr)
        plt.ylabel('epoch time difference (sec)')
        plt.xlabel('probe request count')
        plt.title('periodicity plot for ' + mac_addr)
        if should_save_plots:
            if not os.path.exists(PLOT_DIR):
                os.mkdir(PLOT_DIR)
            # Bug fix 1: include the MAC in the filename — the old name was
            # derived from the csv alone, so every client overwrote the same PNG.
            base = os.path.splitext(os.path.basename(filepath))[0]
            plotfile = os.path.join(PLOT_DIR, base + '_' + mac_addr.replace(':', '-') + '.png')
            # Bug fix 2: save BEFORE plt.show() — interactive backends tear the
            # figure down when its window closes, which used to save blank images.
            figure.savefig(plotfile)
        plt.show()
if __name__ == "__main__":
    # CLI entry point: convert the pcap once, then plot the per-client
    # probe-request periodicity from the resulting csv.
    parser = create_argument_parser()
    args = parser.parse_args()
    csv_file = convert_pcap_to_csv(args.pcap)
    plot_graphs_from_csv_for_clients(csv_file, args.clients, args.should_save_plots)

# for help:
# python3 periodicity_plotter.py -h
# example:
# python3 periodicity_plotter.py --pcap pcaps/02-11_moto_e.pcapng --clients 80:6c:1b:68:a9:99 --save_plots
|
"""
app.extensions.sqla
~~~~~~~~~~~~~~~~~~~~~~~~~~
拓展Flask-SQLAlchemy模块
新增软删除功能
新增对象CRUD功能
核心部分从一个flask-restful项目中摘录出来,现在已经找不到了
"""
from .db_instance import db
from .errors import CharsTooLong, DuplicateEntry
from .model import Model
from .surrogatepk import SurrogatePK
|
import os
import sys
import OpenSSL
from twisted.application import internet, service
from twisted.internet import reactor, ssl
from twisted.web.wsgi import WSGIResource
from twisted.web.server import Site
from config.config import Config
from ssl_util import CustomOpenSSLContextFactory
import server
config = Config()
ROOT_PATH = os.path.dirname(__file__)
class CustomOpenSSLContextFactory(ssl.DefaultOpenSSLContextFactory):
    """SSL context factory that serves a full certificate *chain* (not just a
    leaf certificate) and disables the broken SSLv2/SSLv3 protocols.

    NOTE(review): this local class shadows the CustomOpenSSLContextFactory
    imported from ssl_util above — confirm which implementation is intended.
    """

    def __init__(self, privateKeyFileName, certificateChainFileName,
                 sslmethod=OpenSSL.SSL.SSLv23_METHOD):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateChainFileName: Name of a file containing a certificate chain
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateChainFileName = certificateChainFileName
        self.sslmethod = sslmethod
        # Build and cache the OpenSSL context once up front.
        self.cacheContext()

    def cacheContext(self):
        """Create the OpenSSL context: load chain + key, forbid SSLv2/SSLv3."""
        ctx = OpenSSL.SSL.Context(self.sslmethod)
        ctx.use_certificate_chain_file(self.certificateChainFileName)
        ctx.use_privatekey_file(self.privateKeyFileName)
        # SSLv23_METHOD negotiates the best available protocol; these options
        # explicitly exclude the insecure SSLv2/SSLv3 variants.
        ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2)
        ctx.set_options(OpenSSL.SSL.OP_NO_SSLv3)
        self._context = ctx
def getWebService():
    """Build the HTTPS service: the WSGI app wrapped in a Twisted Site behind
    an SSL listener on the configured API port."""
    context_factory = CustomOpenSSLContextFactory(
        config.get_ssl_key_path(),
        config.get_ssl_cert_path(),
    )
    port_number = int(config.get_environment_api_port())
    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), server.app)
    return internet.SSLServer(port_number, Site(wsgi_resource), context_factory)
# Top-level twistd application object; twistd looks for the name `application`.
application = service.Application("GARPR webapp")

# attach the service to its parent application
api_service = getWebService()
api_service.setServiceParent(application)
|
import itertools

# Project Euler 24: find the millionth lexicographic permutation of the
# digits 0-9.  itertools.permutations of an already-sorted sequence yields
# results in lexicographic order, so the answer can be taken directly at
# index 999999 instead of materialising and sorting all 10! (3.6 million)
# strings as the previous version did.
millionth = "".join(next(itertools.islice(itertools.permutations('0123456789'), 999999, None)))
print(millionth)
|
from django import forms


class UploadForm(forms.Form):
    """Image-upload form: choose a classification mode and attach an image."""
    # (value, label) pairs for the mode dropdown; values are string indices.
    CHOICES = (('0','Fruits'), ('1','Diseases'),)
    # Empty labels: the template renders the widgets without captions.
    mode = forms.ChoiceField(widget=forms.Select, choices=CHOICES,label="")
    image=forms.ImageField(label='',)
|
import os
import re
import logging
from typing import Set
import aiohttp
import discord
from discord.ext import commands
from potato_bot.db import DB
from .context import Context
log = logging.getLogger(__name__)
initial_extensions = (
"potato_bot.cogs.utils.errorhandler",
"potato_bot.cogs.utils.responsetracker",
"potato_bot.cogs.accents",
"potato_bot.cogs.bans",
"potato_bot.cogs.chat",
"potato_bot.cogs.fun",
"potato_bot.cogs.meta",
"potato_bot.cogs.potatostation",
"potato_bot.cogs.techadmin",
)
class Bot(commands.Bot):
    """potato_bot's Discord client.

    Configures intents/mentions, owns the DB handle and aiohttp session, and
    loads the cogs listed in ``initial_extensions`` at construction time.
    """

    def __init__(self, **kwargs):
        super().__init__(
            command_prefix=commands.when_mentioned_or(os.environ["BOT_PREFIX"]),
            case_insensitive=True,
            allowed_mentions=discord.AllowedMentions(
                roles=False, everyone=False, users=True
            ),
            intents=discord.Intents(
                guilds=True,
                members=True,
                bans=True,
                emojis=True,
                messages=True,
                reactions=True,
            ),
            **kwargs,
        )
        self.db = DB()
        self.session = aiohttp.ClientSession()
        # Filled in asynchronously by _fetch_owners() once the bot is ready.
        self.owner_ids: Set[int] = set()
        # The DB must be up before anything else; the remaining setup can wait
        # until the event loop is running.
        self.loop.run_until_complete(self.critical_setup())
        self.loop.create_task(self.setup())
        for extension in initial_extensions:
            try:
                self.load_extension(extension)
            except Exception as e:
                # One broken cog must not stop the rest from loading; surface
                # the failure through the bot's error event instead.
                log.error(f"Error loading {extension}")
                self.dispatch("error", e)

    async def get_prefix(self, message: discord.Message):
        """Return the matched prefix for *message*, or [] when none matches.

        Matching is case-insensitive; in DMs an empty prefix is also accepted.
        """
        standard = await super().get_prefix(message)
        if isinstance(standard, str):
            standard = [standard]
        # In DMs commands work with no prefix at all.
        if message.guild is None:
            standard.append("")
        expr = re.compile(
            rf"^(?:{'|'.join(re.escape(p) for p in standard)})\s*", re.IGNORECASE
        )
        if (match := expr.match(message.content)) is not None:
            return match[0]
        # don't waste effort checking prefixes twice
        return []

    async def on_ready(self):
        """Log identity and configured prefix once the gateway session is up."""
        print(f"Logged in as {self.user}!")
        print(f"Prefix: {os.environ['BOT_PREFIX']}")

    async def critical_setup(self):
        # Run synchronously from __init__: nothing works without the DB.
        await self.db.connect()

    async def setup(self):
        # Deferred setup: needs the gateway connection, so wait for ready.
        await self.wait_until_ready()
        await self._fetch_owners()

    async def _fetch_owners(self):
        """Populate owner_ids from the application's owner or team members."""
        app_info = await self.application_info()
        if app_info.team is None:
            self.owner_ids = set((app_info.owner.id,))
        else:
            self.owner_ids = set(m.id for m in app_info.team.members)

    async def close(self):
        # Shut down the gateway first, then release our own resources.
        await super().close()
        await self.session.close()
        await self.db.close()

    async def get_context(self, message, *, cls=None):
        # Default to the project's custom Context unless overridden.
        return await super().get_context(message, cls=cls or Context)

    async def on_message(self, message: discord.Message):
        # Ignore other bots to avoid loops, then dispatch commands normally.
        if message.author.bot:
            return
        await self.process_commands(message)
|
import sys
from rosalind_utility import parse_fasta
def calculate_gc_content(string):
    ''' Calculate GC content

    :param string: string to calculate GC content for (string);
                   expected to use uppercase G/C/A/T characters
    :return: GC content in [0.0, 1.0] (float); 0.0 for an empty string
    '''
    # Guard the empty sequence instead of raising ZeroDivisionError.
    if not string:
        return 0.0
    count_gc = string.count("G") + string.count("C")
    return count_gc / len(string)
if __name__ == "__main__":
'''
Given: At most 10 DNA strings in FASTA format (of length at most 1 kbp each).
Return: The ID of the string having the highest GC-content, followed by the GC-content of that string.
'''
input_lines = sys.stdin.read().splitlines()
DNA_strs = parse_fasta(input_lines)
max_GC_content = -1
max_name = ''
for name, seq in DNA_strs.items():
GC_content = calculate_gc_content(seq)
if GC_content > max_GC_content:
max_name = name
max_GC_content = GC_content
print(max_name)
print(max_GC_content * 100)
|
from TGA_analyse_aliq2 import TGA_AnalyseAliq2
import simplejson as json  # NOTE(review): currently unused here; kept deliberately
from TGA_plot_surf import TGA_Plot
import numpy as np  # NOTE(review): currently unused here; kept deliberately

if __name__ == '__main__':
    # Run the aliquot-2 TGA analysis pipeline and render the standard set of
    # adsorption/desorption plots.  The large blocks of commented-out legacy
    # aliq3/diagnostic plotting code that used to live below were removed —
    # recover them from version control if they are ever needed again.
    analyse = TGA_AnalyseAliq2()
    analyse.load()
    analyse.analyseAll()
    plot = TGA_Plot()

    # ---- Simple plots, with blank subtraction ----
    plot.aliqSimplePlot(analyse.ADS_aliqBlank1_list, analyse.DES_aliqBlank1_list, 'aliq2_blank1_corr', 'Adsorption',
                        'Desorption', xmax = 45, ymax = 20, yAxisLabel = 'delta mass (mg)')
    plot.aliqSimplePlot(analyse.ADS_aliqBlank2_list, analyse.DES_aliqBlank2_list, 'aliq2_blank2_corr', 'Adsorption',
                        'Desorption', xmax = 45, ymax = 20, yAxisLabel = 'delta mass (mg)')
    # ---- Simple plot, raw (no blank subtraction) ----
    plot.aliqSimplePlot(analyse.ADS_aliqRaw_list, analyse.DES_aliqRaw_list, 'aliq2_raw', 'Adsorption',
                        'Desorption', xmax = 45, ymax = 20, yAxisLabel = 'delta mass (mg)')

    # ---- Comparison plots: aliquot raw vs. corrected-1 vs. corrected-2 ----
    plot.ads_aliqDiffPlot(analyse.ADSaliqBlank_compareAll_list[0], analyse.ADSaliqBlank_compareAll_list[1],
                          analyse.ADSaliqBlank_compareAll_list[2], 'ADSraw_blank_comparison', 0, 'aliqRaw_aliqBlank1_diff',
                          'aliqRaw_aliqBlank2_diff', 'aliqBlank1_aliqBlank2_diff', xmax = 45, ymin = -0.03, ymax = 0.10)
    plot.des_aliqDiffPlot(analyse.DESaliqBlank_compareAll_list[0], analyse.DESaliqBlank_compareAll_list[1],
                          analyse.DESaliqBlank_compareAll_list[2], 'DESraw_blank_comparison', 0, 'aliqRaw_aliqBlank1_diff',
                          'aliqRaw_aliqBlank2_diff', 'aliqBlank1_aliqBlank2_diff', xmax = 45, ymin = -0.03, ymax = 0.15)

    # ---- All blanks compared with their average ----
    plot.ads_blankManyPlot(analyse.ADSblank1_compareAll_list, 'ADScompare_all_blanks_1', xmax = 45, ymax = 0.2)
    plot.des_blankManyPlot(analyse.DESblank1_compareAll_list, 'DEScompare_all_blanks_1', xmax = 45, ymax = 0.2)
    plot.ads_blankManyPlot(analyse.ADSblank2_compareAll_list, 'ADScompare_all_blanks_2', xmax = 45, ymax = 0.2)
    plot.des_blankManyPlot(analyse.DESblank2_compareAll_list, 'DEScompare_all_blanks_2', xmax = 45, ymax = 0.2)

    # ---- Blank difference plot ----
    plot.ads_blankDiffPlot(analyse.ADS_blank1_diff_list, analyse.ADS_blank2_diff_list, analyse.ADS_blankDiff_list, 'blanks_diffs_ads',
                           'Difference between lines', xmax = 45, ymax = 0.1)
# # |
from typing import Callable, Generic, TypeVar, TYPE_CHECKING
if not TYPE_CHECKING:
    # reveal_type() only exists inside the type checker; alias it to print
    # so this demo script also runs under the plain interpreter.
    reveal_type = print
from example.functions import flow, identity
_ValueType = TypeVar('_ValueType', covariant=True)
_NewValueType = TypeVar('_NewValueType')
# Functor definition:
class Wrapper(Generic[_ValueType]):
    """Minimal functor: a box holding one value, transformable via ``map``."""

    def __init__(self, inner_value: _ValueType) -> None:
        self._inner_value = inner_value

    def map(
        self,
        function: Callable[[_ValueType], _NewValueType],
    ) -> 'Wrapper[_NewValueType]':
        """Apply *function* to the boxed value and re-wrap the result."""
        mapped = function(self._inner_value)
        return Wrapper(mapped)
T = TypeVar('T')
N = TypeVar('N')
def map_(
    function: Callable[[T], N],
) -> Callable[[Wrapper[T]], Wrapper[N]]:
    """Lift *function* so it operates on ``Wrapper`` values (curried form)."""
    def lifted(wrapped: Wrapper[T]) -> Wrapper[N]:
        return wrapped.map(function)
    return lifted
# Example:
def first(arg: int) -> float:
    """Pipeline step one: widen an int to a float."""
    return float(arg)
def second(arg: float) -> str:
    """Pipeline step two: render the float in its default string form."""
    return str(arg)
# Demo: box an int, then pipe it through the lifted int->float->str steps;
# at runtime reveal_type() is aliased to print(), so this prints the result.
instance = Wrapper(1)
reveal_type(flow(instance, map_(first), map_(second)))
|
# -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf
from PIL import Image
import sys
class QrcodeTools(object):
    """Render QR-code images as ASCII art and probe their module grid."""

    def qrcode_to_str(self, qrcode_path):
        """Print a down-scaled, binarised view of the image at *qrcode_path*.

        The image is converted to greyscale, shrunk to 30%, and only pixels
        that are exactly black (grey level 0) are drawn as blocks.
        """
        image = Image.open(qrcode_path)
        width = int(image.width * 0.3)
        height = int(image.height * 0.3)
        gray_img = image.convert('L')
        # Image.ANTIALIAS was deprecated and removed in Pillow 10;
        # Image.LANCZOS is the identical resampling filter.
        gray_img = gray_img.resize((width, height), Image.LANCZOS)
        width = gray_img.width
        height = gray_img.height
        # NOTE(review): the outer loop runs over `height` but is used as the
        # x (column) coordinate of getpixel, so the output is transposed and
        # may raise IndexError for non-square images — confirm intent.
        for x in range(0, height):
            for y in range(0, width):
                pixel = gray_img.getpixel((x, y))
                if pixel == 0:  # only pure black is drawn
                    sys.stdout.write("▇")
                else:
                    sys.stdout.write(" ")
            sys.stdout.write('\n')
        sys.stdout.flush()
        print(image.getpixel((10, 10)))

    def get_cell_size(self, image, x, y, x2, y2):
        """Return the pixel size of one black module.

        Scans right/down from (x, y); the first pure-white pixel marks the
        end of the black square, so ``j - x`` is the module size.  Returns
        None when no white pixel occurs inside the scanned window.
        """
        for j in range(x, x2):
            for i in range(y, y2):
                pix = image.getpixel((j, i))
                if pix[:3] == (255, 255, 255):
                    return j - x

    def get_cell(self, image, width, height):
        """Debug scan over every pixel (module detection is still disabled).

        The original detection sketch — remember the first black pixel, then
        the first white pixel after it, and derive the cell size with
        get_cell_size() — is intentionally left commented out below.
        """
        flag = 0
        for y in range(height):
            for x in range(width):
                print(x, y)
                pix = image.getpixel((x, y))
                print(pix)
                # if pix[:3]==(0,0,0) and flag==0:  # first black pixel
                #     x1=x
                #     flag = 1
                # if pix[:3]==(255,255,255) and flag ==1:  # first white pixel
                #     flag = 2
                #     cell = self.get_cell_size(image,x1,x1,x,x)
                #     return cell

    def get_qrcode(self, cell, image, width, height):
        """Print the QR matrix sampled once every *cell* pixels as ▇/space."""
        print(cell)
        height = int(height / cell)
        width = int(width / cell)
        code = ''
        for y in range(height):
            for x in range(width):
                pix = image.getpixel((x * cell, y * cell))
                if pix[:3] == (0, 0, 0):
                    code += '▇'
                if pix[:3] == (255, 255, 255):
                    code += ' '
            code += '\n'
        print(code)
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5 import QtCore
from PyQt5.QtGui import *
class AppWidget(QWidget):
    """Demo widget: a combo box that switches the application's Qt style."""

    def __init__(self, parent=None):
        super(AppWidget, self).__init__(parent)
        layout = QHBoxLayout()
        self.styleLabel = QLabel('Set Style:')
        self.styleComboBox = QComboBox()
        # Offer every widget style known to QStyleFactory.
        self.styleComboBox.addItems(QStyleFactory.keys())
        # Pre-select whichever style the application is currently using.
        current = self.styleComboBox.findText(
            QApplication.style().objectName(), QtCore.Qt.MatchFixedString)
        self.styleComboBox.setCurrentIndex(current)
        # Apply a new style whenever the user picks one from the combo box.
        self.styleComboBox.activated[str].connect(self.handleStyleChanged)
        layout.addWidget(self.styleLabel)
        layout.addWidget(self.styleComboBox)
        self.setLayout(layout)

    def handleStyleChanged(self, style):
        """Slot: install *style* application-wide."""
        QApplication.setStyle(style)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the widget, and
    # hand control to the event loop until the window is closed.
    app = QApplication(sys.argv)
    widgetApp = AppWidget()
    widgetApp.show()
    sys.exit(app.exec())
|
import math
def v_sub(a, b):
    """Component-wise difference ``a - b`` (length taken from *a*)."""
    return [a[i] - b[i] for i in range(len(a))]
def v_len(a):
    """Euclidean norm of vector *a* (0.0 for an empty vector)."""
    return math.sqrt(sum(c * c for c in a))
def v_cross(v1, v2):
    """3-D cross product v1 × v2, returned as a new 3-element list."""
    return [
        v1[1] * v2[2] - v1[2] * v2[1],
        v2[0] * v1[2] - v2[2] * v1[0],
        v1[0] * v2[1] - v1[1] * v2[0],
    ]
def v_dot(v0, v1):
    """Dot product of two 3-D vectors."""
    return v0[0] * v1[0] + v0[1] * v1[1] + v0[2] * v1[2]
def v_angle(v0, v1):
    """Angle in radians between *v0* and *v1*.

    The cosine is clamped to [-1, 1] so floating-point drift cannot push
    math.acos() out of its domain.
    """
    cos_angle = v_dot(v0, v1) / (v_len(v0) * v_len(v1))
    cos_angle = max(-1.0, min(1.0, cos_angle))
    return math.acos(cos_angle)
class Normalize(object):
    """Z-score normalisation that drops near-constant features.

    Features whose std is <= 0.00011 are removed from both the stored mean
    and std, so normalised vectors contain only informative components.
    """

    def __init__(self, mean, std):
        kept = [(mean[i], std[i])
                for i in range(len(mean)) if std[i] > 0.00011]
        self.mean = [m for m, _ in kept]
        self.std = [s for _, s in kept]

    def normalize(self, data):
        """Normalise *data* (vectorised operand, e.g. a numpy array)."""
        return (data - self.mean) / self.std

    def de_normalize(self, data):
        """Inverse of normalize() for vectorised operands."""
        return data * self.std + self.mean

    def de_normalize_idx(self, data, index):
        """De-normalise a scalar belonging to feature *index*."""
        return data * self.std[index] + self.mean[index]

    def normalize_l(self, data):
        """Normalise a plain Python list element-wise."""
        return [(data[i] - self.mean[i]) / self.std[i]
                for i in range(len(data))]

    def de_normalize_l(self, data):
        """De-normalise a plain Python list element-wise."""
        return [data[i] * self.std[i] + self.mean[i]
                for i in range(len(data))]

    def size(self):
        """Number of features kept after filtering."""
        return len(self.mean)
class DummyCM(object):
    """No-op context manager, usable where a real one is optional."""

    def __init__(self):
        pass

    def __enter__(self):
        # Nothing to acquire; the `as` target receives None.
        pass

    def __exit__(self, type, value, traceback):
        # Returning None means exceptions propagate normally.
        pass
|
#import sys
#input = sys.stdin.readline
def main():
    """Pick up to K of N items to maximise total value / total weight.

    dp[k] holds [weight_sum, value_sum] for the best ratio found using
    exactly k items; W holds (weight, priority) pairs read from stdin.
    Ratio comparisons are cross-multiplied to avoid float division.
    """
    N, K = map( int, input().split())
    W = [ tuple( map( int, input().split())) for _ in range(N)]
    dp = [[0,0] for _ in range(K+1)]
    dp[1][0] = W[0][0]
    dp[1][1] = W[0][0]*W[0][1]
    # NOTE(review): looks like leftover debug output — confirm before use.
    print(dp)
    for w, p in W[1:]:
        g = w*p
        # Walk k downwards so each item is used at most once per pass.
        for k in range(K,1,-1):
            if dp[k-1][0] == 0:
                continue
            if (dp[k-1][1] + g)*(dp[k][0]) >= dp[k][1]*(dp[k-1][0] + w):
                dp[k][0] = dp[k-1][0]+w
                dp[k][1] = dp[k-1][1] +g
        # Single-item slot may also be improved by the current item.
        if g*(dp[1][0]) > dp[1][1]*w:
            dp[1][0] = w
            dp[1][1] = g
    # NOTE(review): second debug print — confirm before use.
    print(dp)
    print( dp[K][1]/dp[K][0])
if __name__ == '__main__':
    main()
|
"""
vulkit wrapper generator
Copyright (c) 2016, Ben Russell
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import xml.etree.ElementTree as ET
def ensure(cond):
    """Raise AssertionError when *cond* is falsy.

    Unlike a bare ``assert``, this check is not stripped when Python runs
    with -O, so registry invariants are always enforced.
    """
    if not cond:
        raise AssertionError('registry invariant violated')
# Parse the Vulkan XML registry named on the command line and open the
# two files this generator produces.
tree = ET.parse(sys.argv[1])
fp_header = open("vulkan.h", "w")
fp_source = open("vulkit_wrapper.c", "w")
commands = []  # (return_type, name, params) tuples in registry order
extensions = {}  # unused placeholder
cmd_ext_map = {}  # command name -> owning extension name
root = tree.getroot()
ensure(root.tag == "registry")
def output_to_header(s):
    """Write *s* followed by a newline to the generated header file."""
    fp_header.write('%s\n' % (s,))
def output_to_source(s):
    """Write *s* followed by a newline to the generated C source file."""
    fp_source.write('%s\n' % (s,))
def gen_param_prototype(ptype, name):
    """Render one C parameter declaration, e.g. ``'VkDevice device'``."""
    return '{} {}'.format(ptype, name)
def gen_command_prototype(rtype, name, params):
    """Emit the vulkit_proto_* declaration for one command into the header."""
    args = ', '.join(gen_param_prototype(*p) for p in params)
    line = '%s vulkit_proto_%s (%s);' % (rtype, name, args)
    if name in cmd_ext_map:
        # Extension commands are only declared when their macro is enabled.
        line = '#ifdef %s\n%s\n#endif' % (cmd_ext_map[name], line)
    output_to_header(line)
def gen_command_map(rtype, name, params):
    """Emit a #define mapping the real vk name to its vulkit proto symbol.

    *rtype* and *params* are unused but kept so every generator shares the
    same (rtype, name, params) signature expected by tap_commands().
    """
    output_to_header('#define {0} vulkit_proto_{0}'.format(name))
def gen_command_ifunc(rtype, name, params):
    """Emit the C definition that resolves command *name* at runtime.

    vkCreateInstance gets a real wrapper that stashes the created instance;
    the two instance-less enumerate commands resolve via
    vkGetInstanceProcAddr(NULL, ...); every other command becomes a GNU
    ifunc resolved through the last created instance.
    """
    proto = ', '.join(gen_param_prototype(*p) for p in params)
    if name == "vkCreateInstance":
        # Comma-separated argument names (no types), for forwarding the call.
        proto2 = ', '.join(p[1] for p in params[:])
        s = (
"""%s vulkit_proto_%s (%s)
{
PFN_%s create_inst = (PFN_%s)vkGetInstanceProcAddr(NULL, "%s");
VkResult res = create_inst(%s);
memcpy(&vulkit_last_instance, %s, sizeof(VkInstance));
return res;
}
""" ) % (
            rtype, name, proto,
            name, name, name,
            proto2,
            params[-1][1],
        )
    elif name in ["vkEnumerateInstanceLayerProperties", "vkEnumerateInstanceExtensionProperties"]:
        # These may be called before any VkInstance exists.
        s = (
"""static PFN_%s resolve_%s(void)
{
return (PFN_%s)vkGetInstanceProcAddr(NULL, "%s");
}
%s vulkit_proto_%s (%s) __attribute__((ifunc ("resolve_%s")));
""") % (
            name, name,
            name, name,
            rtype, name, proto, name,
        )
    else:
        s = (
"""static PFN_%s resolve_%s(void)
{
return (PFN_%s)vkGetInstanceProcAddr(vulkit_last_instance, "%s");
}
%s vulkit_proto_%s (%s) __attribute__((ifunc ("resolve_%s")));
""") % (
            name, name,
            name, name,
            rtype, name, proto, name,
        )
    if name in cmd_ext_map:
        # Guard extension commands behind their extension macro.
        s = "#ifdef %s\n%s#endif\n" % (cmd_ext_map[name], s, )
    output_to_source(s)
# Single pass over the registry: record which command belongs to which
# extension, and collect every command's return type, name and parameters.
for c0 in root:
    if c0.tag == "extensions":
        for extension in c0:
            ensure(extension.tag == "extension")
            ext_name = extension.attrib["name"]
            ensure(ext_name)
            #print((extension.tag, extension.attrib))
            for c1 in extension:
                ensure(c1.tag == "require")
                for c2 in c1:
                    if c2.tag == "enum":
                        pass
                    elif c2.tag == "type":
                        pass
                    elif c2.tag == "command":
                        #print((c2.attrib))
                        cmd_name = c2.attrib["name"]
                        ensure(cmd_name)
                        cmd_ext_map[cmd_name] = ext_name
                    elif c2.tag == "usage":
                        pass
                    else:
                        # Unknown tag: dump it and abort.
                        print(repr(c2.tag))
                        ensure(False)
    elif c0.tag == "commands":
        for command in c0:
            ensure(command.tag == "command")
            cmd_type = None
            cmd_name = None
            cmd_params = []
            for c1 in command:
                if c1.tag == "proto":
                    ensure(cmd_name == None)
                    ensure(cmd_type == None)
                    # Text around the <type>/<name> children carries const
                    # qualifiers and pointer stars; fold them back in below.
                    cmd_type_prefix = c1.text
                    cmd_type_suffix = None
                    for c2 in c1:
                        if c2.tag == "type":
                            cmd_type = c2.text
                            cmd_type_suffix = c2.tail
                        elif c2.tag == "name":
                            cmd_name = c2.text
                        else:
                            assert(False)
                    ensure(cmd_name != None)
                    ensure(cmd_type != None)
                    if cmd_type_prefix == None:
                        pass
                    elif cmd_type_prefix in ["const "]:
                        cmd_type = cmd_type_prefix + cmd_type
                    else:
                        print(repr(cmd_type_prefix))
                        ensure(False)
                    if cmd_type_suffix == None:
                        pass
                    elif cmd_type_suffix in ["* ", "** "]:
                        cmd_type += " " + cmd_type_suffix[:-1]
                    elif cmd_type_suffix in [" "]:
                        pass
                    else:
                        print(repr(cmd_type_suffix))
                        ensure(False)
                elif c1.tag == "param":
                    # Same prefix/suffix folding as for the return type.
                    p_type = None
                    p_name = None
                    p_type_prefix = c1.text
                    p_type_suffix = None
                    for c2 in c1:
                        if c2.tag == "type":
                            p_type = c2.text
                            p_type_suffix = c2.tail
                        elif c2.tag == "name":
                            p_name = c2.text
                        else:
                            assert(False)
                    ensure(p_name != None)
                    ensure(p_type != None)
                    if p_type_prefix == None:
                        pass
                    elif p_type_prefix in ["const ", "struct "]:
                        p_type = p_type_prefix + p_type
                    else:
                        print(repr(p_type_prefix))
                        ensure(False)
                    if p_type_suffix == None:
                        pass
                    elif p_type_suffix in ["* ", "** "]:
                        p_type += " " + p_type_suffix[:-1]
                    elif p_type_suffix in [" "]:
                        pass
                    else:
                        print(repr(p_type_suffix))
                        ensure(False)
                    cmd_params.append((p_type, p_name))
                elif c1.tag == "validity":
                    pass
                elif c1.tag == "implicitexternsyncparams":
                    pass
                else:
                    print(c1.tag)
                    ensure(False)
            commands.append((cmd_type, cmd_name, cmd_params))
def tap_commands(proto_tap):
    """Invoke *proto_tap* once per parsed (rtype, name, params) tuple."""
    for cmd in commands:
        proto_tap(*cmd)
# Emit the header: include guard, prototypes, then the #define map.
output_to_header(
"""#ifndef VULKIT_VULKAN_H_
#define VULKIT_VULKAN_H_ 1
#ifdef VULKAN_H_
#error "Don't include vulkan/vulkan.h, just include vulkit/vulkan.h"
#else
#include <vulkan/vulkan.h>
#endif
""")
tap_commands(gen_command_prototype)
output_to_header("")
tap_commands(gen_command_map)
output_to_header("")
output_to_header(
"""#endif
""")
# Emit the C source with the runtime resolver definitions.
output_to_source(
"""#include <string.h>
#define VK_KHR_surface 1
#include <vulkan/vulkan.h>
static VkInstance vulkit_last_instance = VK_NULL_HANDLE;
""")
tap_commands(gen_command_ifunc)
output_to_source("")
|
from tkinter import *
import random
import time
# Tk key-press handlers: each stores the new travel direction in the
# global `di` using WASD codes (w=up, s=down, a=left, d=right).
def UP_ (event):
    global di
    di = 'w'
def DOWN_ (event):
    global di
    di = 's'
def LEFT_ (event):
    global di
    di = 'a'
def RIGHT_ (event):
    global di
    di = 'd'
def RAND_F ():
    """Pick a random free cell: one not occupied by the snake P or a wall."""
    global t, P, X
    while True:
        cell = [random.randint (0, t - 1), random.randint (0, t - 1)]
        if cell in P:
            continue  # on the snake: re-roll
        if cell in WALLS:
            continue  # inside a wall: re-roll
        return cell
#def RAND_W ():
#return m
def WALLS_ ():
    """Expand the wall segments in W (endpoint pairs) into a cell list."""
    global W, X
    WALLS = []
    for start, end in W:
        if start[0] == end[0]:
            # Same row: walk the columns between the two endpoints.
            low = min (start[1], end[1])
            for k in range (abs (start[1] - end[1]) + 1):
                WALLS += [[start[0], low + k]]
        elif start[1] == end[1]:
            # Same column: walk the rows between the two endpoints.
            low = min (start[0], end[0])
            for k in range (abs (start[0] - end[0]) + 1):
                WALLS += [[low + k, start[1]]]
    return WALLS
# --- Interactive setup: field size, difficulty, board and Tk canvas. ---
print ('Введите сторону поля, от 3 до 16:', end = ' ')
t = input ()
#U = ('3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16')
#flag = 0
#while flag == 0:
#if t in U: flag = 1
#else:
##for i in range (18): print ()
#print ('Попробуйте ещё раз. Число от 3 до 16:', end = ' ')
#t = input ()
# +2 adds the border walls on each side.
t = int (t) + 2
size = 20  # pixel size of one cell
h = 100  # canvas margin in pixels
# Snake starts as a single random cell inside the borders.
P = [[random.randint (1, t - 2), random.randint (1, t - 2)]]
#W = open ('WALLS.txt')
#W = W.readlines ()
di = 'o'  # 'o' = no direction chosen yet
lose = 0
X = [0] * t
for i in range (t): X [i] = ['.'] * t
# Four border segments (top, bottom, left, right).
W = [[[0, 0], [0, t - 1]], [[t - 1, 0], [t - 1, t - 1]], [[1, 0], [t - 2, 0]], [[1, t - 1], [t - 2, t - 1]]]
WALLS = WALLS_ ()
for i in range (len (WALLS)): X [WALLS [i][0]][WALLS [i][1]] = '#'
fruit = 0
F = RAND_F ()
X [F [0]][F [1]] = '$'
X [P [0][0]][P [0][1]] = '@'
q = P [-1]  # snake head
# Hard mode (5x5 viewport around the head) only offered on larger fields.
if t > 6:
    print ('Включить сложный режим? Y/N:', end = ' ')
    ans = input ()
    flag = 0
    while flag == 0:
        if ans == 'Y':
            hard = 1
            flag = 1
        elif ans == 'N':
            hard = 0
            flag = 1
        else:
            #for i in range (18): print ()
            print ('Попробуйте ещё раз. Y/N:', end = ' ')
            ans = input ()
else: hard = 0
print ('Выберите уровень сложности по шкале от 1 до 10:', end = ' ')
z = input ()
Y = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10')
flag = 0
while flag == 0:
    if z in Y: flag = 1
    else:
        #for i in range (18): print ()
        print ('Попробуйте ещё раз. Число от 1 до 10:', end = ' ')
        z = input ()
z = int (z)
# Difficulty -> frame delay in seconds (higher level = shorter sleep).
z = (11 - z) / 50
if hard == 1:
    k = 5  # viewport size in cells
    S = []
    u = min (max (0, q [0] - 2), t - 5)
    v = min (max (0, q [1] - 2), t - 5)
    for i in range (5): S.append (X [u + i][v:v + 5])
    #for i in range (6): print ()
    #for i in range (5): print (*S [i])
    #for i in range (7): print ()
else:
    k = t  # show the whole field
    #for i in range (9 - t // 2 - t % 2): print ()
    #for i in range (t): print (*X [i])
    #for i in range (9 - t // 2): print ()
root = Tk ()
canvas = Canvas (root, width = 2 * h + k * size, height = 2 * h + k * size)
canvas.pack ()
root.bind ('<Left>', LEFT_)
root.bind ('<Right>', RIGHT_)
root.bind ('<Up>', UP_)
root.bind ('<Down>', DOWN_)
di = 'o'
# --- Main game loop: runs until the snake dies or fills the free field. ---
while lose == 0 and len (P) < t ** 2 - len (WALLS):
    # Legacy console-mode input/drawing code, kept disabled:
    #if hard == 1:
    #canvas.create_rectangle (h, h, h + 5 * size, h + 5 * size, fill = 'white')
    #for i in range (4): canvas.create_line (h, h + (i + 1) * size, h + 5 * size, h + (i + 1) * size)
    #for i in range (4): canvas.create_line (h + (i + 1) * size, h, h + (i + 1) * size, h + 5 * size)
    #else:
    #canvas.create_rectangle (h, h, h + t * size, h + t * size, fill = 'white')
    #for i in range (t - 1): canvas.create_line (h, (i + 1) * size + h, t * size + h, (i + 1) * size + h)
    #for i in range (t - 1): canvas.create_line ((i + 1) * size + h, h, (i + 1) * size + h, t * size + h)
    #di = input ()
    #flag = 0
    #while flag == 0:
    #if di1 != 'w' and di1 != 'a' and di1 != 's' and di1 != 'd':
    #if hard == 1:
    #for i in range (5): print (*S [i])
    #for i in range (7): print ()
    #else:
    #for i in range (t): print (*X [i])
    #for i in range (9 - t // 2): print ()
    #print ('Попробуйте ещё раз:', end = ' ')
    #di1 = input ()
    #if hard == 1:
    #for i in range (6): print ()
    #else:
    #for i in range (9 - t // 2 - t % 2): print ()
    #elif di1 == 's' and di != 'w': flag = 1
    #elif di1 == 'a' and di != 'd': flag = 1
    #elif di1 == 'd' and di != 'a': flag = 1
    #elif di1 == 'w' and di != 's': flag = 1
    #else:
    #if len (P) != 1:
    #lose = 1
    #flag = 1
    #di = di1
    # Frame delay set by the chosen difficulty.
    time.sleep (z)
    # Advance the head one cell in the current direction.
    if di == 'w': q = [P [-1][0] - 1, P [-1][1]]
    elif di == 's': q = [P [-1][0] + 1, P [-1][1]]
    elif di == 'a': q = [P [-1][0], P [-1][1] - 1]
    elif di == 'd': q = [P [-1][0], P [-1][1] + 1]
    P += [q]
    #for i in range (len (P)):
    #if P [i][0] < 0: P [i][0] = t - 1
    #if P [i][0] > t - 1: P [i][0] = 0
    #if P [i][1] < 0: P [i][1] = t - 1
    #if P [i][1] > t - 1: P [i][1] = 0
    # Rebuild the character grid from scratch each frame.
    X = [0] * t
    for i in range (t): X [i] = ['.'] * t
    for i in range (len (WALLS)): X [WALLS [i][0]][WALLS [i][1]] = '#'
    if P [-1] == F: fruit = 1
    #if len (P) < t ** 2 - len (WALLS):
    # Ate a fruit: keep the tail (grow) and spawn a new fruit if room remains.
    if fruit == 1 and len (P) < t ** 2 - len (WALLS):
        F = RAND_F ()
        fruit = 0
    elif fruit == 1: fruit = 0
    else:
        # No fruit eaten: drop the tail cell (normal move).
        # NOTE(review): `l` is only assigned here but drawn below in easy
        # mode — a win on the very first frame would hit an unset `l`.
        l = P [0]
        P = P [1:]
    X [F [0]][F [1]] = '$'
    # Stamp the snake onto the grid; stepping on itself or a wall loses.
    for i in range (len (P)):
        if X [P [i][0]][P [i][1]] != '@' and X [P [i][0]][P [i][1]] != '#': X [P [i][0]][P [i][1]] = '@'
        else: lose = 1
    # Redraw the board background, grid lines and walls.
    canvas.create_rectangle (h, h, h + k * size, h + k * size, fill = 'white')
    for i in range (k - 1): canvas.create_line (h, h + (i + 1) * size, h + k * size, h + (i + 1) * size)
    for i in range (k - 1): canvas.create_line (h + (i + 1) * size, h, h + (i + 1) * size, h + k * size)
    for i in range (len (WALLS)):
        canvas.create_rectangle (h + WALLS [i][1] * size, h + WALLS [i][0] * size, h + (WALLS [i][1] + 1) * size, h + (WALLS [i][0] + 1) * size, fill = 'black')
    if hard == 1:
        # Hard mode: draw only a 5x5 window centred on the head.
        S = []
        u = min (max (0, q [0] - 2), t - 5)
        v = min (max (0, q [1] - 2), t - 5)
        for i in range (5): S.append (X [u + i][v:v + 5])
        if lose == 0:
            #for i in range (5): print (*S [i])
            for i in range (len (P)):
                if abs (P [i][0] - P [-1][0]) < 3 and abs (P [i][1] - P [-1][1]) < 3:
                    #if S [i // 5][i % 5] == '@':
                    canvas.create_oval (size // 4 + h + (2 + P [i][1] - P [-1][1]) * size, size // 4 + h + (2 + P [i][0] - P [-1][0]) * size, 3 * size // 4 + h + (2 + P [i][1] - P [-1][1]) * size, 3 * size // 4 + h + (2 + P [i][0] - P [-1][0]) * size)
                    #canvas.create_oval (size // 4 + h + P [i][1] * size, size // 4 + h + P [i][0] * size, 3 * size // 4 + h + P [i][1] * size, 3 * size // 4 + h + P [i][0] * size)
            #for i in range (7): print ()
            #else:
            #for i in range (1): print ()
            #for i in range (len (WALLS)):
            #canvas.create_rectangle ()
    else:
        # Easy mode: draw the whole snake; erase the vacated tail cell.
        if lose == 0:
            #for i in range (t): print (*X [i])
            canvas.create_oval (size // 4 + h + l [1] * size, size // 4 + h + l [0] * size, 3 * size // 4 + h + l [1] * size, 3 * size // 4 + h + l [0] * size, outline = 'white')
            for i in range (len (P)): print (P [i])
            print ()
            for i in range (len (P)):
                canvas.create_oval (size // 4 + h + P [i][1] * size, size // 4 + h + P [i][0] * size, 3 * size // 4 + h + P [i][1] * size, 3 * size // 4 + h + P [i][0] * size)
            #for i in range (9 - t // 2): print ()
            #else:
            #for i in range (1 + t % 2): print ()
    # Draw the fruit and push the frame to the screen.
    canvas.create_rectangle (size // 4 + h + F [1] * size, size // 4 + h + F [0] * size, 3 * size // 4 + h + F [1] * size, 3 * size // 4 + h + F [0] * size)
    canvas.update ()
    #di = input ()
    #for i in range (t // 2 - 2): print ()
# Loop exited: either the snake died or the field is completely full.
if len (P) < t ** 2 - len (WALLS):
    print ('Вы проиграли! Ваша длина:', end = ' ')
    print (len (P), end = '.')
else: print ('Поздравляю! Вы заняли всё поле и выиграли!')
#for i in range (7): print ()
root.mainloop ()
#!/usr/bin/env python3
# YAPTB Bluetooth keyboard emulator DBUS Service
#
# Adapted from
# www.linuxuser.co.uk/tutorials/emulate-bluetooth-keyboard-with-the-raspberry-p
# https://gist.github.com/ukBaz/a47e71e7b87fbc851b27cde7d1c0fcf0#file-btk_server-py
import os
import signal
import socket
import sys
import dbus
import dbus.service
import dbus.mainloop.qt
import PyQt4.QtCore
class HumanInterfaceDeviceProfile(dbus.service.Object):
    """
    BlueZ D-Bus Profile for HID
    """
    # File descriptor handed over by BlueZ in NewConnection; -1 = none.
    fd = -1

    @dbus.service.method('org.bluez.Profile1',
                         in_signature='', out_signature='')
    def Release(self):
        # BlueZ is unregistering the profile; stop the main loop.
        # NOTE(review): `mainloop` is not defined in this file — confirm
        # which loop object this is meant to reference.
        print('Release')
        mainloop.quit()

    @dbus.service.method('org.bluez.Profile1',
                         in_signature='oha{sv}', out_signature='')
    def NewConnection(self, path, fd, properties):
        # Take ownership of the unix fd BlueZ passed us, then log the
        # connection properties (Version/Features are printed in hex).
        self.fd = fd.take()
        print('NewConnection({}, {})'.format(path, self.fd))
        for key in properties.keys():
            if key == 'Version' or key == 'Features':
                print(' {} = 0x{:04x}'.format(key,
                                              properties[key]))
            else:
                print(' {} = {}'.format(key, properties[key]))

    @dbus.service.method('org.bluez.Profile1',
                         in_signature='o', out_signature='')
    def RequestDisconnection(self, path):
        # Close the connection fd when BlueZ asks us to disconnect.
        print('RequestDisconnection {}'.format(path))
        if self.fd > 0:
            os.close(self.fd)
            self.fd = -1
#
#define a bluez 5 profile object for our keyboard
#
class BTKbBluezProfile(dbus.service.Object):
    # NOTE(review): near-duplicate of HumanInterfaceDeviceProfile above
    # (adds Cancel); confirm whether both classes are still needed.
    # File descriptor handed over by BlueZ in NewConnection; -1 = none.
    fd = -1

    @dbus.service.method("org.bluez.Profile1",
                         in_signature="", out_signature="")
    def Release(self):
        # NOTE(review): `mainloop` is not defined in this file — confirm.
        print("Release")
        mainloop.quit()

    @dbus.service.method("org.bluez.Profile1",
                         in_signature="", out_signature="")
    def Cancel(self):
        print("Cancel")

    @dbus.service.method("org.bluez.Profile1", in_signature="oha{sv}", out_signature="")
    def NewConnection(self, path, fd, properties):
        # Take ownership of the unix fd BlueZ passed us and log properties.
        self.fd = fd.take()
        print("NewConnection(%s, %d)" % (path, self.fd))
        for key in properties.keys():
            if key == "Version" or key == "Features":
                print(" %s = 0x%04x" % (key, properties[key]))
            else:
                print(" %s = %s" % (key, properties[key]))

    @dbus.service.method("org.bluez.Profile1", in_signature="o", out_signature="")
    def RequestDisconnection(self, path):
        # Close the connection fd when BlueZ asks us to disconnect.
        print("RequestDisconnection(%s)" % (path))
        if (self.fd > 0):
            os.close(self.fd)
            self.fd = -1

    def __init__(self, bus, path):
        dbus.service.Object.__init__(self, bus, path)
#
#create a bluetooth device to emulate a HID keyboard,
# advertize a SDP record using our bluez profile class
#
class BTKbDevice():
    """Bluetooth device that emulates a HID keyboard.

    Registers an SDP record through the BlueZ Profile API, configures the
    local adapter (alias, discoverability), and exposes two L2CAP server
    sockets (control + interrupt) for the HID host to connect to.
    """
    # change these constants
    MY_ADDRESS = "B8:27:EB:42:55:2A"
    MY_DEV_NAME = "DeskPi_BTKb"
    # define some constants
    P_CTRL = 17  # Service port - must match port configured in SDP record
    P_INTR = 19  # Service port - must match port configured in SDP record (interrupt port)
    # BlueZ dbus
    PROFILE_DBUS_PATH = '/bluez/yaptb/btkb_profile'
    ADAPTER_IFACE = 'org.bluez.Adapter1'
    DEVICE_INTERFACE = 'org.bluez.Device1'
    DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
    DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
    # file path of the sdp record to load
    install_dir = os.path.dirname(os.path.realpath(__file__))
    SDP_RECORD_PATH = os.path.join(install_dir,
                                   'sdp_record.xml')
    # UUID for HID service (1124)
    # https://www.bluetooth.com/specifications/assigned-numbers/service-discovery
    UUID = '00001124-0000-1000-8000-00805f9b34fb'

    def __init__(self, hci=0):
        """Connect to BlueZ over D-Bus and configure adapter hci<N>."""
        self.scontrol = None
        self.ccontrol = None  # Socket object for control
        self.sinterrupt = None
        self.cinterrupt = None  # Socket object for interrupt
        self.dev_path = '/org/bluez/hci{}'.format(hci)
        print('Setting up BT device')
        self.bus = dbus.SystemBus()
        self.adapter_methods = dbus.Interface(
            self.bus.get_object('org.bluez',
                                self.dev_path),
            self.ADAPTER_IFACE)
        self.adapter_property = dbus.Interface(
            self.bus.get_object('org.bluez',
                                self.dev_path),
            self.DBUS_PROP_IFACE)
        self.bus.add_signal_receiver(self.interfaces_added,
                                     dbus_interface=self.DBUS_OM_IFACE,
                                     signal_name='InterfacesAdded')
        self.bus.add_signal_receiver(self._properties_changed,
                                     dbus_interface=self.DBUS_PROP_IFACE,
                                     signal_name='PropertiesChanged',
                                     arg0=self.DEVICE_INTERFACE,
                                     path_keyword='path')
        print('Configuring for name {}'.format(BTKbDevice.MY_DEV_NAME))
        self.config_hid_profile()
        # set the Bluetooth device configuration
        self.alias = BTKbDevice.MY_DEV_NAME
        self.discoverabletimeout = 0
        self.discoverable = True

    def interfaces_added(self, interface, changed, path):
        """Log BlueZ InterfacesAdded signals (diagnostic only)."""
        print(f'interfaces added: {interface} changed: {changed} path: {path}')

    def _properties_changed(self, interface, changed, invalidated, path):
        """Watch device property changes and react to disconnects."""
        print(f'properties changed: {interface} changed: {changed} invalidated: {invalidated} path: {path}')
        if self.on_disconnect is not None:
            if 'Connected' in changed:
                if not changed['Connected']:
                    self.on_disconnect()

    def on_disconnect(self):
        """Called when the HID host drops the connection."""
        print('The client has been "disconnected" (ignoring)')
        #self.listen()

    @property
    def address(self):
        """Return the adapter MAC address."""
        return self.adapter_property.Get(self.ADAPTER_IFACE,
                                         'Address')

    @property
    def powered(self):
        """
        power state of the Adapter.
        """
        return self.adapter_property.Get(self.ADAPTER_IFACE, 'Powered')

    @powered.setter
    def powered(self, new_state):
        self.adapter_property.Set(self.ADAPTER_IFACE, 'Powered', new_state)

    @property
    def alias(self):
        """Human-readable adapter name shown to remote devices."""
        return self.adapter_property.Get(self.ADAPTER_IFACE,
                                         'Alias')

    @alias.setter
    def alias(self, new_alias):
        self.adapter_property.Set(self.ADAPTER_IFACE,
                                  'Alias',
                                  new_alias)

    @property
    def discoverabletimeout(self):
        """Discoverable timeout of the Adapter."""
        # BUG FIX: was self.adapter_props, an attribute that never exists
        # (the interface is stored as self.adapter_property in __init__),
        # so reading this property raised AttributeError.
        return self.adapter_property.Get(self.ADAPTER_IFACE,
                                         'DiscoverableTimeout')

    @discoverabletimeout.setter
    def discoverabletimeout(self, new_timeout):
        self.adapter_property.Set(self.ADAPTER_IFACE,
                                  'DiscoverableTimeout',
                                  dbus.UInt32(new_timeout))

    @property
    def discoverable(self):
        """Discoverable state of the Adapter."""
        # BUG FIX: was self.adapter_props.Get(self.ADAPTER_INTERFACE, ...);
        # neither name exists on this class (adapter_property / ADAPTER_IFACE
        # are the real ones), so reading this property raised AttributeError.
        return self.adapter_property.Get(
            self.ADAPTER_IFACE, 'Discoverable')

    @discoverable.setter
    def discoverable(self, new_state):
        self.adapter_property.Set(self.ADAPTER_IFACE,
                                  'Discoverable',
                                  new_state)

    def config_hid_profile(self):
        """
        Setup and register HID Profile
        """
        print('Configuring Bluez Profile')
        service_record = self.read_sdp_service_record()
        opts = {
            'Role': 'server',
            'RequireAuthentication': False,
            'RequireAuthorization': False,
            'AutoConnect': True,
            'ServiceRecord': service_record,
        }
        manager = dbus.Interface(self.bus.get_object('org.bluez',
                                                     '/org/bluez'),
                                 'org.bluez.ProfileManager1')
        HumanInterfaceDeviceProfile(self.bus,
                                    BTKbDevice.PROFILE_DBUS_PATH)
        manager.RegisterProfile(BTKbDevice.PROFILE_DBUS_PATH,
                                BTKbDevice.UUID,
                                opts)
        print('Profile registered ')

    @staticmethod
    def read_sdp_service_record():
        """
        Read and return SDP record from a file
        :return: (string) SDP record
        """
        print('Reading service record')
        try:
            # `with` guarantees the file handle is closed after reading.
            with open(BTKbDevice.SDP_RECORD_PATH, 'r') as fh:
                return fh.read()
        except OSError:
            sys.exit('Could not open the sdp record. Exiting...')

    def listen(self):
        """
        Listen for connections coming from HID client
        """
        print('Waiting for connections')
        self.scontrol = socket.socket(socket.AF_BLUETOOTH,
                                      socket.SOCK_SEQPACKET,
                                      socket.BTPROTO_L2CAP)
        self.scontrol.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sinterrupt = socket.socket(socket.AF_BLUETOOTH,
                                        socket.SOCK_SEQPACKET,
                                        socket.BTPROTO_L2CAP)
        self.sinterrupt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.scontrol.bind((self.address, self.P_CTRL))
        self.sinterrupt.bind((self.address, self.P_INTR))
        # Start listening on the server sockets
        self.scontrol.listen(1)  # Limit of 1 connection
        self.sinterrupt.listen(1)
        self.ccontrol, cinfo = self.scontrol.accept()
        print('{} connected on the control socket'.format(cinfo[0]))
        self.cinterrupt, cinfo = self.sinterrupt.accept()
        print('{} connected on the interrupt channel'.format(cinfo[0]))

    # send a string to the bluetooth host machine
    def send_string(self, message):
        """Send *message* (a bytes HID report) on the interrupt channel."""
        print("Sending ", type(message), repr(message))
        self.cinterrupt.send(message)
#define a dbus service that emulates a bluetooth keyboard
#this will enable different clients to connect to and use
#the service
class BTKbService(dbus.service.Object):
    """D-Bus service (org.yaptb.btkbservice) that forwards key reports
    from local clients to the emulated Bluetooth keyboard device."""

    def __init__(self):
        print("Setting up service")
        #set up as a dbus service
        bus_name=dbus.service.BusName("org.yaptb.btkbservice",bus=dbus.SystemBus())
        dbus.service.Object.__init__(self,bus_name,"/org/yaptb/btkbservice")
        #create and setup our device
        self.device= BTKbDevice();
        #start listening for connections (blocks until a host connects)
        self.device.listen();

    @dbus.service.method('org.yaptb.btkbservice', in_signature='yay')
    def send_keys(self,modifier_byte,keys):
        # Build a HID input report: 0xA1 0x01 header, modifier byte,
        # reserved 0x00, then exactly six key codes.
        assert len(keys) == 6, f'should be six keys, was {len(keys)}'
        cmd_str=b'\xa1\x01' + bytes([modifier_byte, 0x00] + keys[0:6])
        # cmd_str+=chr(0xA1)
        # cmd_str+=chr(0x01)
        # cmd_str+=chr(modifier_byte)
        # cmd_str+=chr(0x00)
        # count=0
        # for key_code in keys:
        #     if(count<6):
        #         cmd_str+=chr(key_code)
        #         count+=1
        self.device.send_string(cmd_str);
#main routine
if __name__ == "__main__":
    # we can only run as root (raw Bluetooth sockets require it)
    if not os.geteuid() == 0:
        sys.exit("Only root can run this script")
    # Route D-Bus signals through the Qt event loop, then start the service.
    dbus.mainloop.qt.DBusQtMainLoop(set_as_default=True)
    app = PyQt4.QtCore.QCoreApplication([])
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    myservice = BTKbService();
    sys.exit(app.exec_())
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import os.path
import matplotlib.pyplot as plt
from epics import caget, caput
import time
# load the FSM origin
# Elena Manjavacas IDL to Python transcription
def loadfsmori(filename='/kroot/rel/ao/qfix/data/fsm_origin.dat'):
    """Load the FSM origin file and push the origins to the AO EPICS channels.

    Elena Manjavacas IDL to Python transcription.

    The binary file holds float32 values; field origins are written to the
    ``ao.obfm[xy]imo`` channels scaled by 1/1000, pupil origins go to
    ``ao.obfm[xy]puo`` unscaled, then an FSM move is triggered.

    :param filename: path to the origin file.  The original body ignored
        this parameter and hard-coded the default path; it is now honoured
        (the default preserves the old behaviour).
    """
    print('Reading %s' % filename)
    pos0 = np.fromfile(filename, dtype='f', offset=1)
    # np.fromfile returns a 1-D array, so the original 2-D indexing
    # (pos0[:, 0]) raised IndexError.  Reshape into rows of four values.
    # NOTE(review): assumes the record layout is (fx, fy, px, py) per row,
    # matching the four channels below — confirm against the IDL source.
    pos0 = pos0.reshape(-1, 4)
    fxpos0 = pos0[:, 0]
    fypos0 = pos0[:, 1]
    pxpos0 = pos0[:, 2]
    pypos0 = pos0[:, 3]
    print(' Setting FSM origins for pupil and field')
    caput('ao.obfmximo', fxpos0 / 1000.)
    caput('ao.obfmyimo', fypos0 / 1000.)
    caput('ao.obfmxpuo', pxpos0)
    caput('ao.obfmypuo', pypos0)
    print('Current values: ')
    # The original IDL-style lines (`print, 'Field x: ' + caget(...) * 1000`)
    # were invalid here: str + float raises TypeError.  Format instead.
    print('Field x: {}'.format(caget("ao.obfmximo") * 1000))
    print('Field y: {}'.format(caget("ao.obfmyimo") * 1000))
    print('Pupil x: {}'.format(caget("ao.obfmxpuo")))
    print('Pupil y: {}'.format(caget("ao.obfmypuo")))
    print('.. done')
    # trigger an FSM move
    caput('ao.obfmgoim', 1)
|
from hummingbot.client.settings import CONNECTOR_SETTINGS
from hummingbot.core.event.events import TradeFeeType
from hummingbot.client.config.config_methods import new_fee_config_var
def fee_overrides_dict():
    """Build the fee-override config map.

    For every connector this creates one maker and one taker config
    variable; the key suffix depends on the connector's fee type
    (percentage fee vs flat fee amount).
    """
    result = {}
    for name, setting in CONNECTOR_SETTINGS.items():
        if setting.fee_type is TradeFeeType.Percent:
            suffix = "fee"
        elif setting.fee_type is TradeFeeType.FlatFee:
            suffix = "fee_amount"
        else:
            suffix = None
        for side in ("maker", "taker"):
            key = f"{name}_{side}_{suffix}"
            result[key] = new_fee_config_var(key)
    return result
fee_overrides_config_map = fee_overrides_dict()
|
#!/usr/bin/python
#Relax Dynamixel servos (set all PWMs to zero).
from dxl_cranex7 import *
#Setup the device
crane= TCraneX7()
crane.Setup()
crane.EnableTorque()
# Zero the PWM on every joint: torque stays enabled but the motors stop
# driving, leaving the arm compliant ("relaxed").
crane.SetPWM({jname:0 for jname in crane.JointNames()})
#crane.DisableTorque()
crane.Quit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'sai27'
import time, uuid
from orm import Model, StringField, BooleanField, FloatField, TextField, IntegerField
def next_id():
    """Generate a unique 50-char id.

    Layout: millisecond timestamp zero-padded to 15 digits, a 32-char
    uuid4 hex string, then the literal suffix '000'.
    """
    millis = int(time.time() * 1000)
    return '%015d%s000' % (millis, uuid.uuid4().hex)
class User(Model):
    """ORM model for a registered user (table ``users``)."""
    __table__ = 'users'
    # Primary key generated by next_id() at insert time.
    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    email = StringField(ddl='varchar(50)')
    passwd = StringField(ddl='varchar(50)')
    admin = BooleanField()
    name = StringField(ddl='varchar(50)')
    # Creation timestamp: seconds since the epoch, defaulting to "now".
    created_at = FloatField(default=time.time)
class Issue(Model):
    """ORM model for a tracked issue (table ``issues``)."""
    __table__ = 'issues'
    id = IntegerField(primary_key=True)
    title = StringField(ddl='varchar(256)')
    content = TextField()
    # MD5 digest of `content`, used to detect duplicates/changes.
    content_md5 = StringField(ddl='varchar(50)')
    version = StringField(ddl='varchar(32)')
    # References User.id.
    user_id = StringField(ddl='varchar(50)')
    status = IntegerField()
class Crash(Model):
    """ORM model for a crash report (table ``crashs``)."""
    __table__ = 'crashs'
    id = StringField(primary_key=True, ddl='varchar(50)')
    # References Issue.id (stored as a string).
    issue_id = StringField(ddl='varchar(50)')
    crash_doc = TextField()
    app_detail = TextField()
|
from fsm import FSM
from state import *
from transition import *
# Dynamically created empty base class (equivalent to `class Char: pass`).
Char = type("Char", (object,), {})
class RobotCat(Char):
    """Toy character driven by a finite-state machine with three states
    (Sleep, Walk, Meow) and one named transition into each state."""

    def __init__(self):
        self.FSM = FSM(self)
        # States
        self.FSM.addState("Sleep", Sleep(self.FSM))
        self.FSM.addState("Walk", Walk(self.FSM))
        self.FSM.addState("Meow", Meow(self.FSM))
        # Transitions
        self.FSM.addTransition("toSleep", ToSleep("Sleep"))
        self.FSM.addTransition("toWalk", ToWalk("Walk"))
        self.FSM.addTransition("toMeow", ToMeow("Meow"))
        # Start asleep.
        self.FSM.setState("Sleep")

    def execute(self):
        # Run one tick of the state machine.
        self.FSM.execute()
|
"""Initialization Module"""
|
# Running totals: cont = items costing over R$1000, total = overall cost,
# menor/nomem = cheapest price seen and its product name, cont1 = counter.
cont = total = menor = cont1 = nomem = 0
while True:
    nome = str(input('Digite o nome do produto: '))
    preço = float(input('Digite o preço do produto: '))
    cont1 += 1
    if cont1 == 1:
        # First product seeds the cheapest-so-far tracking.
        menor = preço
        nomem = nome
    if preço > 1000:
        cont += 1
    if preço < menor:
        menor = preço
        nomem = nome
    total += preço
    # NOTE(review): [0] raises IndexError if the user just presses Enter.
    s_ou_n = str(input('Deseja continuar [S/N]: ')).upper().strip()[0]
    while s_ou_n not in 'SN':
        s_ou_n = str(input('Deseja continuar [S/N]: ')).upper().strip()[0]
    if s_ou_n == 'N':
        break
print(f'O gasto total será de R${total:.2f}')
print(f'{cont} produto(s) custam mais de R$1000,00')
print(f'O produto mais barato foi a/o {nomem} custando R${menor:.2f}')
|
"""
Crie um programa que leia nome, sexo e idade de várias pressoas,
guardando os dados em um dicionário e todos os dicionários em uma
lista. No final, mostre:
A - Quantas pessoas foram cadastradas.
B - A média de idade do grupo.
C - Uma lista com todas as mulheres.
D - Uma lista com todas as pessoas com idade acima da média.
"""
import math
pessoa = dict()
pessoas = list()
media = 0
while True:
pessoa['nome'] = str(input('Nome: '))
pessoa['sexo'] = str(input('Sexo [M/F]: ')).upper().strip()
pessoa['idade'] = int(input('Idade: '))
# Valida o resultado de sexo apenas pra M ou F
while pessoa['sexo'] not in 'MF':
print('Apenas \033[31m[M/F]\033[m tente novamente')
pessoa['sexo'] = str(input('Sexo : ')).upper().strip()
# Adiciona todas as pessoas na lista pessoas
pessoas.append(pessoa.copy())
media += pessoa['idade']
# Opcao de continuar ou parar o while
op = str(input('Quer continuar? [S/N] ')).upper().strip()
while op not in 'SN':
print('Apenas S/N, tente novamente. ')
op = str(input('Quer continuar? \033[31m[S/N]\033[m ')).upper().strip()
if op in 'N':
print('=-=' * 15)
break
print(f'- O grupo tem {len(pessoas)} pessoas.')
print(f'- A média de idade é de {math.ceil(media/len(pessoas))} anos.')
print(f'- As mulheres cadastradas foram:', end='')
for p in pessoas:
if p['sexo'] == 'F':
print(f'{p["nome"]}', end=' ')
print()
print('- Lista de pessoas que estão acima da média:')
for p in pessoas:
if p['idade'] > media/len(pessoas):
for k, v in p.items():
print(f'{" ":>5} {k}: {v}', end='')
print()
print('<'*8, 'Fim do programa', '>'*8)
|
# -*- coding:utf-8 -*-
class Solution:
    def FindNumbersWithSum(self, array, tsum):
        """
        In the ascending sorted `array`, find two numbers summing to `tsum`.

        The two-pointer scan from both ends finds the widest such pair
        first; for a fixed sum, the widest pair has the smallest product,
        which is the pair the problem asks for.

        Returns [a, b] with a + b == tsum, or [] when no pair exists.
        """
        low, high = 0, len(array) - 1
        while low < high:
            # Renamed from `sum`, which shadowed the builtin; the unused
            # `res_list` accumulator was removed.
            pair_sum = array[low] + array[high]
            if pair_sum == tsum:
                return [array[low], array[high]]
            if pair_sum < tsum:
                low += 1
            else:
                high -= 1
        return []
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from BTrees.OOBTree import OOBTree
import transaction
from ..zeo_conf_wrapper import ZEOConfWrapper
# Functions & classes =========================================================
class DatabaseHandler(object):
    """
    Interface to the ZEO database and its configuration.

    Attributes:
        conf_path (str): Path to the ZEO client XML configuration.
        project_key (str): Project key, which is used to access ZEO.
        zeo (obj): :class:`.ZEOConfWrapper` database object.
    """

    def __init__(self, conf_path, project_key):
        self.conf_path = conf_path
        self.project_key = project_key
        self.zeo = None
        self._reload_zeo()

    def _reload_zeo(self):
        """(Re)create the ZEO wrapper from the stored configuration."""
        self.zeo = ZEOConfWrapper(
            conf_path=self.conf_path,
            project_key=self.project_key,
        )

    def _get_key_or_create(self, key, obj_type=OOBTree):
        """Return ``self.zeo[key]``, creating it as ``obj_type()`` if absent."""
        with transaction.manager:
            stored = self.zeo.get(key, None)
            if stored is not None:
                return stored
            created = obj_type()
            self.zeo[key] = created
            return created
|
# coding: utf-8
# In[10]:
import argparse as ag
import ConfigParser
import numpy as np
from Bio.Seq import Seq
import Bio.Alphabet
import Bio.Alphabet.IUPAC
# MPI parallelism
from mpi4py import MPI
import h5py
from copy import copy
np.set_printoptions(threshold='nan')
natoms = ('OP1', 'OP2', 'O1P', 'O2P', 'N4', 'N6', 'O4',
'O6', 'N7', 'N2', 'N3', 'O2', "O3'", "O5'", "O2'", "O4'",)
def get_bounding(R, padding, step):
    """Return (origin, shape) of a grid box covering the coordinates R.

    R is an (N, 3) coordinate array; the box is expanded by
    ``padding + step`` on every side and its origin is snapped to the
    lattice by :func:`adjust_grid`.
    """
    minXYZ = np.amin(R, axis=0)
    maxXYZ = np.amax(R, axis=0)
    minXYZ -= (padding + step)
    maxXYZ += (padding + step)
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is the
    # supported (and previously equivalent) spelling.
    shape = np.ceil((maxXYZ - minXYZ) / step).astype(int)
    minXYZ = adjust_grid(minXYZ, step)
    return (minXYZ, shape)
def adjust_grid(c, step, padding=0):
    """Snap coordinate(s) ``c`` (shifted by ``padding``) down onto a
    lattice with spacing ``step``."""
    return np.floor((c - padding) / step) * step
def sphere2grid(c, r, step, value=-np.inf):
    """Rasterise a sphere (centre ``c``, radius ``r``) onto a grid of
    spacing ``step``.

    Returns ``(origin, volume)`` where ``volume`` is an int array holding
    ``value`` inside the sphere and 0 outside.

    NOTE(review): the default ``value=-np.inf`` cannot be multiplied into
    the int array created below; every caller in this file passes
    ``value=1`` — confirm the default is ever used.
    """
    nc = adjust_grid(c, step)
    adj = nc - c
    # Squared distances along each axis from the (grid-snapped) centre.
    r2x = np.arange(-r - adj[0], r - adj[0] + step, step) ** 2
    r2y = np.arange(-r - adj[1], r - adj[1] + step, step) ** 2
    r2z = np.arange(-r - adj[2], r - adj[2] + step, step) ** 2
    dist2 = r2x[:, None, None] + r2y[:, None] + r2z
    nc = nc - dist2.shape[0] / 2.0 * step
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is equivalent.
    vol = (dist2 <= r ** 2.0).astype(int)
    vol *= value
    return (nc, vol)
def hist2grid(c, dat, bins, step):
    """Rasterise a radial histogram around centre ``c`` onto a grid.

    Each voxel gets the value ``dat[i]`` of the radial bin
    ``(bins[i], bins[i+1]]`` its distance from ``c`` falls into;
    voxels beyond ``bins[-1]`` stay 0.  Returns ``(origin, grid)``.
    """
    r = bins[-1]
    nc = adjust_grid(c, step)
    adj = nc - c
    r2x = np.arange(-r - adj[0], r - adj[0] + step, step) ** 2
    r2y = np.arange(-r - adj[1], r - adj[1] + step, step) ** 2
    r2z = np.arange(-r - adj[2], r - adj[2] + step, step) ** 2
    dist2 = r2x[:, None, None] + r2y[:, None] + r2z
    # BUG FIX: np.float was removed in NumPy 1.24; builtin float is
    # equivalent.  np.zeros replaces the ndarray+fill(0.0) pair.
    grid = np.zeros(dist2.shape, dtype=float)
    for i in range(len(bins) - 1):
        grid[(dist2 > bins[i] ** 2) & (dist2 <= bins[i + 1] ** 2)] = dat[i]
    nc = nc - dist2.shape[0] / 2 * step
    return (nc, grid)
# http://www.weslack.com/question/1552900000001895550
def submatrix(arr):
x, y, z = np.nonzero(arr)
# Using the smallest and largest x and y indices of nonzero elements,
# we can find the desired rectangular bounds.
# And don't forget to add 1 to the top bound to avoid the fencepost
# problem.
return (
np.array(
[x.min(), y.min(), z.min()]
),
np.array(
[x.max() + 1, y.max() + 1, z.max() + 1]
),
)
def process_residue(tR, padding, step):
    """Rasterise a whole residue as the 0/1 union of its atoms' van der
    Waals spheres.

    Returns ``(grid, origin)`` where ``grid`` covers the residue's
    bounding box (see :func:`get_bounding`).
    """
    RminXYZ, Rshape = get_bounding(tR.getCoords(), padding, step)
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is equivalent.
    Rgrid = np.zeros(Rshape, dtype=int)
    for A in tR.iterAtoms():
        AminXYZ, Agrid = sphere2grid(
            A.getCoords(), avdw[A.getElement()], step, 1)
        NA = Agrid.shape[0]
        # Offset of the atom's sub-grid inside the residue grid, in voxels.
        x, y, z = ((AminXYZ - RminXYZ) / step).astype(int)
        Rgrid[x: x + NA, y: y + NA, z: z + NA] += Agrid
    np.clip(Rgrid, 0, 1, out=Rgrid)
    return (Rgrid, RminXYZ)
def process_atom(A, step):
    """Rasterise a single atom as a 0/1 van der Waals sphere grid."""
    origin, grid = sphere2grid(A.getCoords(), avdw[A.getElement()], step, 1)
    np.clip(grid, 0, 1, out=grid)
    return (grid, origin)
def process_atom_oddt(A, step):
    """Rasterise a single ODDT-style atom tuple as a 0/1 sphere grid.

    ``A[1]`` holds the coordinates and ``A[5][0]`` the element symbol —
    presumably the ODDT atom-dtype layout; confirm against the caller.
    """
    origin, grid = sphere2grid(A[1], avdw[A[5][0]], step, 1)
    np.clip(grid, 0, 1, out=grid)
    return (grid, origin)
# Atomic radii in Angstroms, keyed by element symbol.
aradii = {
    'H': 0.53,
    'C': 0.67,
    'N': 0.56,
    'O': 0.48,
    'P': 0.98,
    'S': 0.88,
}
# Van der Waals radii in Angstroms (used when rasterising atoms to grids).
avdw = {
    'H': 1.2,
    'C': 1.7,
    'N': 1.55,
    'O': 1.52,
    'P': 1.8,
    'S': 1.8,
}
# United-atom van der Waals radii in Angstroms (hydrogens folded into
# the heavy atoms).
avdw_ua = {
    'H': 2.3,
    'C': 3.0,
    'N': 2.8,
    'O': 2.6,
    'P': 2.9,
    'S': 2.9,
}
# PDB atom names treated as charged (per the name); NOTE(review):
# confirm intended semantics against the code that consumes this list.
atomschgd = [
    'NH2',
    'NH1',
    'NE',
    'NZ',
    'NE2',
    'ND2',
]
# Polar atom name -> residue tag; 'X' presumably means "any residue",
# while the ambiguous 'NE2' is pinned to HIS — TODO confirm.
atomsl = {
    'OE2': 'X',
    'OE1': 'X',  # Glutamate
    'OD1': 'X',
    'OD2': 'X',  # Aspartate
    'O': 'X',  # Peptide bond
    'OG': 'X',  # Serine
    'OG1': 'X',  # Threonine
    'OH': 'X',  # Tyrosine
    'SG': 'X',  # Cysteine
    'NE2': 'HIS',  # Histidine, Glutamine
    'ND1': 'X',  # Histidine
    # 'ND2',  # Asparagine
    # 'NE',  # Arginine
    'OP1': 'X',
    'OP2': 'X',  # Phosphate
}
# For each polar atom, the bonded heavy atom defining its direction;
# 'NE2' depends on the residue (HIS vs GLN), hence the nested dict.
atomsdir = {
    'OE1': 'CD',
    'OE2': 'CD',
    'OD1': 'CG',
    'OD2': 'CG',
    'O': 'C',
    'OG': 'CB',
    'OG1': 'CB',
    'SG': 'CB',
    'OH': 'CZ',
    'NE2': {'HIS': 'CE1', 'GLN': 'CD'},
    'ND1': 'CE1',
    'OP1': 'P',
    'OP2': 'P',
    'NH1': 'CZ',
    'NH2': 'CZ',
    'NE': 'CZ',
    'NZ': 'CE',
    'N': 'CA',
}
# http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/2819788#2819788
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[asection]\n'
def readline(self):
if self.sechead:
try:
return self.sechead
finally:
self.sechead = None
else:
return self.fp.readline()
def get_box_from_vina(f):
    """Parse an AutoDock Vina config file.

    Returns ``(center, box)`` — two float arrays built from the
    ``center_*`` and ``size_*`` keys of the file.
    """
    cp = ConfigParser.SafeConfigParser()
    # BUG FIX: the file handle was opened inline and never closed.
    with open(f) as fh:
        cp.readfp(FakeSecHead(fh))
    cfg = dict(cp.items('asection'))
    # np.float was removed in NumPy 1.24; builtin float is equivalent.
    center = np.array(
        [cfg['center_x'], cfg['center_y'], cfg['center_z']],
        dtype=float)
    box = np.array(
        [cfg['size_x'], cfg['size_y'], cfg['size_z']],
        dtype=float)
    return (center, box)
atypes = {
("ALA", "CA"): "CA_BCK",
("ALA", "CB"): "C",
("ALA", "C"): "C_BCK",
# ("ALA", "N"): "N",
("ALA", "N"): "NA_BCK",
("ALA", "O"): "OA_BCK",
("ARG", "CA"): "CA_BCK",
("ARG", "CB"): "C",
("ARG", "C"): "C_BCK",
("ARG", "CD"): "C",
("ARG", "CG"): "C",
("ARG", "CZ"): "C",
("ARG", "NE"): "NA",
("ARG", "NH1"): "N",
("ARG", "NH2"): "N",
# ("ARG", "N"): "N",
("ARG", "N"): "NA_BCK",
("ARG", "O"): "OA_BCK",
("ASN", "CA"): "CA_BCK",
("ASN", "CB"): "C",
("ASN", "C"): "C_BCK",
("ASN", "CG"): "C",
("ASN", "ND2"): "N",
# ("ASN", "N"): "N",
("ASN", "N"): "NA_BCK",
("ASN", "OD1"): "OA",
("ASN", "O"): "OA_BCK",
("ASP", "CA"): "CA_BCK",
("ASP", "CB"): "C",
("ASP", "C"): "C_BCK",
("ASP", "CG"): "C",
# ("ASP", "N"): "N",
("ASP", "N"): "NA_BCK",
("ASP", "OD1"): "OA",
("ASP", "OD2"): "OA",
("ASP", "O"): "OA_BCK",
("CYS", "CA"): "CA_BCK",
("CYS", "CB"): "C",
("CYS", "C"): "C_BCK",
# ("CYS", "N"): "N",
("CYS", "N"): "NA_BCK",
("CYS", "O"): "OA_BCK",
("CYS", "SG"): "SA",
("GLN", "CA"): "CA_BCK",
("GLN", "CB"): "C",
("GLN", "C"): "C_BCK",
("GLN", "CD"): "C",
("GLN", "CG"): "C",
("GLN", "NE2"): "N",
# ("GLN", "N"): "N",
("GLN", "N"): "NA_BCK",
("GLN", "OE1"): "OA",
("GLN", "O"): "OA_BCK",
("GLU", "CA"): "CA_BCK",
("GLU", "CB"): "C",
("GLU", "C"): "C_BCK",
("GLU", "CD"): "C",
("GLU", "CG"): "C",
# ("GLU", "N"): "N",
("GLU", "N"): "NA_BCK",
("GLU", "OE1"): "OA",
("GLU", "OE2"): "OA",
("GLU", "O"): "OA_BCK",
("GLY", "CA"): "CA_BCK",
("GLY", "C"): "C_BCK",
# ("GLY", "N"): "N",
("GLY", "N"): "NA_BCK",
("GLY", "O"): "OA_BCK",
("HIS", "CA"): "CA_BCK",
("HIS", "CB"): "C",
("HIS", "C"): "C_BCK",
("HIS", "CD2"): "A",
("HIS", "CE1"): "A",
("HIS", "CG"): "A",
("HIS", "ND1"): "NA",
("HIS", "NE2"): "NA",
# ("HIS", "N"): "N",
("HIS", "N"): "NA_BCK",
("HIS", "O"): "OA_BCK",
("ILE", "CA"): "CA_BCK",
("ILE", "CB"): "C",
("ILE", "C"): "C_BCK",
("ILE", "CD1"): "C",
("ILE", "CG1"): "C",
("ILE", "CG2"): "C",
# ("ILE", "N"): "N",
("ILE", "N"): "NA_BCK",
("ILE", "O"): "OA_BCK",
("LEU", "CA"): "CA_BCK",
("LEU", "CB"): "C",
("LEU", "C"): "C_BCK",
("LEU", "CD1"): "C",
("LEU", "CD2"): "C",
("LEU", "CG"): "C",
# ("LEU", "N"): "N",
("LEU", "N"): "NA_BCK",
("LEU", "O"): "OA_BCK",
("LYS", "CA"): "CA_BCK",
("LYS", "CB"): "C",
("LYS", "C"): "C_BCK",
("LYS", "CD"): "C",
("LYS", "CE"): "C",
("LYS", "CG"): "C",
# ("LYS", "N"): "N",
("LYS", "N"): "NA_BCK",
("LYS", "NZ"): "N",
("LYS", "O"): "OA_BCK",
("MET", "CA"): "CA_BCK",
("MET", "CB"): "C",
("MET", "C"): "C_BCK",
("MET", "CE"): "C",
("MET", "CG"): "C",
# ("MET", "N"): "N",
("MET", "N"): "NA_BCK",
("MET", "O"): "OA_BCK",
("MET", "SD"): "SA",
("PHE", "CA"): "CA_BCK",
("PHE", "CB"): "C",
("PHE", "C"): "C_BCK",
("PHE", "CD1"): "A",
("PHE", "CD2"): "A",
("PHE", "CE1"): "A",
("PHE", "CE2"): "A",
("PHE", "CG"): "A",
("PHE", "CZ"): "A",
# ("PHE", "N"): "N",
("PHE", "N"): "NA_BCK",
("PHE", "O"): "OA_BCK",
("PRO", "CA"): "CA_BCK",
("PRO", "CB"): "C",
("PRO", "C"): "C_BCK",
("PRO", "CD"): "C",
("PRO", "CG"): "C",
("PRO", "N"): "NA_BCK",
# ("PRO", "N"): "NA_BCK",
("PRO", "O"): "OA_BCK",
("SER", "CA"): "CA_BCK",
("SER", "CB"): "C",
("SER", "C"): "C_BCK",
# ("SER", "N"): "N",
("SER", "N"): "NA_BCK",
("SER", "OG"): "OA",
("SER", "O"): "OA_BCK",
("THR", "CA"): "CA_BCK",
("THR", "CB"): "C",
("THR", "C"): "C_BCK",
("THR", "CG2"): "C",
# ("THR", "N"): "N",
("THR", "N"): "NA_BCK",
("THR", "OG1"): "OA",
("THR", "O"): "OA_BCK",
("TRP", "CA"): "CA_BCK",
("TRP", "CB"): "C",
("TRP", "C"): "C_BCK",
("TRP", "CD1"): "A",
("TRP", "CD2"): "A",
("TRP", "CE2"): "A",
("TRP", "CE3"): "A",
("TRP", "CG"): "A",
("TRP", "CH2"): "A",
("TRP", "CZ2"): "A",
("TRP", "CZ3"): "A",
("TRP", "NE1"): "NA",
# ("TRP", "N"): "N",
("TRP", "N"): "NA_BCK",
("TRP", "O"): "OA_BCK",
("TYR", "CA"): "CA_BCK",
("TYR", "CB"): "C",
("TYR", "C"): "C_BCK",
("TYR", "CD1"): "A",
("TYR", "CD2"): "A",
("TYR", "CE1"): "A",
("TYR", "CE2"): "A",
("TYR", "CG"): "A",
("TYR", "CZ"): "A",
# ("TYR", "N"): "N",
("TYR", "N"): "NA_BCK",
("TYR", "OH"): "OA",
("TYR", "O"): "OA_BCK",
("VAL", "CA"): "CA_BCK",
("VAL", "CB"): "C",
("VAL", "C"): "C_BCK",
("VAL", "CG1"): "C",
("VAL", "CG2"): "C",
# ("VAL", "N"): "N",
("VAL", "N"): "NA_BCK",
("VAL", "O"): "OA_BCK",
("NME", "N"): "NA_BCK",
("NME", "C"): "OA_BCK",
("ACE", "CH3"): "C",
("ACE", "O"): "OA_BCK",
("ACE", "C"): "C_BCK",
}
# One-letter amino-acid codes, plus BCK (backbone) and UNK (unknown).
one_let = [
    "A", "R", "N", "D", "C", "E", "Q", "G", "H",
    "I", "L", "K", "M", "F", "P", "S", "T", "W", "Y", "V", "BCK", "UNK"]
# Three-letter residue names in the same order as `one_let`, plus the
# NME/ACE capping groups.
three_let = [
    "ALA", "ARG", "ASN", "ASP", "CYS", "GLU", "GLN", "GLY", "HIS", "ILE",
    "LEU", "LYS", "MET", "PHE", "PRO", "SER", "THR", "TRP", "TYR", "VAL",
    "BCK", "UNK", "NME", "ACE",
]
def get_args():
    """Parse command-line arguments for the grid scripts.

    Returns the parsed namespace as a plain dict.
    """
    parser = ag.ArgumentParser(description='Grid scripts')
    parser.add_argument('-m',
                        required=True,
                        dest='Sfn',
                        metavar='FILE.hdf5',
                        help='HDF5 file for all matrices')
    parser.add_argument('-o', '--output',
                        dest='output',
                        metavar='OUTPUT')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Perform profiling')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Be verbose')
    parser.add_argument('--default_types',
                        action='store_true',
                        default=True,
                        help='Use default set of atom types for Vina')
    parser.add_argument('-f',
                        nargs='+',
                        type=str,
                        dest='pdb_list',
                        metavar='FILE',
                        help='PDB files')
    parser.add_argument('-e', '--exclude',
                        nargs='*',
                        type=str,
                        dest='excl',
                        metavar='TYPE',
                        help='Vina types to exclude from scoring')
    # BUG FIX: the help text for --include was a copy-paste of --exclude
    # and said "exclude".
    parser.add_argument('-i', '--include',
                        nargs='*',
                        type=str,
                        dest='incl',
                        metavar='TYPE',
                        help='Vina types to include in scoring')
    parser.add_argument('-s', '--step',
                        type=float,
                        dest='step',
                        metavar='STEP',
                        default=0.1,
                        help='Grid step in Angstroms')
    parser.add_argument('-p', '--padding',
                        type=int,
                        dest='pad',
                        metavar='PADDING',
                        default=1,
                        help='Padding around cell in Angstroms')
    parser.add_argument('-c', '--config',
                        type=str,
                        dest='config',
                        metavar='CONFIG',
                        help='Vina config file')
    args = parser.parse_args()
    return vars(args)
def get_args_box():
    """Parse command-line arguments for the box utility.

    Returns the parsed namespace as a plain dict.
    """
    parser = ag.ArgumentParser()
    parser.add_argument('-o', '--output',
                        dest='output',
                        metavar='OUTPUT',
                        help='For "render" ans "cluster_to_trj" tasks \
                        name of output PNG image of mutiframe PDB file')
    parser.add_argument('--debug',
                        action='store_true',
                        help='Perform profiling')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Be verbose')
    parser.add_argument('-f',
                        nargs='+',
                        type=str,
                        dest='pdb_list',
                        metavar='FILE',
                        help='PDB files')
    parser.add_argument('-s', '--step',
                        type=float,
                        dest='step',
                        metavar='STEP',
                        default=0.1,
                        help='Grid step in Angstroms')
    parser.add_argument('-p', '--padding',
                        type=int,
                        dest='pad',
                        metavar='PADDING',
                        default=1,
                        help='Padding around cell in Angstroms')
    args = parser.parse_args()
    args_dict = vars(args)
    return args_dict
# SYBYL/Tripos atom types recognised for Vina scoring; hydrogen is
# deliberately commented out.
vina_types = (
    # 'H',
    'C.2',
    'C.3',
    'C.ar',
    'C.cat',
    'O.2',
    'O.3',
    'O.co2',
    'N.3',
    'N.4',
    'N.ar',
    'N.am',
    'N.pl3',
    'S.3',
)
def check_pept(seq):
    """argparse ``type=`` validator: return ``seq`` unchanged when it is
    a valid protein sequence, else raise ``ArgumentTypeError``.

    NOTE(review): ``Bio.Alphabet`` was removed in Biopython 1.78; this
    requires an older Biopython — confirm the pinned version.
    """
    tseq = Seq(seq, Bio.Alphabet.IUPAC.protein)
    if Bio.Alphabet._verify_alphabet(tseq):
        return seq
    else:
        msg = "%s is not a valid peptide sequence" % tseq
        raise ag.ArgumentTypeError(msg)
def init_mpi():
    """Collect MPI communicator, size and rank into a bare namespace object."""
    class MPI_basket(object):
        pass

    mpi = MPI_basket()
    mpi.comm = MPI.COMM_WORLD     # world communicator
    mpi.NPROCS = mpi.comm.size    # number of processes
    mpi.rank = mpi.comm.rank      # this process' rank
    return mpi
def task(N, mpi):
    """Split ``N`` work items evenly over MPI ranks.

    Returns this rank's half-open slice ``(begin, end)``; the last rank
    gets the (possibly shorter) remainder.
    """
    # BUG FIX: astype(np.int) — np.int was removed in NumPy 1.24; a plain
    # int() conversion of the scalar is equivalent.
    chunk = int(np.ceil(float(N) / mpi.NPROCS))
    begin = mpi.rank * chunk
    end = min(begin + chunk, N)
    return (begin, end)
gromos_types = {
("ALA", "N"): "N",
("ALA", "CA"): "CH1",
("ALA", "CB"): "CH3",
("ALA", "C"): "C",
("ALA", "O"): "O",
#
("ARG", "N"): "N",
("ARG", "CA"): "CH1",
("ARG", "CB"): "CH2",
("ARG", "CG"): "CH2",
("ARG", "CD"): "CH2",
("ARG", "NE"): "NE",
("ARG", "CZ"): "C",
("ARG", "NH1"): "NZ",
("ARG", "NH2"): "NZ",
("ARG", "C"): "C",
("ARG", "O"): "O",
#
("ASN", "N"): "N",
("ASN", "CA"): "CH1",
("ASN", "CB"): "CH2",
("ASN", "CG"): "C",
("ASN", "OD1"): "O",
("ASN", "ND2"): "NT",
("ASN", "C"): "C",
("ASN", "O"): "O",
#
("ASP", "N"): "N",
("ASP", "CA"): "CH1",
("ASP", "CB"): "CH2",
("ASP", "CG"): "C",
("ASP", "OD1"): "OM",
("ASP", "OD2"): "OM",
("ASP", "C"): "C",
("ASP", "O"): "O",
#
("CYS", "N"): "N",
("CYS", "CA"): "CH1",
("CYS", "CB"): "CH2",
("CYS", "SG"): "S",
("CYS", "C"): "C",
("CYS", "O"): "O",
#
("GLN", "N"): "N",
("GLN", "CA"): "CH1",
("GLN", "CB"): "CH2",
("GLN", "CG"): "CH2",
("GLN", "CD"): "C",
("GLN", "OE1"): "O",
("GLN", "NE2"): "NT",
("GLN", "C"): "C",
("GLN", "O"): "O",
#
("GLU", "N"): "N",
("GLU", "CA"): "CH1",
("GLU", "CB"): "CH2",
("GLU", "CG"): "CH2",
("GLU", "CD"): "C",
("GLU", "OE1"): "OM",
("GLU", "OE2"): "OM",
("GLU", "C"): "C",
("GLU", "O"): "O",
#
("GLY", "N"): "N",
("GLY", "CA"): "CH2",
("GLY", "C"): "C",
("GLY", "O"): "O",
#
("HIS", "N"): "N",
("HIS", "CA"): "CH1",
("HIS", "CB"): "CH2",
("HIS", "CG"): "C",
("HIS", "ND1"): "NR",
("HIS", "CD2"): "C",
("HIS", "CE1"): "C",
("HIS", "NE2"): "NR",
("HIS", "C"): "C",
("HIS", "O"): "O",
#
("ILE", "N"): "N",
("ILE", "CA"): "CH1",
("ILE", "CB"): "CH1",
("ILE", "CG1"): "CH2",
("ILE", "CG2"): "CH3",
("ILE", "CD"): "CH3",
("ILE", "CD1"): "CH3", # backup name variant
("ILE", "C"): "C",
("ILE", "O"): "O",
#
("LEU", "N"): "N",
("LEU", "CA"): "CH1",
("LEU", "CB"): "CH2",
("LEU", "CG"): "CH1",
("LEU", "CD1"): "CH3",
("LEU", "CD2"): "CH3",
("LEU", "C"): "C",
("LEU", "O"): "O",
#
("LYS", "N"): "N",
("LYS", "CA"): "CH1",
("LYS", "CB"): "CH2",
("LYS", "CG"): "CH2",
("LYS", "CD"): "CH2",
("LYS", "CE"): "CH2",
("LYS", "NZ"): "NT",
("LYS", "C"): "C",
("LYS", "O"): "O",
#
("MET", "N"): "N",
("MET", "CA"): "CH1",
("MET", "CB"): "CH2",
("MET", "CG"): "CH2",
("MET", "SD"): "S",
("MET", "CE"): "CH3",
("MET", "C"): "C",
("MET", "O"): "O",
#
("PHE", "N"): "N",
("PHE", "CA"): "CH1",
("PHE", "CB"): "CH2",
("PHE", "CG"): "C",
("PHE", "CD1"): "C",
("PHE", "CD2"): "C",
("PHE", "CE1"): "C",
("PHE", "CE2"): "C",
("PHE", "CZ"): "C",
("PHE", "C"): "C",
("PHE", "O"): "O",
#
("PRO", "N"): "N",
("PRO", "CA"): "CH1",
("PRO", "CB"): "CH2", # originally CH2r
("PRO", "CG"): "CH2", # originally CH2r
("PRO", "CD"): "CH2", # originally CH2r
("PRO", "C"): "C",
("PRO", "O"): "O",
#
("SER", "N"): "N",
("SER", "CA"): "CH1",
("SER", "CB"): "CH2",
("SER", "OG"): "OA",
("SER", "C"): "C",
("SER", "O"): "O",
#
("THR", "N"): "N",
("THR", "CA"): "CH1",
("THR", "CB"): "CH2",
("THR", "OG1"): "OA",
("THR", "CG2"): "CH3",
("THR", "C"): "C",
("THR", "O"): "O",
#
("TRP", "N"): "N",
("TRP", "CA"): "CH1",
("TRP", "CB"): "CH2",
("TRP", "CG"): "C",
("TRP", "CD1"): "C",
("TRP", "CD2"): "C",
("TRP", "NE1"): "NR",
("TRP", "CE2"): "C",
("TRP", "CE3"): "C",
("TRP", "CZ2"): "C",
("TRP", "CZ3"): "C",
("TRP", "CH2"): "C",
("TRP", "C"): "C",
("TRP", "O"): "O",
#
("TYR", "N"): "N",
("TYR", "CA"): "CH1",
("TYR", "CB"): "CH2",
("TYR", "CG"): "C",
("TYR", "CD1"): "C",
("TYR", "CD2"): "C",
("TYR", "CE1"): "C",
("TYR", "CE2"): "C",
("TYR", "CZ"): "C",
("TYR", "OH"): "OA",
("TYR", "C"): "C",
("TYR", "O"): "O",
#
("VAL", "N"): "N",
("VAL", "CA"): "CH1",
("VAL", "CB"): "CH2",
("VAL", "CG1"): "CH3",
("VAL", "CG2"): "CH3",
("VAL", "C"): "C",
("VAL", "O"): "O",
#
("ACE", "CH3"): "CH3",
("ACE", "C"): "C",
("ACE", "O"): "O",
#
("NME", "N"): "N",
("NME", "C"): "CH3",
("NME", "CH3"): "CH3",
}
tripos_types = {
("ACE", "O"): "O.2",
("ACE", "CH3"): "C.3",
("ACE", "C"): "C.2",
("ALA", "CA"): "C.3",
("ALA", "O"): "O.2",
("ALA", "CB"): "C.3",
("ALA", "C"): "C.2",
("ALA", "N"): "N.am",
("ARG", "C"): "C.2",
("ARG", "CD"): "C.3",
("ARG", "CZ"): "C.cat",
("ARG", "NH1"): "N.pl3",
("ARG", "CA"): "C.3",
("ARG", "N"): "N.am",
("ARG", "NE"): "N.pl3",
("ARG", "CB"): "C.3",
("ARG", "O"): "O.2",
("ARG", "NH2"): "N.pl3",
("ARG", "CG"): "C.3",
("ASN", "OD1"): "O.2",
("ASN", "N"): "N.am",
("ASN", "CB"): "C.3",
("ASN", "C"): "C.2",
("ASN", "O"): "O.2",
("ASN", "ND2"): "N.am",
("ASN", "CG"): "C.2",
("ASN", "CA"): "C.3",
("ASP", "CB"): "C.3",
("ASP", "O"): "O.2",
("ASP", "C"): "C.2",
("ASP", "N"): "N.am",
("ASP", "OD2"): "O.co2",
("ASP", "OD1"): "O.co2",
("ASP", "CA"): "C.3",
("ASP", "CG"): "C.2",
("CYS", "CB"): "C.3",
("CYS", "C"): "C.2",
("CYS", "O"): "O.2",
("CYS", "SG"): "S.3",
("CYS", "CA"): "C.3",
("CYS", "N"): "N.am",
("GLN", "CG"): "C.3",
("GLN", "O"): "O.2",
("GLN", "NE2"): "N.am",
("GLN", "C"): "C.2",
("GLN", "N"): "N.am",
("GLN", "OE1"): "O.2",
("GLN", "CA"): "C.3",
("GLN", "CD"): "C.2",
("GLN", "CB"): "C.3",
("GLU", "OE1"): "O.co2",
("GLU", "CG"): "C.3",
("GLU", "O"): "O.2",
("GLU", "CD"): "C.2",
("GLU", "CA"): "C.3",
("GLU", "OE2"): "O.co2",
("GLU", "N"): "N.am",
("GLU", "CB"): "C.3",
("GLU", "C"): "C.2",
("GLY", "CA"): "C.3",
("GLY", "C"): "C.2",
("GLY", "N"): "N.am",
("GLY", "O"): "O.2",
("HIS", "O"): "O.2",
("HIS", "NE2"): "N.ar",
("HIS", "CE1"): "C.ar",
("HIS", "N"): "N.am",
("HIS", "CD2"): "C.ar",
("HIS", "CG"): "C.ar",
("HIS", "CB"): "C.3",
("HIS", "CA"): "C.3",
("HIS", "ND1"): "N.ar",
("HIS", "C"): "C.2",
("ILE", "O"): "O.2",
("ILE", "N"): "N.am",
("ILE", "CB"): "C.3",
("ILE", "CG2"): "C.3",
("ILE", "CA"): "C.3",
("ILE", "C"): "C.2",
("ILE", "CG1"): "C.3",
("ILE", "CD"): "C.3",
("LEU", "CG"): "C.3",
("LEU", "N"): "N.am",
("LEU", "CD1"): "C.3",
("LEU", "CD2"): "C.3",
("LEU", "CA"): "C.3",
("LEU", "C"): "C.2",
("LEU", "O"): "O.2",
("LEU", "CB"): "C.3",
("LYS", "CE"): "C.3",
("LYS", "C"): "C.2",
("LYS", "NZ"): "N.4",
("LYS", "CA"): "C.3",
("LYS", "CB"): "C.3",
("LYS", "CG"): "C.3",
("LYS", "CD"): "C.3",
("LYS", "N"): "N.am",
("LYS", "O"): "O.2",
("MET", "SD"): "S.3",
("MET", "O"): "O.2",
("MET", "N"): "N.am",
("MET", "CB"): "C.3",
("MET", "CA"): "C.3",
("MET", "CG"): "C.3",
("MET", "C"): "C.2",
("MET", "CE"): "C.3",
("NME", "N"): "N.am",
("NME", "CH3"): "C.3",
("PHE", "N"): "N.am",
("PHE", "C"): "C.2",
("PHE", "CD1"): "C.ar",
("PHE", "CD2"): "C.ar",
("PHE", "O"): "O.2",
("PHE", "CZ"): "C.ar",
("PHE", "CA"): "C.3",
("PHE", "CE2"): "C.ar",
("PHE", "CE1"): "C.ar",
("PHE", "CB"): "C.3",
("PHE", "CG"): "C.ar",
("PRO", "CA"): "C.3",
("PRO", "CD"): "C.3",
("PRO", "C"): "C.2",
("PRO", "N"): "N.am",
("PRO", "O"): "O.2",
("PRO", "CG"): "C.3",
("PRO", "CB"): "C.3",
("SER", "CB"): "C.3",
("SER", "OG"): "O.3",
("SER", "N"): "N.am",
("SER", "CA"): "C.3",
("SER", "O"): "O.2",
("SER", "C"): "C.2",
("THR", "CA"): "C.3",
("THR", "O"): "O.2",
("THR", "N"): "N.am",
("THR", "CB"): "C.3",
("THR", "OG1"): "O.3",
("THR", "CG2"): "C.3",
("THR", "C"): "C.2",
("TRP", "NE1"): "N.ar",
("TRP", "CZ2"): "C.ar",
("TRP", "CE3"): "C.ar",
("TRP", "C"): "C.2",
("TRP", "CD1"): "C.ar",
("TRP", "CB"): "C.3",
("TRP", "CZ3"): "C.ar",
("TRP", "CE2"): "C.ar",
("TRP", "O"): "O.2",
("TRP", "CD2"): "C.ar",
("TRP", "N"): "N.am",
("TRP", "CG"): "C.ar",
("TRP", "CA"): "C.3",
("TRP", "CH2"): "C.ar",
("TYR", "CD2"): "C.ar",
("TYR", "CD1"): "C.ar",
("TYR", "CE1"): "C.ar",
("TYR", "CG"): "C.ar",
("TYR", "CZ"): "C.ar",
("TYR", "OH"): "O.3",
("TYR", "N"): "N.am",
("TYR", "CB"): "C.3",
("TYR", "O"): "O.2",
("TYR", "CA"): "C.3",
("TYR", "C"): "C.2",
("TYR", "CE2"): "C.ar",
("VAL", "N"): "N.am",
("VAL", "O"): "O.2",
("VAL", "CA"): "C.3",
("VAL", "CB"): "C.3",
("VAL", "CG2"): "C.3",
("VAL", "C"): "C.2",
("VAL", "CG1"): "C.3",
}
def import_retry(name, maxtry=50):
    """Import module ``name``, retrying up to ``maxtry`` times on ImportError.

    BUG FIX: the original body was a non-functional stub — the import
    statement was commented out, so the loop broke immediately and the
    function always returned None without importing anything.  It now
    performs the import via importlib and returns the module, or None
    when every attempt fails.
    """
    import importlib
    for _ in range(maxtry):
        try:
            return importlib.import_module(name)
        except ImportError:
            pass
    return None
def choose_artypes(arg):
    """Return ``(atom_type_values, residue_type_table)`` for the named
    atom-typing scheme.

    Known schemes: 'default', 'gromos', 'tripos_neutral',
    'tripos_charged' (tripos with terminal O/OXT set to O.co2), 'vina'.
    Raises ValueError for anything else (the original fell through and
    raised UnboundLocalError).
    """
    if arg == 'default':
        return (atypes.values(), atypes)
    if arg == 'gromos':
        return (gromos_types.values(), gromos_types)
    if arg == 'tripos_neutral':
        return (tripos_types.values(), tripos_types)
    if arg == 'tripos_charged':
        rtypes = copy(tripos_types)
        # BUG FIX: iterate over a snapshot of the keys — the loop inserts
        # (res, 'OXT') entries, and mutating a dict while iterating it
        # raises RuntimeError on Python 3.
        for res, atom in list(rtypes):
            if atom == 'O':
                rtypes[(res, atom)] = "O.co2"
                rtypes[(res, 'OXT')] = "O.co2"
        return (tripos_types.values(), rtypes)
    if arg == 'vina':
        return (vina_types, vina_types)
    raise ValueError("unknown atom-typing scheme: %r" % (arg,))
def read_plist(fname):
    """Read a peptide list file into a numpy byte-string array.

    One name per line; for 'NAME_suffix' entries only NAME is kept.
    Blank lines are skipped and duplicates removed.

    BUG FIX: the original sized the dtype as 'S%d' % len(plist[0]) —
    an arbitrary element after set() deduplication — silently truncating
    any longer name; it also raised IndexError on an empty file.  The
    width is now the longest name.
    """
    with open(fname, 'r') as f:
        raw = f.readlines()
    plist = []
    for line in raw:
        name = line.strip()
        if not name:
            continue
        if '_' in name:
            name = name.split('_')[0]
        plist.append(name)
    plist = list(set(plist))
    if not plist:
        return np.array([], dtype='S1')
    width = max(len(name) for name in plist)
    return np.array(plist, dtype='S%d' % width)
def reset_checkpoint(fn):
    """Zero out the 'checkpoint' dataset in the given HDF5 file."""
    with h5py.File(fn, 'r+') as f:
        dataset = f['checkpoint']
        dataset[:] = np.zeros(dataset.shape)
|
import datetime
import json
import pprint
import re
import traceback
from gi.repository import Gtk, Gdk, GObject
from gramps.gen.plug import Gramplet
from gramps.gui.plug import tool
from gramps.gui.utils import ProgressMeter
from gramps.gen.db import DbTxn
from gramps.gen.lib import Place, PlaceRef, PlaceName, PlaceType, Event, EventRef, EventType, Tag, Date
from gramps.gui.dialog import OkDialog
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
# regex helpers — small fragments assembled into full date patterns.
zerototwozeros = r"0{0,2}"
oneortwodigits = r"\d{1,2}"
twodigits = r"\d{2}"
fourdigits = r"\d{4}"
dot = r"\."
dash = r"-"
# BUG FIX: sep/gt/lt were plain strings containing invalid escape
# sequences ("\.", "\>", "\<"), which emit DeprecationWarning (and will
# become errors) on Python 3; raw strings keep the byte values identical.
# Inside the class, ",-/" is the character range 44-47, i.e. exactly
# {',', '-', '.', '/'} — the intended separator set.
sep = r"[\.,-/]"
gt = r"\>"
lt = r"\<"
space = r"\s"
def p(**kwargs):
    """Build a named regex group: ``p(y=pat)`` -> ``'(?P<y>pat)'``.

    Exactly one keyword argument is required.  BUG FIX: the original
    ended with ``raise Error`` — ``Error`` is undefined (the line was
    unreachable, but would have raised NameError, not the intended
    exception).
    """
    assert len(kwargs) == 1
    name, pat = next(iter(kwargs.items()))
    return "(?P<{name}>{pat})".format(name=name, pat=pat)
def optional(pat):
    """Wrap ``pat`` in a group that matches zero or one time."""
    return "(" + pat + ")?"
def match(s, *args):
    """Full-match ``s`` against the concatenation of regex fragments.

    Debug-prints the string, the pattern and the match object.  Returns
    None on failure; otherwise an object whose attributes are the named
    groups of the pattern.
    """
    pat = "".join(args)
    print("match:")
    print(" ", s)
    print(" ", pat)
    m = re.fullmatch(pat, s, re.VERBOSE)
    print(" ", m)
    if m is None:
        return None
    holder = type("Ret", (), {})()
    holder.__dict__ = m.groupdict()
    return holder
def dateval(y, m, d):
    """Validate ``(y, m, d)`` as a real calendar date.

    Returns the Gramps-style value tuple ``(day, month, year, False)``,
    or None (after printing the traceback) when the values do not form a
    valid date.  BUG FIX: the original used a bare ``except:``, which
    also swallows SystemExit/KeyboardInterrupt.
    """
    print(y, m, d)
    try:
        y = int(y)
        m = int(m)
        d = int(d)
        datetime.date(y, m, d)  # raises ValueError for impossible dates
        return (d, m, y, False)
    except Exception:
        traceback.print_exc()
        return None
class Dates(Gramplet):
    """Gramplet that rewrites invalid free-text dates on the selected
    events into structured Gramps date objects."""

    def init(self):
        """Build the GUI inside the gramplet container."""
        self.root = self.__create_gui()
        self.gui.get_container_widget().remove(self.gui.textview)
        self.gui.get_container_widget().add_with_viewport(self.root)
        self.selected_handle = None
        self.set_tooltip(_("Correct invalid dates"))

    def db_changed(self):
        self.__clear(None)

    def __clear(self, obj):
        pass

    def __create_gui(self):
        """Create and return the vertical box holding all controls."""
        vbox = Gtk.VBox(orientation=Gtk.Orientation.VERTICAL)
        vbox.set_spacing(4)

        label = Gtk.Label(_("This gramplet helps to correct invalid dates..."))
        label.set_halign(Gtk.Align.START)
        label.set_line_wrap(True)
        vbox.pack_start(label, False, True, 0)

        # Optional literal/regex text replacement on the date text.
        self.replace_text = Gtk.CheckButton(_("Replace text"))
        self.replace_text.connect("clicked", self.__select_replace_text)
        self.use_regex = Gtk.CheckButton(_("Use regex"))
        self.use_regex.set_sensitive(False)
        replace_text_box = Gtk.HBox()
        replace_text_box.pack_start(self.replace_text, False, True, 0)
        replace_text_box.pack_start(self.use_regex, False, True, 0)
        vbox.pack_start(replace_text_box, False, True, 0)

        old_text_label = Gtk.Label()
        old_text_label.set_markup("<b>{}</b>".format(_("Old text:")))
        self.old_text = Gtk.Entry()
        self.old_text.set_sensitive(False)
        new_text_label = Gtk.Label()
        new_text_label.set_markup("<b>{}</b>".format(_("New text:")))
        self.new_text = Gtk.Entry()
        self.new_text.set_sensitive(False)
        replace_grid = Gtk.Grid(column_spacing=10)
        replace_grid.set_margin_left(20)
        replace_grid.attach(old_text_label, 1, 0, 1, 1)
        replace_grid.attach(self.old_text, 2, 0, 1, 1)
        replace_grid.attach(new_text_label, 1, 1, 1, 1)
        replace_grid.attach(self.new_text, 2, 1, 1, 1)
        vbox.pack_start(replace_grid, False, True, 0)

        # One checkbox per supported date-fixing rule.
        self.handle_dd_mm_yyyy = Gtk.CheckButton(label=_('31.12.1888 -> 1888-12-31'))
        vbox.pack_start(self.handle_dd_mm_yyyy, False, True, 0)
        self.handle_mm_yyyy = Gtk.CheckButton(label=_('.12.1888 -> 1888-12'))
        vbox.pack_start(self.handle_mm_yyyy, False, True, 0)
        self.handle_dd_yyyy = Gtk.CheckButton(label=_('31..1888 -> 1888-00-31'))
        vbox.pack_start(self.handle_dd_yyyy, False, True, 0)
        self.handle_yyyy = Gtk.CheckButton(label=_('..1888 -> 1888'))
        vbox.pack_start(self.handle_yyyy, False, True, 0)
        self.handle_intervals = Gtk.CheckButton(label=_('1888-99 -> 1888 - 1899'))
        vbox.pack_start(self.handle_intervals, False, True, 0)
        self.handle_before = Gtk.CheckButton(label=_('<1888/-1888 -> before 1888'))
        vbox.pack_start(self.handle_before, False, True, 0)
        self.handle_after = Gtk.CheckButton(label=_('>1888/1888- -> after 1888'))
        vbox.pack_start(self.handle_after, False, True, 0)

        btn_execute = Gtk.Button(label=_('Execute'))
        btn_execute.connect("clicked", self.__execute)
        vbox.pack_start(btn_execute, False, True, 20)

        vbox.show_all()
        return vbox

    def __select_replace_text(self, obj):
        """Enable/disable the replacement widgets with their checkbox."""
        checked = self.replace_text.get_active()
        self.old_text.set_sensitive(checked)
        self.new_text.set_sensitive(checked)
        self.use_regex.set_sensitive(checked)

    def __execute(self, obj):
        """Fix the dates of all events selected in the active view."""
        with DbTxn(_("Correcting invalid dates"), self.dbstate.db) as self.trans:
            selected_handles = self.uistate.viewmanager.active_page.selected_handles()
            for eventhandle in selected_handles:
                event = self.dbstate.db.get_event_from_handle(eventhandle)
                print(event)
                dateobj = event.get_date_object()
                datestr = dateobj.get_text()
                pprint.pprint(dateobj.__dict__)
                if dateobj.is_valid():
                    print(dateobj, "is valid")
                    continue
                if datestr == "":
                    print(dateobj, "is blank")
                    continue
                print(datestr, "is INvalid")
                self.__fix_date(dateobj, datestr)
                print("newdate:", repr(dateobj))
                pprint.pprint(dateobj.__dict__)
                if self.replace_text.get_active():
                    datestr = dateobj.get_text()
                    old_text = self.old_text.get_text()
                    new_text = self.new_text.get_text()
                    if self.use_regex.get_active():
                        try:
                            new_datestr = re.sub(old_text, new_text, datestr)
                        except Exception as e:
                            traceback.print_exc()
                            raise RuntimeError(_("Regex operation failed: {}").format(e))
                    else:
                        new_datestr = datestr.replace(old_text, new_text)
                    if new_datestr != datestr:
                        dateobj.set(text=new_datestr, modifier=Date.MOD_TEXTONLY)
                self.dbstate.db.commit_event(event, self.trans)

    def __fix_date(self, dateobj, datestr):
        """Try each enabled rule in turn; the first rule whose pattern
        matches rewrites ``dateobj`` in place and returns."""
        if self.handle_dd_mm_yyyy.get_active():
            # 31.12.1888 / 31,12,1888 / 31-12-1888 / 31/12/1888
            r = match(datestr,
                      p(d=oneortwodigits), sep,
                      p(m=oneortwodigits), sep,
                      p(y=fourdigits))
            if r:
                val = dateval(r.y, r.m, r.d)
                if val:
                    dateobj.set(value=val, modifier=Date.MOD_NONE)
                return
        if self.handle_mm_yyyy.get_active():
            # .12.1888 -> month + year only
            r = match(datestr,
                      sep,
                      p(m=oneortwodigits), sep,
                      p(y=fourdigits))
            if r:
                val = dateval(r.y, r.m, 1)
                if val:
                    dateobj.set(value=(0, int(r.m), int(r.y), False),
                                modifier=Date.MOD_NONE)
                return
        if self.handle_dd_yyyy.get_active():
            # 31..1888 -> day + year only
            r = match(datestr,
                      p(d=oneortwodigits), sep, sep,
                      p(y=fourdigits))
            if r:
                val = dateval(r.y, 1, r.d)
                if val:
                    dateobj.set(value=(int(r.d), 0, int(r.y), False),
                                modifier=Date.MOD_NONE)
                return
        if self.handle_yyyy.get_active():
            # ..1888 -> year only
            r = match(datestr,
                      sep, sep,
                      p(y=fourdigits))
            if r:
                # BUG FIX: the original guarded this with `if val:`, but
                # no `val` is ever assigned on this path, so the rule
                # raised NameError whenever it fired.
                dateobj.set(value=(0, 0, int(r.y), False),
                            modifier=Date.MOD_NONE)
                return
        if self.handle_intervals.get_active():
            # 1888-1899 (also with a space before or after the dash).
            for fragments in (
                (p(y1=fourdigits), dash, p(y2=fourdigits)),
                (p(y1=fourdigits), space, dash, p(y2=fourdigits)),
                (p(y1=fourdigits), dash, space, p(y2=fourdigits)),
            ):
                r = match(datestr, *fragments)
                if r:
                    dateobj.set(modifier=Date.MOD_SPAN,
                                value=(0, 0, int(r.y1), False,
                                       0, 0, int(r.y2), False))
                    return
            # 1888-99: expand the short year with the century of the first.
            r = match(datestr, p(y1=fourdigits), dash, p(y2=twodigits))
            if r:
                if int(r.y2) > int(r.y1[2:]):
                    century = r.y1[0:2]
                    dateobj.set(modifier=Date.MOD_SPAN,
                                value=(0, 0, int(r.y1), False,
                                       0, 0, int(century + r.y2), False))
                    return
        if self.handle_before.get_active():
            # -1888 or <1888 -> before 1888
            for fragments in ((dash, p(y=fourdigits)), (lt, p(y=fourdigits))):
                r = match(datestr, *fragments)
                if r:
                    dateobj.set(modifier=Date.MOD_BEFORE,
                                value=(0, 0, int(r.y), False))
                    return
        if self.handle_after.get_active():
            # 1888- or >1888 -> after 1888
            for fragments in ((p(y=fourdigits), dash), (gt, p(y=fourdigits))):
                r = match(datestr, *fragments)
                if r:
                    dateobj.set(modifier=Date.MOD_AFTER,
                                value=(0, 0, int(r.y), False))
                    return
'''
class Date:
...
def set(self, quality=None, modifier=None, calendar=None,
value=None, text=None, newyear=0):
"""
Set the date to the specified value.
:param quality: The date quality for the date (see :meth:`get_quality`
for more information).
Defaults to the previous value for the date.
:param modified: The date modifier for the date (see
:meth:`get_modifier` for more information)
Defaults to the previous value for the date.
:param calendar: The calendar associated with the date (see
:meth:`get_calendar` for more information).
Defaults to the previous value for the date.
:param value: A tuple representing the date information. For a
non-compound date, the format is (DD, MM, YY, slash)
and for a compound date the tuple stores data as
(DD, MM, YY, slash1, DD, MM, YY, slash2)
Defaults to the previous value for the date.
:param text: A text string holding either the verbatim user input
or a comment relating to the date.
Defaults to the previous value for the date.
:param newyear: The newyear code, or tuple representing (month, day)
of newyear day.
Defaults to 0.
The sort value is recalculated.
"""
'''
def transform(self,item,options,phase):
    """
    Fix dates of the forms:
    31.12.1888 -> 31 DEC 1888
    31,12,1888 -> 31 DEC 1888
    31-12-1888 -> 31 DEC 1888
    31/12/1888 -> 31 DEC 1888
    1888-12-31 -> 31 DEC 1888
    .12.1888 -> DEC 1888
    12.1888 -> DEC 1888
    12/1888 -> DEC 1888
    12-1888 -> DEC 1888
    0.12.1888 -> DEC 1888
    00.12.1888 -> DEC 1888
    00.00.1888 -> 1888
    00 JAN 1888 -> JAN 1888
    1950-[19]59 -> FROM 1950 TO 1959
    1950- -> FROM 1950
    >1950 -> FROM 1950
    -1950 -> TO 1950
    <1950 -> TO 1950

    Only items whose tag is "DATE" are rewritten; each handler is gated by
    the corresponding option flag and the first matching rule wins.
    Returns the (possibly modified) item when a rule fired, True otherwise.
    """
    self.options = options
    if item.tag == "DATE":
        value = item.value.strip()
        if options.handle_dd_mm_yyyy:
            # 31.12.1888 -> 31 DEC 1888
            # 31,12,1888 -> 31 DEC 1888
            # 31-12-1888 -> 31 DEC 1888
            # 31/12/1888 -> 31 DEC 1888
            r = match(value,
                      p(d=oneortwodigits),sep,
                      p(m=oneortwodigits),sep,
                      p(y=fourdigits))
            if r:
                # fmtdate validates the day/month combination; a falsy
                # result means the candidate was not a real date.
                val = fmtdate(r.y,r.m,r.d)
                if val:
                    item.value = val
                    return item
        if options.handle_zeros:
            # 0.0.1888 -> 1888
            # 00.00.1888 -> 1888
            r = match(value,zerototwozeros,dot,zerototwozeros,p(y=fourdigits))
            if r:
                item.value = r.y
                return item
            # 00.12.1888 -> DEC 1888
            # .12.1888 -> DEC 1888
            # 12.1888 -> DEC 1888
            r = match(value,zerototwozeros,dot,p(m=oneortwodigits),dot,p(y=fourdigits))
            if not r:
                r = match(value,p(m=oneortwodigits),dot,p(y=fourdigits))
            if r:
                val = fmtdate(r.y,r.m,1)
                if val:
                    # drop the "DD " day prefix added by fmtdate
                    item.value = val[3:]
                    return item
        if options.handle_zeros2:
            # 0 JAN 1888 -> JAN 1888
            if value.startswith("0 "):
                item.value = item.value[2:]
                return item
            # 00 JAN 1888 -> JAN 1888
            if value.startswith("00 "):
                item.value = item.value[3:]
                return item
        if options.handle_intervals:
            # 1888-1899
            r = match(value,p(y1=fourdigits),dash,p(y2=fourdigits))
            if r:
                century = r.y1[0:2]  # NOTE(review): unused in this branch
                item.value = "FROM {r.y1} TO {r.y2}".format(**locals())
                return item
            # 1888-99 -> expand the short year with the century of y1
            r = match(value,p(y1=fourdigits),dash,p(y2=twodigits))
            if r:
                if int(r.y2) > int(r.y1[2:]):
                    century = r.y1[0:2]
                    item.value = "FROM {r.y1} TO {century}{r.y2}".format(**locals())
                    return item
        if options.handle_intervals2:
            # 1888-, >1888 (open-ended start)
            # The keyword depends on the parent tag: events use AFT,
            # durations (RESI/OCCU) use FROM.
            tag = item.path.split(".")[-2]
            kw = "AFT"
            if tag in ('RESI','OCCU'): kw = "FROM"
            r = match(value,p(y=fourdigits),dash)
            if r:
                item.value = "{kw} {r.y}".format(**locals())
                return item
            r = match(value,gt,p(y=fourdigits))
            if r:
                item.value = "{kw} {r.y}".format(**locals())
                return item
        if options.handle_intervals3:
            # -1888, <1888 (open-ended end)
            tag = item.path.split(".")[-2]
            # NOTE(review): "BE" and "ennen" (Finnish for "before") look
            # locale/tool specific; standard GEDCOM would use "BEF" and
            # "TO" — confirm against the consuming software.
            kw = "BE"
            if tag in ('RESI','OCCU'): kw = "ennen"
            r = match(value,dash,p(y=fourdigits))
            if r:
                item.value = "{kw} {r.y}".format(**locals())
                return item
            r = match(value,lt,p(y=fourdigits))
            if r:
                item.value = "{kw} {r.y}".format(**locals())
                return item
        if options.handle_yyyy_mm_dd:
            # 1888-12-31
            r = match(value,p(y=fourdigits),dash,p(m=twodigits),dash,p(d=twodigits))
            if r:
                val = fmtdate(r.y,r.m,r.d)
                if val:
                    item.value = val
                    return item
        if options.handle_yyyy_mm:
            # 1888-12
            r = match(value,p(y=fourdigits),dash,p(m=twodigits))
            if r:
                val = fmtdate(r.y,r.m,1)
                if val:
                    # drop the "DD " day prefix added by fmtdate
                    item.value = val[3:]
                    return item
    return True
|
import sys
import numpy as np
from numpy import linalg as LA


def is_nilpotent(matrix):
    """Return True when every eigenvalue of *matrix* is numerically zero.

    A square matrix is nilpotent iff all of its eigenvalues are zero.
    LAPACK returns eigenvalues as floats, so compare with a tolerance:
    the original exact `i != 0` test misclassified nilpotent matrices
    whose computed eigenvalues are ~1e-16 instead of exactly 0.
    """
    eigenvalues, _ = LA.eig(matrix)
    return bool(np.all(np.isclose(eigenvalues, 0.0)))


if __name__ == '__main__':
    # CLI: argv[1] is a bracketed matrix literal (e.g. "[[0,1],[0,0]]"),
    # argv[2] its dimension.  Guarded so importing the module is side-effect
    # free and the helper stays testable.
    stringMatrix = sys.argv[1]
    dimension = int(sys.argv[2])
    # Strip list syntax so the string splits into bare numbers.
    stringMatrix = stringMatrix.replace('[', '')
    stringMatrix = stringMatrix.replace(']', '')
    stringMatrix = stringMatrix.replace(',', ' ')
    intMatrix = np.array(stringMatrix.split()).astype(int)
    matrix = intMatrix.reshape(dimension, dimension)
    print("Nilpotent" if is_nilpotent(matrix) else "Not nilpotent")
|
import numpy as np
from more_itertools import chunked
from math import ceil
from itertools import chain
from collections import Counter, defaultdict
from multiprocessing import Pool, cpu_count
import re
import os
import pickle
import fastText as ft
from keras.preprocessing.sequence import pad_sequences
import spacy
from spacy.symbols import ORTH
def flattenlist(listoflists):
    """Concatenate an iterable of lists into a single flat list."""
    flat = []
    for sublist in listoflists:
        flat.extend(sublist)
    return flat
class Tokenizer():
    """spaCy-based word tokenizer: strips <br> tags, lowercases, pads '/'
    and '#' with spaces, and keeps the _bos_/_eos_/_unk_ sentinels intact."""

    def __init__(self, lang='en'):
        self.tokenizer = spacy.load(lang)
        self.re_br = re.compile(r'<\s*br\s*/?>', re.IGNORECASE)
        # Register the sentinels as single tokens so spaCy does not split
        # them on the underscores.
        for w in ('_eos_','_bos_','_unk_'):
            self.tokenizer.tokenizer.add_special_case(w, [{ORTH: w}])

    def sub_br(self, X):
        """Replace <br>-style tags with newlines."""
        return self.re_br.sub("\n", X)

    def spacy_tokenizer(self, X):
        """Lowercase, strip <br>, and return non-whitespace token texts."""
        return [x.text for x in self.tokenizer.tokenizer(self.sub_br(X.lower())) if not x.is_space]

    def proc_text(self, X):
        """Pad '/' and '#' with spaces, collapse space runs, then tokenize."""
        s = re.sub(r'([/#])', r' \1 ', X)
        # BUG FIX: this substitution previously ran on the original X,
        # silently discarding the '/'-'#' padding added above; it must run
        # on the intermediate string s.
        s = re.sub(' {2,}', ' ', s)
        return self.spacy_tokenizer(s)

    @staticmethod
    def proc_all(X):
        # Builds its own Tokenizer so it can be shipped to a worker process.
        tok = Tokenizer()
        return [tok.proc_text(x) for x in X]

    def fit_transform(self, X):
        """Tokenize X in parallel using about half the logical cores."""
        core_usage = (cpu_count() + 1) // 2
        with Pool(core_usage) as p:
            chunk_size = ceil(len(X) / core_usage)
            results = p.map(Tokenizer.proc_all, chunked(X, chunk_size), chunksize=1)
        return flattenlist(results)

    def transform(self, X):
        """Tokenize X serially in the current process."""
        return Tokenizer.proc_all(X)
class TextDataSet(object):
    """Builds a vocabulary over tokenized text and converts sentences to
    padded integer sequences (via keras pad_sequences)."""

    def __init__(self, max_vocab, maxlen, min_freq=1, padding='pre'):
        # max_vocab: cap on vocabulary size (before the 4 special tokens)
        # maxlen: padded sequence length
        # min_freq: words must occur strictly MORE than this to be kept
        # padding: 'pre' or 'post', passed straight to pad_sequences
        self.max_vocab = max_vocab
        self.min_freq = min_freq
        self.maxlen = maxlen
        self.padding = padding
        self.tokenizer = Tokenizer()

    def fit(self, text, tokenize=True):
        """Build the vocabulary from (optionally tokenized) text.

        Returns the tokenized text so callers can reuse it.
        """
        if tokenize:
            text = self.tokenizer.fit_transform(text)
        self.freq = Counter(p for sent in text for p in sent)
        # NOTE(review): the strict '>' means that with the default
        # min_freq=1 words occurring exactly once are dropped — confirm
        # that is intended.
        self.idx2word = [word for word, count in self.freq.most_common(self.max_vocab) if count > self.min_freq]
        # Special tokens occupy indices 0..3: _unk_, _pad_, _bos_, _eos_.
        self.idx2word.insert(0, '_unk_')
        self.idx2word.insert(1, '_pad_')
        self.idx2word.insert(2, '_bos_')
        self.idx2word.insert(3, '_eos_')
        # Unknown words map to 0, i.e. '_unk_'.
        self.word2idx = defaultdict(lambda: 0, {word: i for i, word in enumerate(self.idx2word)})
        self.pad_int = self.word2idx['_pad_']
        return text

    def fit_transform(self, text, tokenize=True):
        """Fit the vocabulary, then return the padded integer matrix."""
        text = self.fit(text, tokenize=tokenize)
        text_padded = self.internal_transform(text, tokenize=False)
        return np.array(text_padded)

    def internal_transform(self, text, tokenize=True):
        """Convert sentences to padded integer sequences with the fitted vocab."""
        if tokenize:
            text = self.tokenizer.fit_transform(text)
        # NOTE(review): np.array over ragged lists builds an object array;
        # recent NumPy versions raise here — confirm the pinned version.
        text_ints = np.array([[self.word2idx[i] for i in sent] for sent in text])
        text_padded = pad_sequences(text_ints, maxlen=self.maxlen, padding=self.padding, value=self.pad_int)
        return np.array(text_padded)

    def transform(self, text, tokenize=True, word2idx=None, maxlen=None, padding=None):
        """Like internal_transform, but optionally overrides (and persists)
        the vocabulary, maxlen and padding used for conversion."""
        if tokenize:
            text = self.tokenizer.fit_transform(text)
        if word2idx:
            self.word2idx = word2idx
        if maxlen:
            self.maxlen = maxlen
        if padding:
            self.padding = padding
        text_ints = np.array([[self.word2idx[i] for i in sent] for sent in text])
        text_padded = pad_sequences(text_ints, maxlen=self.maxlen, padding=self.padding, value=self.pad_int)
        return np.array(text_padded)
def limit_unk_vocab(train_text, train_summary, all_text_model, max_unk_text=1, max_unk_summary=1):
    """Drop text/summary pairs that contain too many '_unk_' tokens.

    Keeps a pair only when both its text and its summary have at most
    max_unk_text / max_unk_summary unknown-token ids (looked up from
    all_text_model.word2idx).  Prints the surviving sizes and returns the
    kept pairs as two parallel numpy arrays.
    """
    unk = all_text_model.word2idx['_unk_']
    kept_text = []
    kept_summary = []
    for text_seq, summary_seq in zip(train_text, train_summary):
        n_unk_text = sum(1 for tok in text_seq if tok == unk)
        n_unk_summary = sum(1 for tok in summary_seq if tok == unk)
        if n_unk_text <= max_unk_text and n_unk_summary <= max_unk_summary:
            kept_text.append(text_seq.tolist())
            kept_summary.append(summary_seq.tolist())
    assert(len(kept_text) == len(kept_summary))
    print('new text size', len(kept_text))
    print('new summary size', len(kept_summary))
    return np.array(kept_text), np.array(kept_summary)
|
# Usage
# Run the animation in "pacman.gif" for 5 total cycles. When loading from an animated GIF file, the timing of each frame is automatically loaded from the file but if a different, constant time is needed the "fps" or "sleep" parameters of run() can be used.
#
# ```python
# import bibliopixel.image as image
# anim = image.ImageAnim(led, "./anims/pacman.gif")
# anim.run(untilComplete = True, max_cycles = 5)
# ```
#
# Run the animation from sequential files stored in "./anim/supermario". Files are loaded in alpha/numeric order. To ensure files load in the same order on all systems, best practice is to name the files as: 001.bmp, 002.bmp, 003.bmp, 004.bmp, etc...
#
# Note that when loading static files as a sequence, the "fps" or "sleep" parameters of run() are required to control the timing between each frame. Like above, untilComplete and max_cycles are still valid when using static sequences.
#
# ```python
# import bibliopixel.image as image
# anim = image.ImageAnim(led, "./anims/supermario/")
# anim.run()
# ```
from bibliopixel.animation import BaseMatrixAnim
import bibliopixel.log as log
try:
from PIL import Image, ImageSequence
except ImportError as e:
error = "Please install Python Imaging Library: pip install pillow"
log.logger.error(error)
raise ImportError(error)
import glob
import os
import bibliopixel.colors as colors
import threading
import random as rand
def _getBufferFromImage(img, led, bgcolor, bright, offset):
    """Render one PIL image frame into a flat RGB LED buffer.

    Returns (duration, buffer): duration is the frame delay in ms taken
    from the image metadata (None when absent); buffer is a flat list of
    numLEDs*3 gamma-corrected channel values.  Pixels falling outside the
    matrix (after applying the X/Y offset) are clipped; fully transparent
    pixels take bgcolor.
    """
    duration = None
    if 'duration' in img.info:
        duration = img.info['duration']
    # Clip the drawable area to whatever fits on the matrix after offset.
    w = led.width - offset[0]
    if img.size[0] < w:
        w = img.size[0]
    h = led.height - offset[1]
    if img.size[1] < h:
        h = img.size[1]
    ox = offset[0]
    oy = offset[1]
    buffer = [0 for x in range(led.numLEDs * 3)]
    gamma = led.driver[0].gamma
    # Pre-fill with the background colour only when it is not black
    # (the buffer already starts zeroed).
    if bgcolor != (0, 0, 0):
        for i in range(led.numLEDs):
            buffer[i * 3 + 0] = gamma[bgcolor[0]]
            buffer[i * 3 + 1] = gamma[bgcolor[1]]
            buffer[i * 3 + 2] = gamma[bgcolor[2]]
    # Paste into an RGBA frame so getpixel always yields 4 channels.
    frame = Image.new("RGBA", img.size)
    frame.paste(img)
    for x in range(ox, w + ox):
        for y in range(oy, h + oy):
            if x < 0 or y < 0:
                continue
            pixel = led.matrix_map[y][x]
            r, g, b, a = frame.getpixel((x - ox, y - oy))
            if a == 0:
                # Fully transparent: show the background colour.
                r, g, b = bgcolor
            else:
                # Pre-multiply by alpha (>>8 approximates /255).
                r = (r * a) >> 8
                g = (g * a) >> 8
                b = (b * a) >> 8
            if bright != 255:
                r, g, b = colors.color_scale((r, g, b), bright)
            buffer[pixel * 3 + 0] = gamma[r]
            buffer[pixel * 3 + 1] = gamma[g]
            buffer[pixel * 3 + 2] = gamma[b]
    return (duration, buffer)
def _loadGIFSequence(imagePath, led, bgcolor, bright, offset):
    """Load every frame of a GIF as a list of (duration, buffer) tuples.

    When no explicit offset is given, images smaller than the matrix are
    centred on it.
    """
    img = Image.open(imagePath)
    if offset == (0, 0):
        w = 0
        h = 0
        # Use integer division: the offsets are later used as pixel
        # indices, and '/' would produce floats on Python 3 (same result
        # as the old '/' under Python 2 for these non-negative ints).
        if img.size[0] < led.width:
            w = (led.width - img.size[0]) // 2
        if img.size[1] < led.height:
            h = (led.height - img.size[1]) // 2
        offset = (w, h)
    images = []
    for frame in ImageSequence.Iterator(img):
        images.append(
            _getBufferFromImage(frame, led, bgcolor, bright, offset))
    return images
class loadnextthread(threading.Thread):
    """Background worker that reloads the next GIF whenever signalled."""

    def __init__(self, imganim):
        super(loadnextthread, self).__init__()
        self.daemon = True  # setDaemon() is deprecated; set the attribute
        # BUG FIX: the event was previously stored as self._stop, which
        # clobbers threading.Thread's internal _stop member and can break
        # join()/interpreter shutdown — use private names instead.
        self._stop_event = threading.Event()
        self._load_event = threading.Event()
        self.anim = imganim

    def stop(self):
        """Request the worker loop to exit.

        NOTE(review): a worker blocked in wait() only notices this after
        the next loadNext() wakes it (pre-existing behavior).
        """
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def loading(self):
        """True while a load has been requested and not yet finished."""
        return self._load_event.is_set()

    def loadNext(self):
        """Signal the worker to load the next GIF."""
        self._load_event.set()

    def run(self):
        while not self.stopped():
            self._load_event.wait()
            self.anim.loadNextGIF()
            self._load_event.clear()
class ImageAnim(BaseMatrixAnim):

    def __init__(self, led, imagePath, offset=(0, 0), bgcolor=colors.Off, brightness=255, cycles=1, random=False, use_file_fps=True):
        """Helper class for displaying image animations for GIF files or a set of bitmaps
        led - LEDMatrix instance
        imagePath - Path to either a single animated GIF image or folder of GIF files
        offset - X,Y tuple coordinates at which to place the top-left corner of the image
        bgcolor - RGB tuple color to replace any transparent pixels with. Avoids transparent showing as black
        brightness - Brightness value (0-255) to scale the image by. Otherwise uses master brightness at the time of creation
        cycles - Number of times to play a GIF before moving to the next (folder mode only)
        random - Pick the next GIF at random instead of sequentially (folder mode only)
        use_file_fps - Use the per-frame durations stored in the GIF file
        """
        super(ImageAnim, self).__init__(led)
        self.cycles = cycles
        self.cycle_count = 0
        self.random = random
        self.use_file_fps = use_file_fps
        self._bright = brightness
        # Fall back to the master brightness unless an explicit value was given.
        if self._bright == 255 and led.masterBrightness != 255:
            self._bright = led.masterBrightness
        self._bgcolor = colors.color_scale(bgcolor, self._bright)
        self._offset = offset
        # Double buffering: one image displays while the next one loads.
        self._image_buffers = [None, None]
        self._cur_img_buf = 1  # start here because loadNext swaps it
        self.folder_mode = os.path.isdir(imagePath)
        self.gif_files = []
        self.gif_indices = []
        self.folder_index = -1
        self.load_thread = None
        if self.folder_mode:
            self.gif_files = glob.glob(imagePath + "/*.gif")
            # NOTE(review): this range() is later .pop()'d in loadNextGIF,
            # which only works where range() returns a list (Python 2) —
            # confirm the targeted Python version.
            self.gif_indices = range(len(self.gif_files))
            self.loadNextGIF()  # first load is manual
            self.swapbuf()
            self.load_thread = loadnextthread(self)
            self.load_thread.start()
            self.load_thread.loadNext()  # pre-load next image
        else:
            self.loadGIFFile(imagePath)
            self.swapbuf()
        self._curImage = 0

    def _exit(self, type, value, traceback):
        # Ask the background loader to stop when the animation exits.
        if self.load_thread:
            self.load_thread.stop()

    def loadGIFFile(self, gif):
        """Load a single .gif file into the *next* image buffer."""
        _, ext = os.path.splitext(gif)
        next_buf = self.next_img_buf()
        if ext.lower().endswith("gif"):
            log.logger.info("Loading {0} ...".format(gif))
            self._image_buffers[next_buf] = _loadGIFSequence(gif, self._led, self._bgcolor, self._bright, self._offset)
        else:
            raise ValueError('Must be a GIF file!')

    def loadNextGIF(self):
        """Pick the next GIF (random without repeats, or sequential) and load it."""
        if self.random:
            if len(self.gif_indices) < 2:
                # Last remaining index: use it, then refill the pool.
                self.folder_index = self.gif_indices[0]
                self.gif_indices = range(len(self.gif_files))
            else:
                self.folder_index = self.gif_indices.pop(rand.randrange(0, len(self.gif_indices)))
        else:
            self.folder_index += 1
            if self.folder_index >= len(self.gif_files):
                self.folder_index = 0
        self.loadGIFFile(self.gif_files[self.folder_index])

    def next_img_buf(self):
        """Index of the buffer that is NOT currently displayed (0 or 1)."""
        i = self._cur_img_buf
        i += 1
        if i > 1:
            i = 0
        return i

    def swapbuf(self):
        """Switch display to the other image buffer."""
        self._cur_img_buf = self.next_img_buf()

    def preRun(self):
        self._curImage = 0

    def step(self, amt=1):
        """Display the next frame; at end-of-GIF handle cycling/advancing."""
        self._led.all_off()
        img = self._image_buffers[self._cur_img_buf]
        self._led.setBuffer(img[self._curImage][1])
        if self.use_file_fps:
            # Per-frame delay (ms) recorded by _getBufferFromImage.
            self._internalDelay = img[self._curImage][0]
        self._curImage += 1
        if self._curImage >= len(img):
            self._curImage = 0
            if self.folder_mode:
                if self.cycle_count < self.cycles - 1:
                    self.cycle_count += 1
                elif not self.load_thread.loading():  # wait another cycle if still loading
                    self.animComplete = True
                    self.load_thread.loadNext()
                    self.swapbuf()
                    self.cycle_count = 0
            else:
                self.animComplete = True
        self._step = 0
# Animation registry entry consumed by the BiblioPixel loader/UI:
# declares the ImageAnim class, its controller type and its parameters.
MANIFEST = [
    {
        "class": ImageAnim,
        "controller": "matrix",
        "desc": "Display animated GIFs",
        "display": "ImageAnim",
        "id": "ImageAnim",
        "params": [
            {
                "default": None,
                "help": "Path to either a single GIF or folder of GIF files",
                "id": "imagePath",
                "label": "GIF/Folder Path",
                "type": "str"
            },
            {
                "default": [
                    0,
                    0,
                    0
                ],
                "help": "",
                "id": "bgcolor",
                "label": "Background",
                "type": "color"
            },
            {
                "default": 255,
                "help": "",
                "id": "brightness",
                "label": "Brightness",
                "type": "int"
            },
            {
                "default": [0, 0],
                "help": "Image placement offset",
                "id": "offset",
                "label": "Offset",
                "type": "multi_tuple",
                "controls": [{
                    "label": "X",
                    "type": "int",
                    "default": 0
                }, {
                    "label": "Y",
                    "type": "int",
                    "default": 0
                }]
            },
            {
                "default": 1,
                "help": "# of cycles to run before next GIF. Folder mode only.",
                "id": "cycles",
                "label": "# Cycles",
                "type": "int"
            },
            {
                "default": True,
                "help": "Random GIF selection. Folder mode only.",
                "id": "random",
                "label": "Random",
                "type": "bool"
            },
            {
                "default": True,
                "help": "Use framerate stored in GIF",
                "id": "use_file_fps",
                "label": "Use File FPS",
                "type": "bool"
            }
        ],
        "type": "animation"
    }
]
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Blog(models.Model):
    """A blog post, with a writer and many-to-many 'likes' through Like."""
    title = models.CharField(max_length = 200)
    # NOTE(review): 'data published' looks like a typo for 'date published'
    # (runtime string — left unchanged here).
    pub_date = models.DateTimeField('data published')
    body = models.TextField()
    # on_delete=CASCADE: delete posts when their writer is deleted.
    # null=True: a post may exist without a writer (non-null is preferable,
    # but existing rows are allowed to be empty).
    writer=models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    # Many-to-many to User, mediated by the Like through-model using its
    # (blog, user) foreign keys.
    likes=models.ManyToManyField(User, through='Like', through_fields=('blog','user'), related_name="likes" )
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A comment left by a user on one blog post."""
    body=models.TextField(max_length=500)
    # NOTE(review): 'data published' looks like a typo for 'date published'.
    pub_date=models.DateTimeField('data published')
    writer=models.ForeignKey(User, on_delete=models.CASCADE)
    post=models.ForeignKey(Blog, on_delete=models.CASCADE) # one of the blog posts
class Like(models.Model):
    """Through-model connecting a User's like to a Blog post."""
    blog=models.ForeignKey(Blog, on_delete=models.CASCADE)
    # ForeignKeys (not OneToOne) because one user can create many likes and
    # one blog can be liked by many users — Like sits on the "many" side of
    # both relations.
    user=models.ForeignKey(User, on_delete=models.CASCADE)
from time import sleep
def filtr(ls):
    """Return True only for messages coming from the chat with id 1."""
    return ls['message'].get('chat_id') == 1
def filtr2(ls):
    """Return True only for messages sent by user id 404544337."""
    return ls['message'].get('user_id') == 404544337
def get_new_message(api,flag):
    """Poll the VK API for unread dialogs whose body contains "бт ".

    Returns a dict mapping peer id to message body for every matching
    unread message in the first non-empty batch, marking each one as read.
    Group-chat peers are offset by VK's 2000000000 chat-id base.  Blocks,
    re-polling every 0.45 s, until something matches.

    NOTE(review): `flag` is unused, and the except clause only re-raises —
    both look like leftovers.
    """
    dict_of_messages = {}
    try:
        while True:
            m = api.messages.getDialogs(unread=1)
            if m['count'] != 0:
                for elem in m['items']:
                    # Only collect messages containing the bot keyword.
                    if elem['message']['body'].find("бт ") != -1:
                        if elem['message'].get('chat_id') != None:
                            # Group chat: map chat_id into the peer-id space.
                            dict_of_messages.update([(2000000000 + elem['message']['chat_id'], elem['message']['body'])])
                            api.messages.markAsRead(peer_id=str(2000000000 + elem['message']['chat_id']), oauth=1, v=5.45)
                        else:
                            # Direct message: key by the sender's user id.
                            dict_of_messages.update([(elem['message']['user_id'], elem['message']['body'])])
                            api.messages.markAsRead(peer_id=str(elem['message']['user_id']), oauth=1, v=5.45)
                    else:
                        continue
                return dict_of_messages
            else:
                sleep(0.45)
    except Exception as e:
        raise e
|
import requests, urllib3, threading, urllib.parse as urlparse, atexit, time
from urllib.parse import parse_qs
from datetime import date, datetime
from bs4 import BeautifulSoup
from viberbot.api.messages import URLMessage
from viberbot.api.messages.text_message import TextMessage
from apscheduler.schedulers.background import BackgroundScheduler
host = 'https://er.medkirov.ru'
set_links = set()
clients = ['LL3mgrqogJK9yxBlHFvvSQ=='] #, 'J+GEZQj348xxUEK77WH/gg==', 'tPJ5GNMNYhx6MP0XND9Dmw==']
import pybot
def search_tickets():
    """Scrape er.medkirov.ru for free appointment slots and notify clients.

    Scans the next 4 ISO weeks of the hard-coded city/hospital/specialization
    calendar, and for one specific doctor sends every newly seen free slot
    to all Viber `clients` (deduplicated via the module-global `set_links`).
    Sleeps 5 s between weeks to avoid hammering the server.
    """
    urllib3.disable_warnings()  # the site is fetched with verify=False
    current_year = date.today().year
    current_week = date.today().isocalendar()[1]
    last_week = current_week + 3
    global host, set_links
    while current_week <= last_week:
        # Weekly calendar page for the fixed city/hospital/specialization.
        url = host + '/cities/297576/hospitals/215/specializations/64/calendars/?week=' + str(current_week) + '&year=' + str(current_year)
        response = requests.get(url, verify=False)
        soup = BeautifulSoup(response.text, 'html.parser')
        table_rows = soup.find_all(class_='doctor')
        for data in table_rows:
            ticket = data.find(class_='freeTickets')
            if ticket:
                # Follow the link wrapping the free-tickets badge.
                ticket_url = host + ticket.find_parent('a')['href']
                parsed = urlparse.urlparse(ticket_url)
                ticket_date = datetime.strptime(
                    parse_qs(parsed.query)['date'][0], '%d.%m.%Y'
                )
                response = requests.get(ticket_url, verify=False)
                soup = BeautifulSoup(response.text, 'html.parser')
                times = soup.find_all('a', class_='kticket-free')
                doctor = soup.find('address').find(class_='media-heading').text
                # Only slots for this specific doctor are reported.
                if doctor == 'Довыденко Виктория Евгеньевна':
                    for ttime in times:
                        link = host + ttime['href']
                        if link not in set_links:
                            text = ticket_date.strftime('%d.%m.%Y') + ' в ' + ttime.text + \
                                '\n' + doctor + '\n' + link
                            for client in clients:
                                pybot.viber.send_messages(
                                    client, [TextMessage(text=text)]
                                )
                            # Remember the link so it is reported only once.
                            set_links.add(link)
        time.sleep(5)
        current_week += 1
# Run the scraper every 5 seconds in a background thread, and shut the
# scheduler down cleanly when the interpreter exits.
scheduler = BackgroundScheduler()
scheduler.add_job(func=search_tickets, trigger='interval', seconds=5)
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
|
#find digits
def count_divisible_digits(n):
    """Return how many digits of the decimal string *n* divide int(n).

    Zero digits are skipped (division by zero is undefined); repeated
    digits are counted once per occurrence.
    """
    value = int(n)
    count = 0
    for ch in n:
        digit = int(ch)
        if digit != 0 and value % digit == 0:
            count += 1
    return count


if __name__ == '__main__':
    # Guarded so the helper is importable/testable; as a script the
    # behavior is unchanged: read t test cases, then print one count per
    # line after all input is consumed (matching the original's buffered
    # output order).
    t = int(input())
    results = [count_divisible_digits(input().strip()) for _ in range(t)]
    for count in results:
        print(count)
|
from data_preprocessor import snd_ns_data_prepocessor as data_preprocessor
import subprocess
from sys import platform
from score_calculator import score_calculator
from plotter import ROC_AUC_plotter
def main():
    """Run the negative-selection syscall experiment end to end.

    Preprocesses the snd datasets into fixed-size chunks, shells out to the
    platform-specific negsel2 train/test script, then computes average
    scores and plots ROC/AUC curves.
    """
    snd_folder_names = ['snd-cert', 'snd-unm']
    syscalls_file_dir = 'negative-selection/syscalls'
    chunk_size = 15
    data_prep = data_preprocessor(chunk_size=chunk_size, snd_folder_names=snd_folder_names, syscalls_file_dir=syscalls_file_dir)
    data_prep.preprocess_and_save_snd_data()
    #Tested on windows! Should work on linux. dont forget to set chunk size and r values in scripts if changed!
    #TODO: support for parameter passing to scripts.
    if platform == "linux" or platform == "linux2":
        subprocess.call('train_test_negative_selection_store_scores.sh')
    elif platform == "darwin":
        # MAC
        raise NotImplementedError("negsel2 MAC OS scripting not yet supported.")
    elif platform == "win32":
        # Windows
        subprocess.call('train_test_negative_selection_store_scores.bat', shell=True)
    score_calc = score_calculator(syscalls_folder_dir=syscalls_file_dir, snd_folder_names=snd_folder_names, chunk_size=chunk_size)
    score_calc.calculate_average_and_save_to_file()
    plotter = ROC_AUC_plotter(syscalls_folder_dir=syscalls_file_dir, snd_folder_names=snd_folder_names, chunk_size=chunk_size)
    plotter.plot_and_save_ROC_AUC()
if __name__ == '__main__':
    main()
|
import numpy as np
import perturbations as PB
import math
# Physical constants and fiducial neutron-star parameters.
G = 4.32275e-3 # gravitational constant, (km/s)^2 pc/Msun
G_pc = G*1.05026504e-27 # gravitational constant, (pc/s)^2 pc/Msun
kmtopc = 1.0/(3.086*10**13) # km -> pc conversion factor
MNS = 1.4 # neutron-star mass, Msun
RNS = 10*kmtopc # neutron-star radius (10 km), pc
from scipy.interpolate import interp1d
from scipy.integrate import quad, cumtrapz
from scipy.special import erfi
from scipy.special import gamma as gamma_fun
##
## NS distributions
##
# Fraction of bound NSs, roughly from Table 4 of https://arxiv.org/pdf/0908.3182.pdf
f_bound = 0.8
N_bulge = f_bound*6.0e8 # number of bound NSs in the bulge
N_disk = f_bound*4.0e8 # number of bound NSs in the disk
def f_NFW(x):
    """NFW enclosed-mass function ln(1+x) - x/(1+x), with x = r/r_s."""
    return np.log(1.0 + x) - x / (1.0 + x)
def density(mass, radius):
    """Mean density of a uniform sphere with the given mass and radius."""
    volume = (4.0 / 3.0) * np.pi * radius**3
    return mass / volume
def del_density(mass, radius):
    """Return 3*density/radius for a uniform sphere, i.e. 9 m / (4 pi r^4)."""
    return 9.0 * mass / (4.0 * np.pi * radius**4)
def MCradius(mass, density):
    """Radius of a sphere with the given total mass and mean density."""
    volume = mass / density
    return (3.0 * volume / (4.0 * np.pi)) ** (1.0 / 3.0)
# Interpolation function for radius as a function of NFW density
#--------------------------------------------------------------
c = 100  # halo concentration used for the dimensionless profile below
x_list = np.geomspace(1e-5, 1e5, 1000) # x = r/R_max = r/(c r_s)
rho_list = 1/((c*x_list)*(1+c*x_list)**2)  # dimensionless NFW density at each x
# Out-of-range densities fall back to analytic asymptotes in x_of_rho().
x_of_rho_interp = interp1d(rho_list, x_list, bounds_error=False, fill_value = 0.0)
def x_of_rho(rho):
    """Invert the dimensionless NFW profile: x = r/(c r_s) for density rho.

    rho must be array-like: the out-of-range corrections below assign into
    the result by boolean mask.
    """
    x = x_of_rho_interp(rho)
    # Above the tabulated range (small x): rho ~ 1/(c x)  =>  x = 1/(c rho)
    m1 = rho > np.max(rho_list)
    x[m1] = c**-1/(rho[m1])
    # Below the range (large x): rho ~ 1/(c x)^3  =>  x = 1/(c rho^(1/3))
    m2 = rho < np.min(rho_list)
    x[m2] = c**-1/(rho[m2])**(1/3)
    return x
#--------------------------------------------------------------
#Number densities of neutron stars
#R_cyl is the cylindrical galactocentric distance
def nNS_bulge(R_cyl, Z):
    """Bulge NS number density [pc^-3] at cylindrical (R_cyl, Z) in pc.

    Bulge distribution from McMillan, P.J. 2011, MNRAS, 414, 2446 (1102.4340).
    """
    r0 = 75. #pc ### This value has been changed from 750pc to 75pc
    rc = 2.1e3 #pc
    # Normalising constant so the distribution integrates to 1 over the
    # whole volume.
    Nnorm = 1/90218880. #pc^{-3}
    rp2 = R_cyl**2 + 4*Z**2
    rp = np.sqrt(rp2)
    envelope = np.exp(-rp2/rc**2)
    powerlaw = (1 + rp/r0)**(1.8)
    return N_bulge*Nnorm*envelope/powerlaw
def nNS_disk(R_cyl, Z):
    """Disk NS number density [pc^-3] at cylindrical (R_cyl, Z) in pc.

    Lorimer profile, Eq. 6 of https://arxiv.org/pdf/1805.11097.pdf, with
    best-fit parameters from Table 3 (Broken Power-Law).
    """
    # R_sun = 8.5 kpc here for consistency with the fits in 1805.11097.
    Rsun = 8.5e3
    B = 3.91
    C = 7.54
    Zs = 0.76e3
    Norm = C**(B+2)/(4*np.pi*Rsun**2*Zs*np.exp(C)*gamma_fun(B+2))
    radial = (R_cyl/Rsun)**B*np.exp(-C*(R_cyl-Rsun)/Rsun)
    vertical = np.exp(-np.abs(Z)/Zs)
    return N_disk*Norm*radial*vertical
def nNS(R_cyl, Z):
    """Total NS number density: sum of the bulge and disk components."""
    bulge = nNS_bulge(R_cyl, Z)
    disk = nNS_disk(R_cyl, Z)
    return bulge + disk
#Tabulate
nNS_sph_interp = None  # lazily-built cache, see nNS_sph()
def calcNS_sph():
    """Tabulate the shell-averaged NS density <n>(r) and return an interpolator.

    For each spherical galactocentric radius r the cylindrical density is
    averaged over the sphere by integrating along Z; the 0.99999 factor
    keeps the endpoints strictly inside the sphere so R_cyl stays real.
    """
    #Galactocentric, spherical R
    r_list = np.geomspace(1, 200e3, 10000)
    nr_list = 0.0*r_list
    for i, r in enumerate(r_list):
        Z_list = 0.99999*np.linspace(-r, r, 1001)
        R_list = np.sqrt(r**2 - Z_list**2)
        # (1/2r) * integral over Z = average over the shell.
        nr_list[i] = (0.5/r)*np.trapz(nNS(R_list, Z_list), Z_list)
    return interp1d(r_list, nr_list, bounds_error=False, fill_value=0.0)
#Galactocentric, spherical R
def nNS_sph(R_sph):
    """Shell-averaged NS number density at spherical radius R_sph [pc].

    Builds the interpolation table on first call and caches it in the
    module-global nNS_sph_interp.
    """
    global nNS_sph_interp
    if (nNS_sph_interp is None):
        nNS_sph_interp = calcNS_sph()
    return nNS_sph_interp(R_sph)
#Parallelised for Z
def dPdZ(R_sph, Z):
    """Conditional probability density of height Z at fixed spherical radius.

    Z is array-like; entries with Z^2 >= R_sph^2 (outside the sphere) get
    probability 0.
    """
    ma = R_sph**2 > Z**2 #Mask for valid values
    R_cyl = np.sqrt(R_sph**2 - Z[ma]**2)
    result = 0.0*Z
    result[ma] = nNS(R_cyl, Z[ma])/(2*R_sph*nNS_sph(R_sph))
    #P(Z) = P(R_sph, Z)/P(R_sph) = (2 pi R_sph n(R_cyl, Z)/(4 pi R_sph^2 <n(R_sph)>))
    return result
#--------------------------------------------------------------
## NFW profile for AMC distribution
def rhoNFW(R):
    """NFW halo density [Msun/pc^3] at galactocentric radius R [pc]."""
    rho0 = 1.4e7*1e-9 # Msun*pc^-3, see Table 1 in 1304.5127
    rs = 16.1e3 # pc
    x = R/rs
    return rho0/(x*(1+x)**2)
#def P_r_given_rho(R, rho, mmin, mmax, gg):
# mass = 4.*np.pi*rho*R**3/3.
# print('made it here', HMF_sc(mass, mmin, mmax, gg), mass, mmin, mmax, gg)
# quit()
# return 3.*mass/R*HMF_sc(mass, mmin, mmax, gg)/mass
##
## Cross-section
##
def sigma_grav(R_AMC):
    # The velocity dispersion is Maxwell-Boltzmann with dispersion sigma_u=290km/s
    # The cross-section is \pi(R^2)(1+sigma_G2/u^2) with sigma_G2 = 2GM/R
    # The velocity-averaged cross section is \sqrt(2/pi/sigma_u^2)(sigma_G +2sigma_u^2)
    # NOTE(review): the geometric factor below uses (RNS**2 + R_AMC**2)
    # while the focusing term uses (RNS + R_AMC) — confirm which radius
    # combination is intended (the comment suggests (R_NS + R_AMC)^2).
    sigma_G2 = 2.0*G*MNS/(RNS + R_AMC) # (km/s)**2
    sigma_u2 = 290.0**2 # (km/s)**2
    ## Returns the cross section*u in pc^3/s
    return (RNS**2 + R_AMC**2)*np.sqrt(2.0*np.pi/sigma_u2)*(sigma_G2 +2.0*sigma_u2)*kmtopc
def Vcirc(rho):
    """Circular velocity [pc/s] in the NFW halo.

    NOTE(review): despite its name, the argument `rho` is used as a
    galactocentric *radius* in pc throughout (enclosed mass and
    v = sqrt(G M / r)) — confirm with callers before renaming.
    """
    rho0 = 1.4e7*1e-9 # Msun pc^-3, see Table 1 in 1304.5127
    rs = 16.1e3 # pc
    Menc = 4*np.pi*rho0*rs**3*(np.log((rs+rho)/rs) - rho/(rs+rho))
    return np.sqrt(G_pc*Menc/rho) # pc/s
def MC_profile(delta, r):
    """Density of an axion minicluster in GeV/pc^3.

    delta is the overdensity parameter; r is the radius in units of the
    minicluster radius.
    """
    hbarc = 0.197e-13 # GeV*cm
    pc = 3.086e18 # cm to pc
    rhoeq = 2.45036e-37*(pc/hbarc)**3 # GeV/pc^3
    rhoc = 140.*(1. + delta)*delta**3*rhoeq
    # Factor 0.25: the density at the surface is rho_c/4; profile falls
    # as r^(-9/4).
    return 0.25*rhoc*r**(-9.0/4.0)
#def MC_profile_self(M, R, r):
# ## r is in units of the axion MC radius
# ## M is in units of M_Sun
# ## R is in pc
# ## Returns the density of axion MCs in GeV/pc^3
# MSuninGeV = 1.115e57
# rho0 = 1.3187e62 # Density of axion MC in GeV/pc^3
# return 3.*M/(4.*np.pi*R**3)*MSuninGeV/r**(9/4)
def MC_profile_self(density, r):
    """Density of an axion minicluster in GeV/pc^3, from its mean density.

    density is the mean minicluster density in Msun/pc^3; r is the radius
    in units of the minicluster radius.
    """
    MSuninGeV = 1.115e57
    # Factor of 0.25 because the density at the surface is rho/4.
    surface = 0.25*density*MSuninGeV
    return surface/r**(9/4)
def MC_profile_NFW(density, r):
    """NFW-shaped minicluster density in GeV/pc^3.

    density is the mean minicluster density in Msun/pc^3; r is the radius
    in units of the minicluster radius; concentration fixed at c=100.
    """
    c = 100
    MSuninGeV = 1.115e57
    rho_s = density*c**3/(3*f_NFW(c))
    x = c*r
    return rho_s*MSuninGeV/(x*(1+x)**2)
#Conversion radius
def rc(theta, B0, P0, mGhz):
    """Photon-axion conversion radius in pc (Eq. 5 in 1804.03145).

    theta: magnetic misalignment angle; B0: surface field [G];
    P0: rotation period [s]; mGhz: axion mass as a frequency [GHz].
    """
    rc0 = 7.25859e-12 # pc, equal to 224 km
    angular = np.abs(3.*np.cos(theta)**2 - 1)
    radius_scale = RNS/(10*kmtopc)
    return rc0*radius_scale*(angular*B0/1.e14*1/P0/mGhz**2)**(1./3.)
#Radio signal
def signal(theta, Bfld, Prd, density, fa, ut, s0, r, ret_bandwidth=False, profile = "PL"):
# Returns the expected signal in microjansky
cs = 3.0e8 # speed of light in m/s
pc = 3.0860e16 # pc in m
hbar = 6.582e-16 # GeV/GHz
hbarT = 6.582e-25 # GeV s
GaussToGeV2 = 1.953e-20 # GeV^2
alpEM = 1/137.036 # Fine-structure constant
Lambda = 0.0755 # confinment scale in GeV
ma = Lambda**2/fa # axion mass in GeV
maGHz = ma/hbar # axion mass in GHz
ga = alpEM/(2.*np.pi*fa)*(2./3.)*(4. + 0.48)/1.48 # axion-photon coupling in GeV^-1
BGeV = GaussToGeV2*Bfld # B field in GeV^2
vrel0 = 1.e-3 # relative velocity in units of c
vrel = vrel0*cs/pc # relative velocity in pc/s
bandwidth0 = vrel0**2/(2.*np.pi)*maGHz*1.e9 # Bandwidth in Hz
rcT = rc(theta, Bfld, Prd, maGHz) # conversion radius in pc
vc = 0.544467*np.sqrt(RNS/rcT) # free-fall velocity at rc in units of c
BWD = bandwidth0*(ut/vrel)**2 # bandwidth in Hz
if (profile == "PL"):
rhoa = MC_profile_self(density, r)
elif (profile == "NFW"):
rhoa = MC_profile_NFW(density, r)
Flux = np.pi/6.*ga**2*vc*(RNS/rcT)**3*BGeV**2*np.abs(3.*np.cos(theta)**2-1.)*(rhoa*RNS**3/ma)
# 1.e32 is the conversion from SI to muJy. hbar converts from GeV to s^-1
if ret_bandwidth:
# GeV/m^2 -> 1e32
# 1 muJy = 1e-32 J/m^2
return Flux/(BWD*4.*np.pi*(s0*pc)**2*hbarT) * 1.e32, BWD
else:
return Flux/(BWD*4.*np.pi*(s0*pc)**2*hbarT) * 1.e32,
#Isotropized radio signal
def signal_isotropic(Bfld, Prd, density, fa, ut, s0, r, ret_bandwidth=False, profile = "PL"):
    """Angle-averaged (isotropized) version of signal(), in microjansky.

    Uses a mean conversion radius and a fixed 1 kHz bandwidth instead of
    the velocity-derived one.

    NOTE(review): contains a leftover debug print(vc), and the
    ret_bandwidth=False branch returns a 1-tuple (trailing comma) —
    confirm both are intended.
    """
    cs = 3.0e8 # speed of light in m/s
    pc = 3.0860e16 # pc in m
    hbar = 6.582e-16 # GeV/GHz
    hbarT = 6.582e-25 # GeV s
    GaussToGeV2 = 1.953e-20 # GeV^2
    alpEM = 1/137.036 # Fine-structure constant
    Lambda = 0.0755 # confinment scale in GeV
    GeV_to_J = 1.602e-10
    ma = Lambda**2/fa # axion mass in GeV
    maGHz = ma/hbar # axion mass in GHz
    ga = alpEM/(2.*np.pi*fa)*(2./3.)*(4. + 0.48)/1.48 # axion-photon coupling in GeV^-1
    BGeV = GaussToGeV2*Bfld # B field in GeV^2
    #rcT = rc(theta, Bfld, Prd, maHz) # conversion radius in pc
    #Mean conversion radius: = 0.5 int_{-1}^{1} |3 x - 1|^{1/3} dx ~ 0.869
    #Fix theta = pi, to give |3 cos(theta) - 1|^{1/3} = 1 in the expression for r_c
    rcT_mean = 0.869*rc(np.pi, Bfld, Prd, maGHz) # *MEAN* conversion radius in pc
    #vc = 0.544467*np.sqrt(RNS/rcT_mean) # free-fall velocity at rc in units of c
    vc = np.sqrt(2*G_pc*MNS/rcT_mean)/(cs/pc) # free-fall velocity at *MEAN* rc in units of c
    print(vc)  # NOTE(review): debug leftover
    BWD = 1.0e3 + density*0.0 #Fix bandwidth to 1 kHz
    # Axion density at radius r for the chosen minicluster profile.
    if (profile == "PL"):
        rhoa = MC_profile_self(density, r)
    elif (profile == "NFW"):
        rhoa = MC_profile_NFW(density, r)
    #Enhanced density at r_c (obtained by conservation of flux)
    rho_rc = rhoa*np.sqrt(2*G_pc*MNS/rcT_mean)/ut
    #Correct angular dependence of B-field should be B^2(theta) ~ (3.*np.cos(theta)**2+1.)
    #See. Eq. 2 of https://arxiv.org/pdf/1811.01020.pdf. Taking the angular average we just
    #get a factor of 2: 0.5 int_{-1}^1 (3.*x**2+1.) dx = 2
    #Flux = 2*np.pi/6.*ga**2*vc*(RNS/rcT_mean)**3*BGeV**2*(rho_rc*RNS**3/ma)
    Flux = 2*np.pi/6.*ga**2*(RNS/rcT_mean)**3*BGeV**2*(rho_rc*RNS**3/ma) #Corrected by a factor of 1/vc
    # 1.e32 is the conversion from SI to muJy. hbar converts from GeV to s^-1
    if ret_bandwidth:
        return Flux*GeV_to_J/(BWD*4.*np.pi*(s0*pc)**2*hbarT) * 1.e32, BWD
    else:
        return Flux*GeV_to_J/(BWD*4.*np.pi*(s0*pc)**2*hbarT) * 1.e32,
#BJK: I don't think this code is used...
#BJK: I don't think this code is used...
def n(rho, psi):
    # The AMC starts with a position defined by rho in pc
    # Its angle out of the plane is given by psi
    # Returns a function n(t): the NS number density seen along the AMC's
    # circular orbit as a function of time.
    rho0 = 1.4e7*1e-9 # Msun pc^-3, see Table 1 in 1304.5127
    rs = 16.1e3 # pc
    Menc = 4*np.pi*rho0*rs**3*(np.log((rs+rho)/rs) - rho/(rs+rho))
    # Orbital phase at time t (angular velocity sqrt(G M / rho^3)).
    gravfactor = lambda t: np.sqrt(G_pc*(Menc)/rho**3)*t
    # Cylindrical coordinates of the orbit, tilted by psi out of the plane.
    R = lambda t: np.sqrt((np.cos(psi)*rho*np.cos(gravfactor(t)))**2 + (rho*np.sin(gravfactor(t)))**2)
    Z = lambda t: np.sin(psi)*rho*np.cos(gravfactor(t))
    n_t = lambda t: nNS(R(t), Z(t))
    return n_t
def Gamma(nfunc, Tage, sigmav):
    """Time-integrated encounter rate: trapz of nfunc(t)*sigmav over a
    log-spaced grid from t=1 to t=Tage."""
    times = np.geomspace(1, Tage, 1000)
    rates = nfunc(times) * sigmav
    return np.trapz(rates, x=times)
def inverse_transform_sampling_log(function, x_range, nbins=1000, n_samples=1000):
    """Draw n_samples from an (unnormalised) pdf via inverse-transform
    sampling on a LOG-spaced grid over x_range.

    function is evaluated at bin midpoints; the CDF is built with
    cumulative trapezoidal integration and inverted by interpolation.
    """
    bins = np.geomspace(x_range[0], x_range[-1], num=nbins)
    # Evaluate the pdf at the bin midpoints.
    pdf = function(np.delete(bins,-1) + np.diff(bins)/2)
    # Norm = np.sum(pdf*np.diff(bins))
    Norm = np.trapz(pdf, x=np.delete(bins,-1) + np.diff(bins)/2)
    pdf /= Norm
    # cum_values = np.zeros(bins.shape)
    cum_values = cumtrapz(pdf, x=np.delete(bins,-1) + np.diff(bins)/2, initial=0.0)
    # Invert the CDF and sample it with uniform random numbers.
    inv_cdf = interp1d(cum_values, np.delete(bins,-1) + np.diff(bins)/2)
    r = np.random.rand(n_samples)
    return inv_cdf(r)
def inverse_transform_sampling(function, x_range, nbins=1000, n_samples=1000):
    """Draw n_samples from an (unnormalised) pdf via inverse-transform
    sampling on a LINEAR grid over x_range.

    Identical to inverse_transform_sampling_log except for the bin spacing.
    """
    bins = np.linspace(x_range[0], x_range[-1], num=nbins)
    # Evaluate the pdf at the bin midpoints.
    pdf = function(np.delete(bins,-1) + np.diff(bins)/2)
    Norm = np.trapz(pdf, x=np.delete(bins,-1) + np.diff(bins)/2)
    pdf /= Norm
    # cum_values = np.zeros(bins.shape)
    cum_values = cumtrapz(pdf, x=np.delete(bins,-1) + np.diff(bins)/2, initial=0.0)
    # Invert the CDF and sample it with uniform random numbers.
    inv_cdf = interp1d(cum_values, np.delete(bins,-1) + np.diff(bins)/2)
    r = np.random.rand(n_samples)
    return inv_cdf(r)
|
# GET request demo: fetch a page and inspect the response object.
import requests
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf8') # Change default encoding to utf8
r = requests.get('https://www.baidu.com/')
print(type(r))
print(r.status_code)
print(type(r.text))
print(r.text)
print(r.cookies)
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import pytest
from unittest.mock import patch
from niacin.text.en import char
@pytest.mark.parametrize(
    "string,p,l", [("", 0.0, 0), ("", 1.0, 0), ("bob", 0.0, 3), ("bob", 1.0, 6)]
)
def test_add_characters(string, p, l):
    """p=0 leaves the length unchanged, p=1 inserts a character at every
    position (length doubles); empty input always stays empty."""
    res = char.add_characters(string, p)
    assert len(res) == l
@pytest.mark.parametrize("string", [(""), ("qwerty")])
def test_add_fat_thumbs(string):
    """p=0 must return the input unchanged; p=1 replaces every character
    with a keyboard neighbour, so each position must differ."""
    res = char.add_fat_thumbs(string, 0.0)
    assert res == string
    res = char.add_fat_thumbs(string, 1.0)
    # zip also handles the empty string case (no iterations).
    for left, right in zip(res, string):
        assert left != right
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("you what mate?", 0.0, "you what mate?"),
        ("you what mate?", 1.0, "u w@ m8?"),
        ("shadow banned", 1.0, "shad0w b&"),
    ],
)
def test_add_leet(string, p, exp):
    """Leetspeak substitutions are applied with probability p; p=1
    rewrites every known pattern, p=0 none."""
    res = char.add_leet(string, p)
    assert res == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("alice is not dead", 0.0, "alice is not dead"),
        ("alice is not dead", 1.0, "alice isn't dead"),
    ],
)
def test_add_contractions(string, p, exp):
    """Expanded forms ("is not") are contracted ("isn't") with
    probability p."""
    res = char.add_contractions(string, p)
    assert res == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("alice isn't dead", 0.0, "alice isn't dead"),
        ("alice isn't dead", 1.0, "alice is not dead"),
    ],
)
def test_add_expansionss(string, p, exp):
    """Contractions ("isn't") are expanded ("is not") with probability p.

    NOTE(review): the name has a typo ("expansionss") and actually
    exercises ``remove_contractions``; left as-is to avoid renaming.
    """
    res = char.remove_contractions(string, p)
    assert res == exp
@pytest.mark.parametrize(
    "string,p,l", [("", 0.0, 0), ("", 1.0, 0), ("bob", 0.0, 3), ("bob", 1.0, 0)]
)
def test_remove_characters(string, p, l):
    """Each character is dropped with probability p: p=1 empties the
    string, p=0 keeps the original length."""
    res = char.remove_characters(string, p)
    assert len(res) == l
@pytest.mark.parametrize(
    "string,p,l",
    [
        ("", 0.0, 0),
        ("", 1.0, 0),
        (r"""~`!'";:,.<>[]\_-""", 0.0, 16),
        (r"""~`!'";:,.<>[]\#$""", 1.0, 0),
        (r"""bob~`!'";:,.<>[]\@&""", 0.0, 19),
        (r"""bob~`!'";:,.<>[]\{}""", 1.0, 3),
    ],
)
def test_remove_punctuation(string, p, l):
    """Punctuation characters are removed with probability p; letters
    ("bob") always survive, so p=1 leaves only them."""
    res = char.remove_punctuation(string, p)
    assert len(res) == l
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("The man has a brown dog", 0.0, "The man has a brown dog"),
        ("The man has a brown dog", 1.0, "Themanhasabrowndog"),
    ],
)
def test_remove_whitespace(string, p, exp):
    """Spaces are deleted with probability p; p=1 joins all words."""
    res = char.remove_whitespace(string, p)
    assert res == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [("", 0.0, ""), ("", 1.0, " "), ("dog", 0.0, "dog"), ("dog", 1.0, " d o g ")],
)
def test_add_whitespace(string, p, exp):
    """A space is inserted at each position with probability p; note the
    empty string still gains one space at p=1."""
    res = char.add_whitespace(string, p)
    assert res == exp
@pytest.mark.parametrize(
    "string,p,choice,exp",
    [
        ("", 0.0, 0, ""),
        ("", 1.0, 2, ""),
        ("haircut", 0.0, 0, "haircut"),
        ("haircut", 1.0, 0, ""),
        ("haircut", 0.0, 2, "haircut"),
        ("haircut", 1.0, 2, "hhaaiirrccuutt"),
    ],
)
def test_add_macbook_keyboard(string, p, choice, exp):
    """The faulty-keyboard mode is chosen by random.choice, patched here:
    0 drops each affected key, 2 doubles it; p controls how many keys are
    affected. (``mock`` itself is unused beyond forcing the mode.)"""
    with patch('niacin.text.en.char.random.choice', return_value=choice) as mock:
        res = char.add_macbook_keyboard(string, p)
    assert res == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("The man", 0.0, "The man"),
        ("The man", 1.0, "hT eamn"),
    ],
)
def test_swap_chars(string, p, exp):
    """Adjacent characters are transposed with probability p; p=1 swaps
    every disjoint pair."""
    res = char.swap_chars(string, p)
    assert res == exp
import turtle
import random

# Canvas geometry, pen thickness, and a counter of out-of-bounds escapes.
swidth, sheight, pSize, exitCount = 300, 300, 3, 0
r, g, b, angle, dist, curX, curY = [0] * 7

turtle.title('거북이가 맘대로 다니기')
turtle.shape('turtle')
turtle.pensize(pSize)
turtle.setup(width = swidth + 30, height = sheight + 30)
turtle.screensize(swidth, sheight)

while True:
    # Random pen colour for this step.
    r = random.random()
    g = random.random()
    b = random.random()
    turtle.pencolor((r, g, b))
    # Turn by a random angle and walk a random distance.
    angle = random.randrange(0, 360)
    dist = random.randrange(1, 100)
    turtle.left(angle)
    turtle.forward(dist)
    curX = turtle.xcor()
    curY = turtle.ycor()
    # Escaping the visible canvas teleports the turtle back home;
    # after five escapes the walk ends.
    if abs(curX) > swidth / 2 or abs(curY) > sheight / 2:
        turtle.penup()
        turtle.goto(0, 0)
        turtle.pendown()
        exitCount += 1
        if exitCount >= 5:
            break
turtle.done()
# Generated by Django 3.1.6 on 2021-02-04 04:58
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: alter the default of ``blog.date``."""

    dependencies = [
        ('blogger', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blog',
            name='date',
            # NOTE(review): the default is frozen at generation time
            # (2021-02-04 04:58 UTC); new rows receive this fixed
            # timestamp, not the current time. If "now" was intended the
            # model should use timezone.now — confirm against the model.
            field=models.DateTimeField(default=datetime.datetime(2021, 2, 4, 4, 58, 16, 824524, tzinfo=utc)),
        ),
    ]
|
#settings.py
class Settings():
    """Store all configuration values for the game."""

    def __init__(self):
        """Initialize the game's static screen, ship, bullet and alien
        settings, then seed the dynamic ones."""
        # Screen settings.
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (135, 206, 250)
        # Ship settings: number of ships the player gets per game.
        self.ship_limit = 3
        # Bullet settings.
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (60, 60, 60)
        # Alien settings.
        self.fleet_drop_speed = 10
        # Rate at which the game speeds up.
        self.speedup_scale = 1.2
        # Rate at which alien point values increase.
        self.score_scale = 1.5
        self.initialize_dynamic_settings()

    def initialize_dynamic_settings(self):
        """Initialize the settings that change during the game."""
        self.ship_speed = 1.5  # pixels moved per update
        self.bullet_speed = 5
        self.alien_speed_factor = 1
        # Fleet direction: 1 means right, -1 means left.
        self.fleet_direction = 1
        # Scoring.
        self.alien_points = 100

    def increase_speed(self):
        """Scale up the speed settings and the alien point value."""
        self.ship_speed *= self.speedup_scale
        self.bullet_speed *= self.speedup_scale
        self.alien_speed_factor *= self.speedup_scale
        self.alien_points = int(self.alien_points * self.score_scale)
|
#!/usr/bin/python
#\file rviz2.py
#\brief certain python script
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Nov.25, 2019
import roslib; roslib.load_manifest('std_msgs')
import rospy
import tf
import visualization_msgs.msg
import geometry_msgs.msg
import math
# Convert a 7-element vector x to geometry_msgs/Pose
def XToGPose(x):
    """Build a geometry_msgs Pose from [x, y, z, qx, qy, qz, qw]."""
    px, py, pz, qx, qy, qz, qw = x[0], x[1], x[2], x[3], x[4], x[5], x[6]
    pose = geometry_msgs.msg.Pose()
    pose.position.x = px
    pose.position.y = py
    pose.position.z = pz
    pose.orientation.x = qx
    pose.orientation.y = qy
    pose.orientation.z = qz
    pose.orientation.w = qw
    return pose
if __name__=='__main__':
    rospy.init_node('ros_min')
    # Single-marker publisher; queue of 1 because only the latest pose matters.
    viz_pub= rospy.Publisher('visualization_marker', visualization_msgs.msg.Marker, queue_size=1)
    t= rospy.Time.now()  # reference time for the oscillation below
    while not rospy.is_shutdown():
        marker= visualization_msgs.msg.Marker()
        marker.header.frame_id= 'base_link'
        marker.header.stamp= rospy.Time.now()
        marker.ns= 'visualizer'
        marker.id= 0
        marker.action= visualization_msgs.msg.Marker.ADD # or DELETE
        # Marker vanishes if not republished within one second.
        marker.lifetime= rospy.Duration(1.0)
        marker.type= visualization_msgs.msg.Marker.CUBE # or CUBE, SPHERE, ARROW, CYLINDER
        marker.scale.x= 0.2
        marker.scale.y= 0.2
        marker.scale.z= 0.2
        marker.color.a= 1.0  # fully opaque
        marker.color.r = 1.0  # red cube
        marker.color.g = 0.0
        marker.color.b = 0.0
        marker.pose= XToGPose([0.0,0.0,0.0, 0.0,0.0,0.0,1.0])
        # Slide the cube along x as a sinusoid of elapsed time.
        marker.pose.position.x= 1.0*math.sin((rospy.Time.now()-t).to_sec())
        viz_pub.publish(marker)
        rospy.sleep(0.05)
    # NOTE(review): publish() with no argument sends a default-constructed
    # Marker on shutdown — confirm this is intentional.
    viz_pub.publish()
    viz_pub.unregister()
|
# coding=utf-8
# 个人主页图片:包括个人照片和作品集
import json
import threading
from sqlalchemy import desc
from BaseHandlerh import BaseHandler
from Database.models import get_db
from Database.tables import User, UserHomepageimg, UserCollection, UserLike
from FileHandler.Upload import AuthKeyHandler
from Userinfo.UserImgHandler import UserImgHandler
class Userhpimg(BaseHandler):
    """Handler for personal homepage images and portfolio collections.

    A single ``post`` entry point dispatches on the ``type`` argument:
    homepage-image upload (token issue + DB insert), image deletion,
    thumbnail/full-size retrieval, and collection (portfolio) create /
    update / soft-delete plus friend and recommendation feeds. Every
    branch builds a ``retjson`` dict with a ``code`` (echoing the request
    type on success, usually type+1 on failure) and ``contents``, written
    back as JSON at the end.
    """

    def post(self):
        # NOTE(review): ``type`` shadows the builtin; kept as-is (Python 2 code).
        type = self.get_argument('type')
        # Add personal images, step 1: return an upload token.
        if type=='10808':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            try:
                userid = self.db.query(User).filter(User.Uid == u_id).one()
                key = userid.Uauthkey
                if key == auth_key:  # auth key matches: verified
                    print '验证通过'
                    ap_imgs_json = json.loads(imgs)
                    retjson_body = {'auth_key': '', 'uid': ''}
                    auth_key_handler = AuthKeyHandler()
                    retjson_body['auth_key'] = auth_key_handler.generateToken(ap_imgs_json)  # upload credential
                    retjson_body['uid']= userid.Uid
                    retjson['contents'] = retjson_body  # return the image token
                    retjson['code']= '10808'
                # NOTE(review): a wrong auth key falls through silently,
                # returning the empty-code template.
            except Exception, e:
                print e
                retjson['code']='10809'
                retjson['contents']='验证失败'
        # Insert images, step 2: record them in the database.
        elif type == '10810':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            try:
                userid = self.db.query(User).filter(User.Uid==u_id).one()
                key = userid.Uauthkey
                if key== auth_key:  # verified
                    print '验证通过'
                    ap_imgs_json = json.loads(imgs)
                    imhandler= UserImgHandler()
                    imhandler.insert_Homepage_image(ap_imgs_json,u_id)
                    retjson['code']= '10810'
                    retjson['contents'] = '数据库操作成功'
                else:
                    print '验证码错误'
                    retjson['code'] = '10810'
                    retjson['contents'] = '验证未通过'
            except Exception, e:
                print e
                retjson['code']='10810'
                retjson['contents']='查找用户失败'
        # Delete homepage images.
        elif type == '10811':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            try:
                userid = self.db.query(User).filter(User.Uid == u_id).one()
                key = userid.Uauthkey
                if key == auth_key:  # verified
                    print '验证通过'
                    print imgs
                    ap_imgs_json = json.loads(imgs)
                    print ap_imgs_json
                    imhandler = UserImgHandler()
                    imhandler.delete_Homepage_image(ap_imgs_json, u_id)
                    retjson['code'] = '10811'
                    retjson['contents'] = '数据库操作成功'
                else:
                    print '验证码错误'
                    retjson['code'] = '10811'
                    retjson['contents'] = '验证未通过'
            except Exception, e:
                print e
                retjson['code'] = '10811'
                retjson['contents'] = '查找用户失败'
        # Fetch homepage thumbnails (200*200).
        elif type == '10812':
            retjson = {'code': '', 'contents': ""}
            uhuser = self.get_argument('uid')
            authkey = self.get_argument('authkey')
            try:
                userid = self.db.query(User).filter(User.Uauthkey == authkey).one()
                if userid:  # verified
                    img= UserImgHandler()
                    try:
                        piclist = img.UHgetsquarepic(uhuser)
                        retjson['code']='10812'
                        retjson['contents']= piclist
                    except Exception, e:
                        print e
                        # NOTE(review): '10811' here looks like a typo for
                        # '10813' (the failure code of this branch) — confirm.
                        retjson['code'] = '10811'
                        retjson['contents'] = '获取图片信息失败'
                else:
                    print'认证错误'
                    retjson['code']='10813'
                    retjson['contents']= '用户认证错误'
            except Exception, e:
                print e
                retjson['code']='10813'
                retjson['contents']='未找到该用户'
        # Fetch personal thumbnails and full-size URLs (homepage detail view).
        elif type == '10814':
            retjson = {'code': '', 'contents': ""}
            uhuser = self.get_argument('uid')
            authkey = self.get_argument('authkey')
            try:
                userid = self.db.query(User).filter(User.Uauthkey == authkey).one()
                # isself == 0 when the viewer looks at their own page.
                if int(userid.Uid) == int(uhuser):
                    isself = 0
                else:
                    isself = 1
                img= UserImgHandler()
                piclist = img.UHpicget(uhuser)
                piclist02 = img.UHpicgetassign(uhuser)
                retjson['code'] = '10814'
                retjson['isself'] = isself
                retjson['originurl'] = piclist
                retjson['contents'] = piclist02
            except Exception, e:
                print e
                retjson['code']='10815'
                retjson['contents']='未找到该用户'
        # Publish a collection, step 1: return an upload token and create the row.
        elif type == '10804':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            uc_title = self.get_argument('title')
            try:
                userid = self.db.query(User).filter(User.Uid==u_id).one()
                key = userid.Uauthkey
                if key== auth_key:  # verified
                    print '验证通过'
                    ap_imgs_json = json.loads(imgs)
                    retjson_body = {'auth_key': '', 'ucid': ''}
                    auth_key_handler = AuthKeyHandler()
                    retjson_body['auth_key'] = auth_key_handler.generateToken(ap_imgs_json)  # upload credential
                    # Insert the (empty, not-yet-valid) collection row.
                    try:
                        new_usercollection = UserCollection(
                            UCuser=u_id,
                            UCtitle=uc_title,
                            UCcontent='',
                            UCvalid=0,
                            UCiscollection=0,
                        )
                        self.db.merge(new_usercollection)
                        self.db.commit()
                    except Exception,e :
                        print e
                        retjson['contents']='数据库插入失败'
                    # Read the row back to report its id.
                    try:
                        print '插入成功,进入查询'
                        uc = self.db.query(UserCollection).filter(
                            UserCollection.UCtitle == uc_title, UserCollection.UCuser == u_id).one()
                        ucid = uc.UCid
                        retjson_body['ucid'] = ucid
                        retjson['contents'] = retjson_body
                        retjson['code']='10804'
                    except Exception, e:
                        print '插入失败!!'
                        retjson['contents'] = r'服务器插入失败'
                else:
                    retjson['code']= '10805'
                    retjson['contents']= '用户认证失败'
            except Exception, e:
                print e
                retjson['code'] = '10805'
                retjson['contents'] = "该用户名不存在"
        # Publish a collection, step 2: write content and images to the DB.
        elif type == '10806':
            retjson = {'code': '', 'contents': ""}
            print "进入10806"
            uc_id = self.get_argument('ucid')
            auth_key = self.get_argument('authkey')
            ap_title = self.get_argument('title')
            uc_content = self.get_argument('content')
            uc_imgs = self.get_argument('imgs')
            try:
                self.db.query(UserCollection).filter(UserCollection.UCid == uc_id).\
                    update({ UserCollection.UCcontent: uc_content,
                             UserCollection.UCvalid: 1,
                             UserCollection.UCtitle:ap_title,
                             }, synchronize_session=False)
                self.db.commit()
                print '更新完成'
                try:
                    imghandler = UserImgHandler()
                    uc_images_json = json.loads(uc_imgs)
                    imghandler.insert_UserCollection_image(uc_images_json, uc_id)
                    self.db.commit()
                    retjson['code'] = '10806'
                    retjson['contents'] = '修改/发布作品集成功'
                except Exception, e:
                    print e
                    retjson['code']='10806'
                    retjson['contents'] = u'插入图片表失败'
            except Exception, e:
                print e
                # NOTE(review): this failure path leaves code/contents empty.
        # Delete images from a single collection.
        elif type == '10820':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            uc_id = self.get_argument('ucid')
            try:
                userid = self.db.query(User).filter(User.Uid == u_id).one()
                key = userid.Uauthkey
                if key == auth_key:  # verified
                    print '作品集删除' \
                          '图片验证通过'
                    ap_imgs_json = json.loads(imgs)
                    imhandler = UserImgHandler()
                    imhandler.delete_UserCollection_image(ap_imgs_json,uc_id)
                    get_db().commit()
                    retjson['code'] = '10820'
                    retjson['contents'] = '数据库操作成功'
                else:
                    retjson['code'] = '10821'
                    retjson['contents'] = '认证失败'
            except Exception, e:
                print e
                retjson['code'] = '10821'
                retjson['contents'] = '未找到此用户'
        # Delete collections (soft delete via UCvalid = 0).
        elif type == '10826':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            auth_key = self.get_argument('authkey')
            uc_id = self.get_argument('ucid')
            try:
                userid = self.db.query(User).filter(User.Uid == u_id).one()
                key = userid.Uauthkey
                if key == auth_key:  # verified
                    print '删除作品集验证通过'
                    ucidlist = json.loads(uc_id)
                    for item in ucidlist:
                        user_collection = self.db.query(UserCollection).filter(UserCollection.UCid == item).one()
                        user_collection.UCvalid = 0
                        self.db.commit()
                    retjson['code'] = '10826'
                    retjson['contents'] = '删除作品集成功'
                else:
                    retjson['code'] = '10827'
                    retjson['contents'] = '认证失败'
            except Exception, e:
                print e
                retjson['code'] = '10827'
                retjson['contents'] = '未找到此用户'
        # Add images to a collection, step 1: return an upload token.
        elif type == '10822':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            try:
                userid = self.db.query(User).filter(User.Uid == u_id).one()
                key = userid.Uauthkey
                if key == auth_key:  # verified
                    print '作品集添加图片(first step)'
                    ap_imgs_json = json.loads(imgs)
                    auth_key_handler = AuthKeyHandler()
                    retjson['contents'] = auth_key_handler.generateToken(ap_imgs_json)  # return the image token
                    retjson['code'] = '10822'
                # NOTE(review): a wrong auth key falls through silently here too.
            except Exception, e:
                print e
                retjson['code'] = '10823'
                retjson['contents'] = '验证失败'
        # Add images to a collection, step 2: update the row and insert images.
        elif type == '10824':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            imgs = self.get_argument('imgs')
            auth_key = self.get_argument('authkey')
            uc_id = self.get_argument('ucid')
            uc_content =self.get_argument('content')
            uc_title = self.get_argument('title')
            try:
                userid = self.db.query(User).filter(User.Uid == u_id).one()
                key = userid.Uauthkey
                if key == auth_key:  # verified
                    print '验证通过'
                    self.db.query(UserCollection).filter(UserCollection.UCid == uc_id). \
                        update({UserCollection.UCcontent: uc_content,
                                UserCollection.UCvalid: 1,
                                UserCollection.UCtitle: uc_title,
                                }, synchronize_session=False)
                    self.db.commit()
                    print '更新完成'
                    ap_imgs_json = json.loads(imgs)
                    imhandler = UserImgHandler()
                    imhandler.insert_UserCollection_image(ap_imgs_json,uc_id)
                    self.db.commit()
                    retjson['code'] = '10824'
                    retjson['contents'] = '数据库操作成功'
                else:
                    retjson['code'] = '10824'
                    retjson['contents'] = '认证失败'
            except Exception, e:
                print e
                retjson['code'] = '10824'
                retjson['contents'] = '未找到此用户'
        # List a user's collections (thumbnail + time + title).
        elif type == '10818':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            auth_key = self.get_argument("authkey")
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()
                if int(userid.Uid) == int(u_id):
                    isself = 0
                else:
                    isself = 1
                retjson['isself'] = isself
                imghandler = UserImgHandler()
                retdata = []
                pic = self.db.query(UserCollection).filter(UserCollection.UCuser == u_id,UserCollection.UCvalid == 1).all()
                print '进入作品集列表获取'
                try:
                    for item in pic:
                        retdata.append(imghandler.UC_simple_model(item, u_id))
                    retjson['code'] = '10818'
                    retjson['contents'] = retdata
                except Exception, e:
                    print e
            except Exception, e:
                print e
                retjson['contents'] = '用户认证失败'
        # Fetch a single collection (thumbnail and full-size URLs).
        elif type == '10816':
            print ''
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            auth_key = self.get_argument("authkey")
            uc_id = self.get_argument('ucid')
            imghandler=UserImgHandler()
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()
                try:
                    pic = self.db.query(UserCollection).filter(UserCollection.UCid == uc_id).one()
                    if int(userid.Uid) == int(u_id):
                        isself = 0
                    else:
                        isself = 1
                    retjson['code'] = '10816'
                    retjson['isself'] = isself
                    retjson['contents'] = imghandler.UCmodel(pic, u_id, userid.Uid)
                except Exception, e:
                    print e
                    retjson['code']='10817'
                    retjson['contents'] = '没有这个作品'
            except Exception, e:
                print e
                retjson['contents'] = '用户认证失败'
        # Homepage collection covers (200*200).
        elif type == '10828':
            retjson = {'code': '', 'contents': ""}
            u_id = self.get_argument('uid')
            auth_key = self.get_argument("authkey")
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()
                imghandler = UserImgHandler()
                retdata = []
                pic = self.db.query(UserCollection).filter(UserCollection.UCuser == u_id,UserCollection.UCvalid == 1).all()
                print '进入作品集列表获取'
                try:
                    for item in pic:
                        retdata.append(imghandler.UC_homepage_model(item,u_id))
                    retjson['code'] = '10828'
                    retjson['contents'] = retdata
                except Exception, e:
                    print e
            except Exception, e:
                print e
                retjson['contents'] = '用户认证失败'
        # Collection feed: friends (latest six).
        elif type == '10830':
            retjson = {'code': '10831', 'contents': ''}
            retdata = []
            auth_key = self.get_argument("authkey")
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()  # the requesting user
                imghandler = UserImgHandler()
                friendlist = imghandler.friendlist(userid.Uid)
                print '进入作品集列表获取'
                print userid.Uid
                print friendlist
                try:
                    UserCollecions = self.db.query(UserCollection).filter(UserCollection.UCuser.in_(friendlist),
                                                                          UserCollection.UCvalid == 1).\
                        order_by(desc(UserCollection.UCid)).limit(6).all()
                    for item in UserCollecions:
                        retdata.append(imghandler.UC_login_model(item, item.UCuser, userid.Uid))
                    retjson['code'] = '10830'
                    retjson['contents'] = retdata
                except Exception, e:
                    print e
                    retjson['contents'] = '获取作品集列表失败'
            except Exception, e:
                print e
                retjson['contents'] = '用户认证失败'
        # Refresh the friends feed (paginate: collections older than ``index``).
        elif type == '10832':
            print '请求更多好友作品集'
            retjson = {'code': '10833', 'contents': ''}
            retdata = []
            auth_key = self.get_argument("authkey")
            lastucid = self.get_argument('index')
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()  # the requesting user
                imghandler = UserImgHandler()
                friendlist = imghandler.friendlist(userid.Uid)
                print '进入作品集列表获取'
                print userid.Uid
                print friendlist
                try:
                    UserCollecions = self.db.query(UserCollection).filter(UserCollection.UCuser.in_(friendlist),
                                                                          UserCollection.UCvalid == 1,
                                                                          UserCollection.UCid < lastucid). \
                        order_by(desc(UserCollection.UCid)).limit(6).all()
                    for item in UserCollecions:
                        retdata.append(imghandler.UC_login_model(item, item.UCuser, userid.Uid))
                    retjson['code'] = '10832'
                    retjson['contents'] = retdata
                except Exception, e:
                    print e
                    retjson['contents'] = '获取作品集列表失败'
            except Exception, e:
                print e
                retjson['contents'] = '用户认证失败'
        # Collection feed: recommendations (latest six).
        elif type == '10834':
            retjson = {'code': '10835', 'contents': ''}
            retdata = []
            auth_key = self.get_argument("authkey")
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()  # the requesting user
                imghandler = UserImgHandler()
                reclist = imghandler.reclist(userid.Uid)
                try:
                    UserCollecions = self.db.query(UserCollection).filter(UserCollection.UCuser.in_(reclist),
                                                                          UserCollection.UCvalid == 1). \
                        order_by(desc(UserCollection.UCid)).limit(6).all()
                    for item in UserCollecions:
                        retdata.append(imghandler.UC_login_model(item, item.UCuser,userid.Uid))
                    retjson['code'] = '10834'
                    retjson['contents'] = retdata
                except Exception, e:
                    print e
                    retjson['contents'] = '获取作品集列表失败'
            except Exception, e:
                print e
                # NOTE(review): unlike the other feeds, this failure path
                # does not set contents.
        # Refresh the recommendation feed (paginate by ``index``).
        elif type == '10836':
            retjson ={'code':'10837', 'contents':''}
            retdata = []
            auth_key = self.get_argument("authkey")
            lastucid = self.get_argument('index')
            try:
                userid = self.db.query(User).filter(User.Uauthkey == auth_key).one()  # the requesting user
                imghandler = UserImgHandler()
                reclist = imghandler.reclist(userid.Uid)
                print '进入作品集列表获取'
                try:
                    UserCollecions = self.db.query(UserCollection).filter(UserCollection.UCuser.in_(reclist),
                                                                          UserCollection.UCvalid == 1,
                                                                          UserCollection.UCid < lastucid). \
                        order_by(desc(UserCollection.UCid)).limit(6).all()
                    for item in UserCollecions:
                        retdata.append(imghandler.UC_login_model(item, item.UCuser, userid.Uid))
                    retjson['code'] = '10836'
                    retjson['contents'] = retdata
                except Exception, e:
                    print e
                    retjson['contents'] = '获取作品集列表失败'
            except Exception, e:
                print e
                retjson['contents'] = '用户认证失败'
        # NOTE(review): if ``type`` matched no branch above, retjson is
        # unbound and this line raises NameError.
        self.write(json.dumps(retjson, ensure_ascii=False, indent=2))
|
"""
This module is related to the usage of BigDFT with Fragment-related Quantities.
Input as well as Logfiles might be processed with the classes and methods
provided by it.
The main two classes for this module are :class:`BigDFT.Fragments.System` and
:class:`BigDFT.Fragments.Fragment`. A System is a named collection of
fragments, and a Fragment is a list of atoms. Thus a System behaves much like
a dictionary, whereas a Fragment behaves more like a list. All of the basic
dictionary and list like operations can be applied.
"""
from futile.Utils import write as safe_print
try:
from collections.abc import MutableMapping, MutableSequence
except ImportError:
from collections import MutableMapping, MutableSequence
def GetFragTuple(fragid):
    """
    Fragment ids should have the form: "NAME:NUMBER". This splits the
    fragment id into its name and number parts.

    Args:
        fragid (str): the fragment id string.

    Return:
        (tuple): fragment name, fragment number
    """
    pieces = fragid.split(":")
    return (pieces[0], pieces[1])
def distance(i, j, cell=None):
    """
    Distance between fragments, defined as distance between center of mass.

    Args:
        i (Fragment): first fragment.
        j (Fragment): second fragment.
        cell (array): an array describing the (orthorhombic) unit cell;
            dimensions with non-positive length are treated as free.

    Returns:
        (float): the distance between centers of mass.
    """
    import numpy
    vec = i.centroid - j.centroid
    if cell is not None:
        # Minimum-image convention: wrap each periodic component into
        # [-cell/2, cell/2]. The previous version enumerated the *tuple*
        # returned by numpy.where (shadowing argument ``i``), so the wrap
        # was applied to the wrong indices; iterate the index array
        # instead. ``is not None`` also avoids the ambiguous truth value
        # of a multi-element array.
        cell = numpy.asarray(cell)
        for axis in numpy.where(cell > 0.0)[0]:
            vec[axis] -= cell[axis] * int(round(vec[axis] / cell[axis]))
    return numpy.sqrt(numpy.dot(vec, vec.T))
def pairwise_distance(i, j, cell=None):
    """
    Distance between fragments, as defined by the distance between their
    two nearest atoms.

    Args:
        i (Fragment): first fragment.
        j (Fragment): second fragment.
        cell (array): an array describing the unit cell.

    Returns:
        (float): the pairwise distance between fragments.
    """
    if cell:
        # Periodic case: brute-force scan over all atom pairs with the
        # minimum-image metric, seeded with the first pair.
        dist = distance(Fragment([i[0]]), Fragment([j[0]]), cell)
        for at_i in i:
            for at_j in j:
                dist = min(dist, distance(Fragment([at_i]), Fragment([at_j]), cell))
        return dist
    # Free boundary conditions: one vectorized distance matrix suffices.
    from scipy.spatial.distance import cdist
    first = [at.get_position() for at in i]
    second = [at.get_position() for at in j]
    return cdist(first, second).min()
def plot_fragment_information(axs, datadict, colordict=None, minval=None):
    """
    Often times we want to plot measures related to the different fragments
    in a system. For this routine, you can pass a dictionary mapping
    fragment ids to some kind of value. This routine takes care of the
    formatting of the axis to easily read the different fragment names.

    Args:
        axs (matplotlib.Axes): an axes object to plot on.
        datadict (dict): a dictionary from fragment ids to some kind of data
          value.
        colordict (dict): optionally, a dictionary from fragment ids to a
          color value.
        minval (float): only values above this minimum value are plotted.
    """
    from BigDFT.Fragments import GetFragTuple
    # Compute minval. Compare against None explicitly so an explicit
    # threshold of 0.0 is honoured instead of being recomputed.
    if minval is None:
        minval = min(datadict.values())
    # Sort by fragment number (ids look like "NAME:NUMBER").
    slabels = sorted(datadict.keys(),
                     key=lambda x: int(GetFragTuple(x)[1]))
    # Remove values below the minimum, keeping labels and values aligned.
    slabels = [x for x in slabels if datadict[x] >= minval]
    svalues = [datadict[x] for x in slabels]
    # Label the axis by fragments. Use the filtered length so tick
    # positions match the tick labels (the previous version used
    # len(datadict), misaligning labels whenever anything was filtered).
    axs.set_xlabel("Fragment", fontsize=12)
    axs.set_xticks(range(len(slabels)))
    axs.set_xticklabels(slabels, rotation=90)
    # Plot the actual values.
    axs.plot(svalues, 'x', markersize=12, color='k')
    # Overplot the colored values.
    if colordict:
        for i, key in enumerate(slabels):
            if key not in colordict:
                continue
            axs.plot(i, svalues[i], 'x', markersize=12,
                     color=colordict[key])
class Lattice():
    """
    Defines the fundamental objects to deal with periodic systems.
    """
    def __init__(self, vectors):
        # Primitive translation vectors of the lattice.
        self.vectors = vectors

    def grid(self, origin=[0.0, 0.0, 0.0], extremes=None, radius=None):
        """
        Produces a set of translation vectors from a given origin.

        Args:
            origin (list): starting point the translations are added to.
            extremes (list): per-axis (min, max) integer multiples; if
                omitted no translations are generated.
            radius (float): if given, keep only translations whose shift
                (excluding the origin) is shorter than this radius.

        Returns:
            (list): numpy arrays, one per retained translation.
        """
        import numpy as np
        # Discrete multiples to scan for each lattice vector.
        ranges = [[], [], []]
        if extremes is not None:
            for axis, bounds in enumerate(extremes):
                ranges[axis] = list(range(bounds[0], bounds[1] + 1))
        translations = []
        for mi in ranges[0]:
            base_i = np.array(self.vectors[0]) * mi
            for mj in ranges[1]:
                base_j = np.array(self.vectors[1]) * mj + base_i
                for mk in ranges[2]:
                    shift = np.array(self.vectors[2]) * mk + base_j
                    keep = True if radius is None else np.linalg.norm(shift) < radius
                    if keep:
                        translations.append(np.array(origin) + shift)
        return translations
class RotoTranslation():
    """
    Define a transformation which can be applied to a fragment. This
    rotation is defined by giving this class two fragments, and the
    rototranslation between those fragments is automatically computed.

    Args:
        frag1 (BigDFT.Fragments.Fragment): the first position.
        frag2 (BigDFT.Fragments.Fragment): the second position.
    """
    def __init__(self, frag1, frag2):
        try:
            from BigDFT import wahba
            from numpy import matrix
            # Stack both sets of atomic positions as row matrices and
            # solve Wahba's problem for the best-fit R and t.
            start = matrix([at.get_position() for at in frag1])
            target = matrix([at.get_position() for at in frag2])
            self.R, self.t, self.J = wahba.rigid_transform_3D(start, target)
        except Exception as e:
            # Mark the transformation as invalid via a huge cost value.
            safe_print('Error', e)
            self.R, self.t, self.J = (None, None, 1.0e10)

    def dot(self, frag):
        """
        Apply the rototranslations on a fragment.

        Args:
            frag (BigDFT.Fragments.Fragment): the fragment to rototranslate.

        Return:
            (BigDFT.Fragments.Fragment): the rototranslated fragment.
        """
        from BigDFT import wahba as w
        from numpy import matrix, array
        from copy import deepcopy
        positions = matrix([at.get_position() for at in frag])
        # Pure rotation, pure translation, or the full transformation.
        if self.t is None:
            moved = w.apply_R(self.R, positions)
        elif self.R is None:
            moved = w.apply_t(self.t, positions)
        else:
            moved = w.apply_Rt(self.R, self.t, positions)
        # Write the transformed coordinates into a copy of the fragment.
        result = deepcopy(frag)
        for idx in range(len(result)):
            result[idx].set_position([float(x) for x in array(moved[idx, :])[0]])
        return result

    def invert(self):
        """
        Computes the inverse rototranslation (in place).
        """
        self.t = -self.t
        if self.R is not None:
            self.R = self.R.T
class Translation(RotoTranslation):
    """
    This class defines a simple translation.

    Args:
        t (list): the vector describing the translation.
    """
    def __init__(self, t):
        import numpy
        # A pure translation: no rotation part and a perfect (zero) cost.
        self.R = None
        self.J = 0.0
        # Store the shift as a 3x1 column matrix.
        self.t = numpy.mat(t).reshape(3, 1)
class Rotation(RotoTranslation):
    """
    This class defines a simple rotation.

    Args:
        R (matrix): the matrix describing the rotation.
    """
    def __init__(self, R):
        # A pure rotation: no translation part and a perfect (zero) cost.
        self.R = R
        self.t = None
        self.J = 0.0
def interpolate_fragments(A, B, steps, extrapolation_steps=0):
    """
    Given two fragments A and B, this generates a list of Fragments
    that interpolate between A and B in a specified number of steps.

    Args:
        A (BigDFT.Fragments.Fragment): starting fragment.
        B (BigDFT.Fragments.Fragment): ending fragment.
        steps (int): the number of steps to take between A and B.
        extrapolation_steps (int): optionally, we can extrapolate a number
          of steps beyond B on the same trajectory.

    Returns:
        (list): a list of fragments interpolating between A and B
        including A and B.
    """
    from numpy import matrix, array
    from BigDFT.wahba import interpolate_points
    from copy import deepcopy
    start = matrix([at.get_position() for at in A])
    end = matrix([at.get_position() for at in B])
    trajectory = interpolate_points(start, end, steps, extrapolation_steps)
    # One frame per interpolation point: A, the intermediates, B, and any
    # extrapolated frames.
    frames = []
    for step in range(0, steps + extrapolation_steps + 2):
        frame = deepcopy(A)
        snapshot = trajectory[step]
        for j in range(0, len(frame)):
            frame[j].set_position([float(x) for x in array(snapshot[j])[0]])
        frames.append(frame)
    return frames
class Fragment(MutableSequence):
"""
A fragment is a list of atoms in a system. Fragment might have quantities
associated to it, like its electrostatic multipoles (charge, dipole, etc.)
and also geometrical information (center of mass, principal axis etc.). A
Fragment might also be rototranslated and combined with other moieties to
form a :class:`BigDFT.Fragments.System`.
Args:
atomlist (list): list of atomic dictionaries defining the fragment
xyzfile (XYZReader): an XYZ file to read from.
posinp (dict): the posinp style dictionary from a logfile/input file.
.. todo::
Define and describe if this API is also suitable for solid-state
fragments
"""
    def __init__(self, atomlist=None, xyzfile=None, posinp=None, astruct=None):
        """Build the atom list from one of the given sources.

        Priority order when several sources are passed: atomlist, then
        xyzfile, then posinp, then astruct.
        """
        from BigDFT.Atom import Atom
        self.atoms = []
        # insert atoms.
        if atomlist:
            for atom in atomlist:
                self.append(Atom(atom))
        elif xyzfile:
            # The reader is used as a context manager and iterated line by line.
            with xyzfile:
                for line in xyzfile:
                    self.append(Atom(line))
        elif posinp:
            units = posinp.get('units', 'angstroem')
            for atom in posinp['positions']:
                self.append(Atom(atom, units=units))
        elif astruct:
            units = astruct.get('units', 'angstroem')
            # Undo the rigid shift that was applied to the coordinates.
            rshift = astruct.get('Rigid Shift Applied (AU)', [0.0, 0.0, 0.0])
            for atom in astruct["positions"]:
                self.append(Atom(atom, units=units))
            self.translate([-1.0*x for x in rshift])
        # Values associated with the whole fragment, filled in later:
        # purity indicator, multipoles (q0/q1/q2), frozen flag and
        # connectivity matrix.
        self.purity_indicator = None
        self.q0 = None
        self.q1 = None
        self.q2 = None
        self.frozen = None
        self.conmat = None
    def __len__(self):
        # Number of atoms in the fragment.
        return len(self.atoms)
    def __delitem__(self, index):
        # Remove the atom(s) at ``index`` (int or slice) from the list.
        self.atoms.__delitem__(index)
    def insert(self, index, value):
        """Insert ``value`` at ``index``, coercing it to an Atom."""
        from BigDFT.Atom import Atom
        self.atoms.insert(index, Atom(value))
    def __setitem__(self, index, value):
        """Replace the entry at ``index`` with ``value`` wrapped in an Atom."""
        from BigDFT.Atom import Atom
        self.atoms.__setitem__(index, Atom(value))
def __getitem__(self, index):
# If they ask for only one atom, then we return it as an atom.
# but if it's a range we return a ``Fragment`` with those atoms in it.
if isinstance(index, slice):
return Fragment(atomlist=self.atoms.__getitem__(index))
else:
return self.atoms.__getitem__(index)
    def __add__(self, other):
        """Concatenation: return a deep copy of this fragment extended
        with ``other`` via ``+=``; neither operand is modified."""
        from copy import deepcopy
        # NOTE(review): relies on an in-place ``__iadd__`` presumably
        # defined elsewhere on this class — confirm its semantics there.
        rval = deepcopy(self)
        rval += other
        return rval
@property
def centroid(self):
"""
The center of a fragment.
"""
from numpy import mean, ravel
pos = [at.get_position() for at in self]
return ravel(mean(pos, axis=0))
def center_of_charge(self, zion):
"""
The charge center which depends both on the position and net charge
of each atom.
"""
from numpy import array
cc = array([0.0, 0.0, 0.0])
qtot = 0.0
for at in self:
netcharge = at.q0
zcharge = zion[at.sym]
elcharge = zcharge - netcharge
cc += elcharge * array(at.get_position())
qtot += elcharge
return cc / qtot
def d0(self, center=None):
"""
Fragment dipole, calculated only from the atomic charges.
Args:
center (list): the center of charge of the fragment.
If this is not present, the centroid is used.
"""
from numpy import zeros, array
# one might added a treatment for non-neutral fragments
# but if the center of charge is used the d0 value is zero
if center is not None:
cxyz = center
else:
cxyz = self.centroid
d0 = zeros(3)
found = False
for at in self:
if at.q0 is not None:
found = True
d0 += at.q0 * (array(at.get_position()) - array(cxyz))
if found:
return d0
else:
return None
def d1(self, center=None):
"""
Fragment dipole including the atomic dipoles.
Args:
center (list): the center of charge of the fragment.
If this is not present, the centroid is used.
"""
from numpy import zeros
d1 = zeros(3)
dtot = self.d0(center)
if dtot is None:
return dtot
found = False
for at in self:
if at.q1 is not None:
found = True
d1 += at.q1
if found:
return d1 + dtot
else:
return None
pass
def ellipsoid(self, center=0.0):
    """
    Todo: define the ellipsoid.

    Accumulates the second-moment matrix of the atomic positions about
    ``center``: the sum over atoms of the outer product r r^T (diagonal
    entries are the squared components, off-diagonals the cross terms).
    """
    import numpy as np
    moment = np.zeros((3, 3))
    for at in self:
        rxyz = np.array(at.get_position()) - center
        moment += np.outer(rxyz, rxyz)
    return np.mat(moment)
def fragment_transformation(self, frag2):
    """
    Returns the transformation among fragments if it exists.

    Args:
        frag2 (BigDFT.Fragments.Fragment): the fragment to transform
            between.

    Returns:
        (BigDFT.Fragments.RotoTranslation): the transformation matrix.
    """
    from numpy import mat
    # The transformation is computed between the two centroids.
    return RotoTranslation(mat(self.centroid), mat(frag2.centroid))
def get_external_potential(self, units="bohr", charge_offset=False):
    """
    Transform the fragment information into a dictionary ready to be
    put as an external potential.

    Args:
        units (str): the units of the external potential.
        charge_offset (bool): accepted for signature compatibility with
            ``System.get_external_potential`` but currently unused here.

    Returns:
        (list): one external-potential dictionary per atom.
    """
    # NOTE(review): charge_offset is not forwarded to the atoms; confirm
    # whether Atom.get_external_potential should receive it.
    pot = [at.get_external_potential(units) for at in self]
    return pot
def get_net_force(self):
    """
    Returns the net force on a fragment in Ha/Bohr.

    Returns:
        (list): three components of the summed atomic forces.
    """
    from numpy import array
    total = array([0.0, 0.0, 0.0])
    for at in self:
        total = total + array(at.get_force())
    return [float(component) for component in total]
def line_up(self):
    """
    Align the principal axis of inertia of the fragments along the
    coordinate axis. Also shift the fragment such as its centroid is zero.
    """
    from numpy.linalg import eig
    # Shift the fragment so its centroid sits at the origin.
    Shift = Translation(self.centroid)
    Shift.invert()
    self.transform(Shift)
    # now the centroid is zero
    # Diagonalize the second-moment matrix and rotate the fragment onto
    # its eigenvector frame.
    Imat = self.ellipsoid()
    w, v = eig(Imat)
    Redress = Rotation(v.T)
    self.transform(Redress)
    # now the principal axis of inertia are on the coordinate axis
@property
def qcharge(self):
    """
    The net charge on a fragment: the fragment's electronic monopole
    plus the core (valence) charge of every atom.
    """
    from BigDFT.Atom import nzion
    # NOTE(review): assumes self.q0 is a one-element list and never None;
    # confirm for fragments that carry no multipole data.
    netcharge = self.q0[0]
    for at in self:
        if "nzion" in at:
            # NOTE(review): at.qcharge["nzion"] looks inconsistent with
            # the membership test on the atom itself — verify against the
            # Atom class whether at["nzion"] was intended.
            zcharge = at.qcharge["nzion"]
        else:
            # Fall back to the tabulated valence charge for the symbol.
            zcharge = nzion(at.sym)
        netcharge += zcharge
    return netcharge
def rotate(self, x=None, y=None, z=None, units="radians"):
    """
    Rotate the fragment about its centroid.

    Args:
        x (float): angle to rotate on the x axis.
        y (float): angle to rotate on the y axis.
        z (float): angle to rotate on the z axis.
        units (str): either radians or degrees.

    Raises:
        ValueError: if units is neither "degrees" nor "radians".
    """
    from math import cos, sin, pi
    from numpy import mat, identity
    from copy import deepcopy
    # Deal with the units.
    # NOTE(review): a 0.0 angle is treated the same as None (no rotation
    # about that axis) because of the truthiness tests below; harmless,
    # since a zero rotation is the identity.
    if units == "degrees":
        if x:
            xval = x * pi / 180
        if y:
            yval = y * pi / 180
        if z:
            zval = z * pi / 180
    elif units == "radians":
        xval = x
        yval = y
        zval = z
    else:
        raise ValueError("Units must be degrees or radians")
    # Translate back to the origin so the rotation is about the centroid.
    centroid = self.centroid
    self.translate(-1.0 * centroid)
    # Build the rotation as a product of the elementary rotations about
    # each requested axis, applied in x, y, z order.
    rot = identity(3)
    if x:
        rx = mat([
            [1.0, 0.0, 0.0],
            [0.0, cos(xval), -sin(xval)],
            [0.0, sin(xval), cos(xval)]
        ])
        rot = rx.dot(rot)
    if y:
        ry = mat([
            [cos(yval), 0.0, sin(yval)],
            [0.0, 1.0, 0.0],
            [-sin(yval), 0.0, cos(yval)]
        ])
        rot = ry.dot(rot)
    if z:
        rz = mat([
            [cos(zval), -sin(zval), 0.0],
            [sin(zval), cos(zval), 0.0],
            [0.0, 0.0, 1.0]
        ])
        rot = rz.dot(rot)
    # Rotate every atom and copy the result back into this fragment.
    rt = Rotation(rot)
    rself = rt.dot(self)
    for i in range(0, len(self)):
        self[i] = deepcopy(rself[i])
    # Translate back to the original centroid.
    self.translate(centroid)
def translate(self, vec):
    """
    Translate the fragment along the vector provided.

    Args:
        vec (list): the x, y, z displacement (AU).
    """
    from copy import deepcopy
    mover = Translation(vec)
    moved = mover.dot(self)
    # Copy the translated atoms back into this fragment in place.
    for idx in range(len(self)):
        self[idx] = deepcopy(moved[idx])
def system_from_log(log, fragmentation=None):
    """
    This function returns a :class:`~BigDFT.Fragment.System` class out of a
    logfile. If the logfile contains information about fragmentation and
    atomic multipoles, then the system is created accordingly.
    Otherwise, the fragmentation scheme is determined by the fragmentation
    variable.

    Args:
        log (Logfile): the logfile of the QM run. In general must have been
            done with Linear Scaling formalism.
        fragmentation (str): the scheme to be used for the fragmentation in
            the case if not provided internally by the logfile.
            The possible values are ``atomic`` and ``full``, in which case
            the system has as many fragments as the number of atoms, or
            only one fragment, respectively.

    Returns:
        (BigDFT.Fragments.System) The instance of the class containing
        fragments.
    """
    # Name of the single whole-system fragment, e.g. "runname:0".
    name = log.log.get('run_name', 'FULL') + ':0'
    full_system = System()
    if "posinp" in log.log:
        posinp = log.log['posinp']
        full_system[name] = Fragment(posinp=posinp)
    else:
        full_system[name] = Fragment(astruct=log.astruct)
    # provide the atomic information on the system
    if hasattr(log, 'electrostatic_multipoles'):
        full_system.set_atom_multipoles(log)
    if hasattr(log, 'forces'):
        full_system.set_atom_forces(log)
    # now we may defragment the system according to the provided scheme
    if fragmentation == 'full':
        return full_system
    elif fragmentation == 'atomic' or 'posinp' not in log.log:
        # One fragment per atom. This branch is also the fallback when the
        # logfile carries no posinp (and hence no fragment annotations).
        atomic_system = System()
        for iat, at in enumerate(full_system[name]):
            atomic_system['ATOM:' + str(iat)] = Fragment([at])
        return atomic_system
    else:
        # Group atoms by the "frag" annotation of the posinp entries;
        # atoms without an annotation become single-atom fragments.
        posinp = log.log['posinp']
        frag_dict = {}
        for iat, tupl in enumerate(zip(posinp['positions'],
                                       full_system[name])):
            at, obj = tupl
            fragid = at.get('frag', 'ATOM:' + str(iat))
            # A [name, number] pair is flattened to "name:number".
            if isinstance(fragid, list):
                fragid = str(fragid[0]) + ':' + str(fragid[1])
            if fragid not in frag_dict:
                frag_dict[fragid] = [obj]
            else:
                frag_dict[fragid].append(obj)
        frag_system = System()
        for fragid in frag_dict:
            frag_system[fragid] = Fragment(frag_dict[fragid])
        return frag_system
class System(MutableMapping):
    """
    A system is defined as a named collection of fragments. You can manipulate
    a system as if it were a standard python dictionary, however it also has
    helper routines for performing operations on the full system.
    """
    def __init__(self, *args, **kwargs):
        # Backing dict for the MutableMapping interface.
        self.store = dict()
        self.update(dict(*args, **kwargs))
        # Connectivity data (fragid -> per-atom neighbor lists), filled in
        # externally and consumed by write_pdb; None until provided.
        self.conmat = None
    def dict(self):
        """
        Convert to a dictionary.
        """
        return self.store
    # MutableMapping plumbing: delegate to the backing dict, passing every
    # key through __keytransform__ (an identity hook for subclasses).
    def __getitem__(self, key):
        return self.store[self.__keytransform__(key)]
    def __setitem__(self, key, value):
        self.store[self.__keytransform__(key)] = value
    def __delitem__(self, key):
        del self.store[self.__keytransform__(key)]
    def __iter__(self):
        return iter(self.store)
    def __len__(self):
        return len(self.store)
    def __keytransform__(self, key):
        return key
@property
def centroid(self):
    """
    Center of mass of the system (mean of the fragment centroids).
    """
    from numpy import asarray
    frag_centers = [frag.centroid for frag in self.values()]
    return asarray(frag_centers).mean(axis=0)
@property
def central_fragment(self):
    """
    Returns the fragment whose center of mass is closest to the centroid.

    Returns:
        (str): the name of the fragment.
        (Fragment): the fragment object
    """
    import numpy as np
    names = list(self.keys())
    frags = list(self.values())
    # Offsets of each fragment centroid from the system centroid.
    offsets = [frag.centroid for frag in frags] - self.centroid
    best = np.argmin([np.dot(off, off.T) for off in offsets])
    return names[best], frags[best]
def get_external_potential(self, units="bohr", charge_offset=False):
    """
    Transform the system information into a dictionary ready to be
    put as an external potential.

    Args:
        units (str): the units of the external potential.
        charge_offset (bool): by default the external potential ignores
            the counter charge from the protons. Setting this to true
            adds the positive charge to the potential.
    """
    # Concatenate the per-fragment potentials into one flat list.
    values = []
    for frag in self.values():
        values.extend(frag.get_external_potential(units, charge_offset))
    # The global monopole is the sum of every atomic monopole.
    monopole = sum(entry["q0"][0] for entry in values)
    return {"units": units, "values": values, "global monopole": monopole}
def get_k_nearest_fragments(self, target, k, cutoff=None):
    """
    Given a fragment id in a system, this computes the k nearest
    fragments, ordered closest first.

    Args:
        target (str): the fragment to find the nearest neighbors of.
        k (int): the number of fragments to look for.
        cutoff (float): will only return fragments within this distance.

    Returns:
        (list): the ids of the nearest fragments.
    """
    from scipy.spatial import KDTree
    # Setup the KD Tree for distance lookup over every atom that is not
    # part of the target fragment.
    poslist = []
    frag_lookup = []
    for fragid, frag in self.items():
        if fragid == target:
            continue
        for at in frag:
            poslist.append(at.get_position())
            frag_lookup.append(fragid)
    tree = KDTree(poslist)
    # Find the nearest atoms with a query of the tree.
    targetpost = [x.get_position() for x in self[target]]
    if cutoff is not None:
        ndist, nearest = tree.query(targetpost, k=k,
                                    distance_upper_bound=cutoff)
    else:
        ndist, nearest = tree.query(targetpost, k=k)
    # With k == 1 the query drops the per-neighbor axis; normalize to the
    # nested shape iterated below.
    if k == 1:
        ndist = [ndist]
        nearest = [nearest]
    # We now have the nearest atoms to each atom of the target fragment.
    # Fold them into the minimum atom-atom distance per fragment.
    distdict = {}
    for i in range(0, len(nearest)):
        for idx, dist in zip(nearest[i], ndist[i]):
            try:
                fragidx = frag_lookup[idx]
            except IndexError:
                # kdtree returns invalid indices if it can't find enough
                # points.
                continue
            if fragidx not in distdict:
                distdict[fragidx] = dist
            elif dist < distdict[fragidx]:
                # BUGFIX: keep the *smallest* distance per fragment; the
                # previous comparison kept the largest, which skewed the
                # nearest-fragment ordering.
                distdict[fragidx] = dist
    # Extract the k fragments with the smallest distances.
    minlist = []
    for _ in range(0, k):
        if len(distdict) == 0:
            break
        key = min(distdict, key=distdict.get)
        minlist.append(key)
        del distdict[key]
    return minlist
def get_nearest_fragment(self, target):
    """
    Given a fragment id in a system, this computes the nearest fragment.

    Args:
        target (str): the fragment to find the nearest neighbor of.

    Returns:
        (str): the id of the nearest fragment.
    """
    # Delegate to the k-nearest search and keep only the closest hit.
    nearest = self.get_k_nearest_fragments(target, k=1)
    return nearest[0]
def get_net_force(self):
    """
    Returns the net force on a system in Ha/Bohr.

    Returns:
        (list): Three values which describe the net force.
    """
    from numpy import array
    ret_val = array([0.0, 0.0, 0.0])
    for frag in self.values():
        # BUGFIX: sum the loop variable ``frag``; the previous code
        # referenced an undefined name ``at`` (NameError at runtime).
        ret_val += array(frag.get_net_force())
    return [float(x) for x in ret_val]
def get_posinp(self, units='angstroem'):
    """
    Provide the dictionary which has to be passed to the ``posinp`` value
    of the :meth:`run` method of the
    :class:`~BigDFT.Calculators.SystemCalculator` class instance.

    Args:
        units (str): The units of the file. May be "angstroem" or "bohr".
    """
    pos = []
    for fragid, frag in self.items():
        for at in frag:
            # One entry per atom: {symbol: [x, y, z]} plus a fragment tag.
            atdict = {at.sym: at.get_position(units)}
            # Encode the fragment id as a [name, number] pair.
            atdict["frag"] = list(GetFragTuple(fragid))
            # NOTE(review): assumes every fragment exposes a ``frozen``
            # attribute — confirm it is defined on Fragment.
            if frag.frozen:
                atdict["Frozen"] = frag.frozen
            pos.append(atdict)
    return {'units': units, 'positions': pos}
@property
def q0(self):
    """
    Provides the global monopole of the system given as a sum of the
    monopoles of the fragments.

    Returns:
        (list): one-element list with the total monopole, or None for an
        empty system.
    """
    if len(self) == 0:
        return None
    # BUGFIX: skip fragments that carry no monopole data *before*
    # indexing. The previous ``filter(None, ...)`` ran after
    # ``frag.q0[0]``, so a fragment with q0 of None raised TypeError.
    charges = [frag.q0[0] for frag in self.values() if frag.q0 is not None]
    return [sum(charges)]
@property
def qcharge(self):
    """
    The total qcharge of a system (sum over its fragments).
    """
    total = 0
    for frag in self.values():
        total += frag.qcharge
    return total
def rename_fragments(self):
    """
    This procedure automatically names the fragments in a system.

    Returns:
        (System): the same fragments, under the automatic "FRAG:i"
        naming scheme.
    """
    renamed = System()
    for index, frag in enumerate(self.values()):
        renamed["FRAG:" + str(index)] = frag
    return renamed
def set_atom_multipoles(self, logfile, correct_charge=True):
    """
    After a run is completed, we have a set of multipoles defined on
    each atom. This routine will set those values on to each atom
    in the system.

    Args:
        logfile (Logfiles.Logfile): logfile with the multipole values.
        correct_charge (bool): currently there is an inconsistency in
            terms of gross charge, and this corrects it.
    """
    mp = logfile.electrostatic_multipoles
    # Tag each multipole entry with the units of the whole set.
    for pole in mp["values"]:
        pole["units"] = mp["units"]
    # Map every atom of the system onto its index in the multipole list.
    lookup = self.compute_matching(mp["values"])
    for fragid, frag in self.items():
        for i, at in enumerate(frag):
            match = lookup[fragid][i]
            # A negative index marks an atom without a matching entry.
            if match < 0:
                continue
            at.set_multipole(mp["values"][match], correct_charge)
def set_atom_forces(self, logfile):
    """
    After a run is completed, we have the forces on each atom in the
    logfile. This routine will set those values to each atom in this
    system.

    Args:
        logfile (Logfiles.Logfile): logfile with the forces.
    """
    # The multipole positions tell us which force entry belongs to which
    # atom of the system.
    mp = logfile.electrostatic_multipoles
    lookup = self.compute_matching(mp["values"])
    forces = logfile.forces
    for fragid, frag in self.items():
        for i, at in enumerate(frag):
            match = lookup[fragid][i]
            if match < 0:
                continue
            # Each force entry is a one-item mapping {symbol: [fx, fy, fz]}.
            at.set_force(list(forces[match].values())[0])
def write_fragfile(self, filename, logfile):
    """
    Write out the file needed as input to the fragment analysis routine.

    Args:
        filename (str): name of the file to write to.
        logfile (Logfiles.Logfile): the log file this calculation is based
            on (to ensure a matching order of atoms).
    """
    from yaml import dump
    # Match every atom of the system to its position in the logfile.
    mp = logfile.electrostatic_multipoles
    lookup = self.compute_matching(mp["values"])
    # One list of 1-based atom indices per fragment.
    outlist = []
    for fragid, frag in self.items():
        indices = [lookup[fragid][i] + 1 for i, _ in enumerate(frag)]
        outlist.append(indices)
    with open(filename, "w") as ofile:
        dump(outlist, ofile)
def write_pdb(self, filename):
    """
    Write out a system to a pdb file.

    Args:
        filename (str): the file to write to.
    """
    # Accumulate the whole file in one string.
    outstr = ""
    idx = 1
    lookup = {}
    for fragid, frag in self.items():
        for i, at in enumerate(frag):
            pos = [str("{:.3f}".format(x))
                   for x in at.get_position("angstroem")]
            fragtuple = GetFragTuple(fragid)
            # Build a fixed-width 80-column PDB record as a char list.
            # BUGFIX: every slice assignment below must insert exactly as
            # many characters as the slice is wide — inserting fewer
            # shrinks the list and shifts all later columns (the old code
            # did this for the residue number and the blank fields).
            line = list(" " * 80)
            line[0:6] = "HETATM"  # RECORD NAME
            line[7:11] = str(idx).rjust(4)  # SERIAL NUMBER
            line[12:16] = at.sym.ljust(4)  # ATOM NAME
            line[16:17] = " "  # ALTERNATIVE LOCATION INDICATOR
            line[17:20] = fragtuple[0][:3].ljust(3)  # RESIDUE NAME
            line[21:22] = "A"  # CHAIN IDENTIFIER
            line[22:26] = fragtuple[1].rjust(4)  # RESIDUE SEQUENCE NUMBER
            line[26:27] = " "  # CODE FOR INSERTION OF RESIDUES
            line[30:38] = pos[0].rjust(8)  # X COORDINATE
            line[38:46] = pos[1].rjust(8)  # Y COORDINATE
            line[46:54] = pos[2].rjust(8)  # Z COORDINATE
            line[54:60] = " " * 6  # OCCUPANCY
            line[60:66] = " " * 6  # TEMPERATURE
            line[72:76] = " " * 4  # SEGMENT IDENTIFIER
            line[76:78] = at.sym.rjust(2)  # ELEMENT SYMBOL
            line[78:80] = " " * 2  # CHARGE
            # BUGFIX: ``line`` is a list of characters; concatenating it
            # directly to a string raised TypeError. Join it first.
            outstr += "".join(line) + "\n"
            # Keep track of the indexes for the CONECT records below.
            lookup[(fragid, i)] = idx
            idx = idx + 1
    # Write the connectivity information, when available.
    if self.conmat is not None:
        for fragid, frag in self.items():
            for i, at in enumerate(frag):
                connections = self.conmat[fragid][i]
                line = list(" " * 80)
                line[0:6] = "CONECT"
                # SERIAL NUMBER
                line[7:11] = str(lookup[(fragid, i)]).rjust(4)
                if len(connections) > 0:  # BOND SERIAL NUMBERS
                    line[12:16] = str(lookup[connections[0]]).rjust(4)
                if len(connections) > 1:
                    line[17:21] = str(lookup[connections[1]]).rjust(4)
                if len(connections) > 2:
                    line[22:26] = str(lookup[connections[2]]).rjust(4)
                if len(connections) > 3:
                    line[27:31] = str(lookup[connections[3]]).rjust(4)
                # BUGFIX: join the char list here too.
                outstr += "".join(line) + "\n"
    # Finally, write out to file.
    with open(filename, "w") as ofile:
        ofile.write(outstr)
def compute_matching(self, atlist, shift=None):
    """
    Frequently we are passed a list of atom like objects from which we
    need to extract data and assign it to a system. However, a system
    can potentially store those atoms in any order, and may not have
    the same set of atoms. This helper routine creates a mapping between
    this list view, to the dictionary view of the system class.

    Args:
        atlist (list): a list of atom like objects.
        shift (list): if the positions in atlist are shifted by some
            constant vector you can specify that here.

    Returns:
        (dict): a mapping from a system to indices in the atom list.
            NOTE(review): this implementation always assigns the index
            of the *nearest* entry in atlist (no distance threshold), so
            the -1 sentinel that callers test with ``idx >= 0`` is never
            produced here — confirm intended behavior.
    """
    from BigDFT.Atom import Atom
    from numpy import array
    from scipy.spatial import KDTree
    # Convert everything to pure positions to avoid overhead.
    poslist = [array(Atom(x).get_position("bohr")) for x in atlist]
    if shift is not None:
        poslist = [x - array(shift) for x in poslist]
    tree = KDTree(poslist)
    # Search for the mapping values: for every atom of the system, the
    # index of the closest position in atlist.
    mapping = {}
    for fragid, frag in self.items():
        mapping[fragid] = []
        for at in frag:
            atpos = array(at.get_position("bohr"))
            ndist, nearest = tree.query(atpos)
            mapping[fragid].append(nearest)
    return mapping
if __name__ == "__main__":
    # Demonstration / smoke-test driver for the Fragment and System
    # classes; requires the XYZ files shipped in the Database directory.
    from BigDFT.XYZ import XYZReader, XYZWriter
    from os.path import join
    from os import system
    from copy import deepcopy
    safe_print("Read in an xyz file and build from a list.")
    atom_list = []
    with XYZReader(join("Database", "XYZs", "SiO.xyz")) as reader:
        for at in reader:
            atom_list.append(at)
    frag1 = Fragment(atomlist=atom_list)
    for at in frag1:
        safe_print(at.sym, at.get_position())
    safe_print("Centroid", frag1.centroid)
    safe_print()
    safe_print("Build from an xyz file directory.")
    reader = XYZReader(join("Database", "XYZs", "Si4.xyz"))
    frag2 = Fragment(xyzfile=reader)
    for at in frag2:
        safe_print(at.sym, at.get_position())
    safe_print()
    safe_print("We can combine two fragments with +=")
    frag3 = deepcopy(frag1)
    frag3 += frag2
    for at in frag3:
        safe_print(at.sym, at.get_position())
    safe_print("Length of frag3", len(frag3))
    safe_print()
    safe_print("Since we can iterate easily, we can also write easily.")
    with XYZWriter("test.xyz", len(frag3), "angstroem") as writer:
        for at in frag3:
            writer.write(at)
    system("cat test.xyz")
    safe_print()
    safe_print("We can also extract using the indices")
    safe_print(dict(frag3[0]))
    # Slicing a fragment yields a sub-Fragment (see __getitem__).
    sub_frag = frag3[1:3]
    for at in sub_frag:
        safe_print(dict(at))
    safe_print()
    safe_print("Now we move on to testing the system class.")
    safe_print("We might first begin in the easiest way.")
    sys = System(frag1=frag1, frag2=frag2)
    for at in sys["frag1"]:
        safe_print(dict(at))
    for at in sys["frag2"]:
        safe_print(dict(at))
    safe_print()
    safe_print("What if we want to combine two fragments together?")
    sys["frag1"] += sys.pop("frag2")
    for at in sys["frag1"]:
        safe_print(dict(at))
    safe_print("frag2" in sys)
    safe_print()
    safe_print("What if I want to split a fragment by atom indices?")
    temp_frag = sys.pop("frag1")
    sys["frag1"], sys["frag2"] = temp_frag[0:3], temp_frag[3:]
    for at in sys["frag1"]:
        safe_print(dict(at))
    for at in sys["frag2"]:
        safe_print(dict(at))
    safe_print()
    safe_print("Construct a system from an XYZ file.")
    fname = join("Database", "XYZs", "BH2.xyz")
    sys2 = System(frag1=Fragment(xyzfile=XYZReader(fname)))
    safe_print("Split it to fragments")
    sys2["frag1"], sys2["frag2"] = sys2["frag1"][0:1], sys2["frag1"][1:]
    safe_print("And write to file")
    with XYZWriter("test.xyz", len(frag3), "angstroem") as writer:
        for fragid, frag in sys2.items():
            for at in frag:
                writer.write(at)
    system("cat test.xyz")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.