blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a24dae33a26f6bfc4d530a5ec4d3612d10cb0797
|
f24081773b8469959bdb632a13f338f546fc3213
|
/data/pubmed/pubmed_random_splits/data/convert_pubmed_data.py
|
86390aa053bb7e2d2e6c628ab333a871abe1b07f
|
[] |
no_license
|
youngflyasd/bootstrapped_graph_diffusions
|
163ce462cdab033f96b6c2c598af316961bb9edd
|
91a6675084e6f78adff64680af84ae5f9df9e908
|
refs/heads/master
| 2021-09-16T01:25:09.896784
| 2018-06-14T07:54:49
| 2018-06-14T07:54:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,043
|
py
|
import networkx as nx
import cPickle
def get_graph(graph_file_path):
    """Read the tab-separated Pubmed citation file and build an undirected graph.

    The first two lines of the file are headers and are skipped.  Raw paper
    ids (the number after "paper:") are remapped to consecutive integer node
    ids in order of first appearance.

    Returns a tuple ``(G, node_label_to_node_id)`` where ``G`` is the
    networkx graph and ``node_label_to_node_id`` maps raw paper id -> node id.
    """
    G = nx.Graph()
    node_label_to_node_id = {}
    current_open_node_id = 0
    with open(graph_file_path) as f_graph:
        # Skip the two header lines.
        next(f_graph)
        next(f_graph)
        for line in f_graph:
            split_line = line.split("\t")
            # Columns 1 and 3 hold strings like "paper:<id>".
            src = int(split_line[1].split("paper:")[1])
            dst = int(split_line[3].split("paper:")[1])
            if (not src in node_label_to_node_id):
                node_label_to_node_id[src] = current_open_node_id
                current_open_node_id += 1
            if (not dst in node_label_to_node_id):
                node_label_to_node_id[dst] = current_open_node_id
                current_open_node_id += 1
            src = node_label_to_node_id[src]
            dst = node_label_to_node_id[dst]
            G.add_edge(src, dst)
    # Python 2 print statements (file also uses cPickle / iteritems).
    print "G.number_of_nodes()", G.number_of_nodes()
    print "G.number_of_edges()", G.number_of_edges()
    return G, node_label_to_node_id
def get_labels(label_file_path, node_label_to_node_id):
    """Parse the tab-separated label file and map each node id to a dense label id.

    The first two lines of the file are headers and are skipped.  Raw label
    values (the number after "label=") are renumbered to consecutive integers
    starting at zero, in order of first appearance.

    Returns a dict mapping remapped node id -> dense label id.
    """
    node_id_to_label = {}
    label_name_to_label_id = {}
    with open(label_file_path) as f_label:
        # Skip the two header lines.
        next(f_label)
        next(f_label)
        for raw_line in f_label:
            fields = raw_line.split("\t")
            raw_node = int(fields[0])
            node_id = node_label_to_node_id[raw_node]
            raw_label = int(fields[1].split("label=")[1])
            if raw_label not in label_name_to_label_id:
                # Next dense id is simply the current table size.
                label_name_to_label_id[raw_label] = len(label_name_to_label_id)
            node_id_to_label[node_id] = label_name_to_label_id[raw_label]
    return node_id_to_label
def main():
    """Convert the raw Pubmed files into an edge-list CSV and a label file."""
    graph_file_path = "./Pubmed-Diabetes.DIRECTED.cites.tab"
    label_file_path = "./Pubmed-Diabetes.NODE.paper.tab"
    G, node_label_to_node_id = get_graph(graph_file_path)
    node_id_to_label = get_labels(label_file_path, node_label_to_node_id)
    # Sanity check: show the distinct dense label ids.
    print set(node_id_to_label.values())
    nx.write_edgelist(G, "../graph/graph.csv", delimiter=",", data=False)
    # One "node_id,label" line per node.
    with open("../labels/labels", "w") as f_labels:
        for node_id, label in node_id_to_label.iteritems():
            f_labels.write(str(node_id) + "," + str(label) + "\n")


if __name__ == '__main__':
    main()
|
[
"eliav@eliav-ar-vr.hfa.corp.google.com"
] |
eliav@eliav-ar-vr.hfa.corp.google.com
|
5be9dc6f2cb68fd7c7c2df8268bcd53e2d499608
|
c01e5db0b8477148c409423a43e014d9572f48ec
|
/app/db_credentials.py
|
d11ad518a97534222acf0043dd541f0c19dc4a1a
|
[] |
no_license
|
Terencetang11/RestaurantMgmt
|
6cc318fc7ecd66bf79bc7327681038bac65fc0b5
|
4f322c6ade6f2143e2d49601b670a59151c2fd5f
|
refs/heads/main
| 2023-06-01T07:20:43.679815
| 2021-06-24T18:30:45
| 2021-06-24T18:30:45
| 380,018,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# To actually have your app use this file, you need to RENAME the file to db_credentials.py
# You will need to enter your credentials to access your mariaDB instance.
# the following will be used by the db_connector.py and
# in turn by all the Python code in this codebase to interact with the database
# NOTE(review): keep real credentials out of version control — consider
# loading these values from environment variables instead of editing this file.
host = 'YOUR_HOST_NAME'
user = 'YOUR_USER_NAME'
passwd = 'YOUR_PW'
db = 'YOUR_DB_NAME'
|
[
"terencetang11@gmail.com"
] |
terencetang11@gmail.com
|
b232b49976fc49f6a6cc56d6f53152a58d22f712
|
8a04c450227a688f495b79a4670fda88e062a1ae
|
/dev/dev/make.py
|
186fcb04b9044d9883c94526b5c104baa2c94fe4
|
[
"MIT"
] |
permissive
|
ashleychontos/pySYD
|
f437505407589a94c3ab7bed801727d5a7d6a602
|
fef0060b93da126c31ea9c0768d3c5f23e44e815
|
refs/heads/master
| 2023-08-18T06:22:00.772019
| 2023-07-28T20:56:41
| 2023-07-28T20:56:41
| 238,135,587
| 33
| 10
|
MIT
| 2023-08-10T04:43:56
| 2020-02-04T06:06:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,732
|
py
|
# SOURCE PACKAGE ---> DEVELOPMENT BRANCH
#
# Copies each pysyd package module into the development branch while keeping
# the first n header lines of the local copy, then syncs version.py and the
# data/ and dicts/ directories.
print('\n\n COPYING FROM SOURCE PACKAGE ---> DEVELOPMENT BRANCH \n\n')
from pysyd import utils
cont = utils._ask_yesno('continue? ')
if cont:
    import os
    scripts = ['cli', 'models', 'pipeline', 'plots', 'target', 'utils']
    # Header lines to preserve from the local script (cli.py has a longer header).
    rows, rows_cli = 18, 32
    _ROOT, _ = os.path.split(os.path.abspath(os.getcwd()))
    package = os.path.join(os.path.split(_ROOT)[0], 'pysyd')
    # copy scripts from src -> dev
    for script in scripts:
        if script == 'cli':
            n = rows_cli
        else:
            n = rows
        # keep header of local script
        with open(os.path.join(_ROOT, "%s.py"%script), "r") as f:
            lines = [line for line in f.readlines()]
        header = lines[:n]
        # copy new body from pysyd package
        with open(os.path.join(package, '%s.py'%script), "r") as f:
            lines = [line for line in f.readlines()]
        body = lines[n:]
        # smash together header & body
        lines = header+body
        with open(os.path.join(_ROOT, "%s.py"%script), "w") as f:
            for line in lines:
                f.write(line)
    import shutil
    # version is different
    src = os.path.join(package, 'version.py')
    dst = os.path.join(_ROOT, 'version.py')
    shutil.copy(src, dst)
    import glob
    # make sure data and dicts are up-to-date
    files = glob.glob(os.path.join(package, 'data', '*'))
    for file in files:
        dst = os.path.join(_ROOT, 'info', 'data', os.path.split(file)[-1])
        shutil.copy(file, dst)
    files = glob.glob(os.path.join(package, 'dicts', '*'))
    for file in files:
        dst = os.path.join(_ROOT, 'dicts', os.path.split(file)[-1])
        shutil.copy(file, dst)
|
[
"ashleychontos@gmail.com"
] |
ashleychontos@gmail.com
|
690579990da8d1cbd2a372b1e2f2935da11e6455
|
4437b4e67bba457c93af6b835b210f926e605b39
|
/activity/wolearn/xxdgg.py
|
27337e1a719bdd2bdb59ad8ab055c14744e23e8b
|
[
"MIT"
] |
permissive
|
mengniu8/UnicomDailyTask
|
4bde161f5aa5046bac3dd47e3b91acff4c6f9adc
|
1ee9a1a0efd509dccf541ed14d90be501eb95d14
|
refs/heads/main
| 2023-08-20T15:11:14.748525
| 2021-11-02T15:29:16
| 2021-11-02T15:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,739
|
py
|
# -*- coding: utf8 -*-
# import json
from random import randint
from utils import jsonencode as json
from activity.wolearn.wolearn import WoLearn
class XxdggAct(WoLearn):
    """Automation for the Unicom "xxdgg" scratch-card activity on edu.10155.com.

    Builds on WoLearn for session/login handling and drives the activity's
    endpoints: query state (userActInfo), earn chances (addRaffleChance),
    draw (raffle) and claim prizes (getReward / handlePrize).
    """

    def __init__(self, mobile, password):
        super(XxdggAct, self).__init__(mobile, password)
        # Channel code identifying this activity; sent with every request.
        self.chc = "VXADcwYxB35aH1UfVkZTKAAy"
        self.config = self.allconfig.get(self.chc, {})
        if self.config.get('accessToken', False):
            # Reuse a previously stored token instead of logging in again.
            self.session.headers.update({
                'accessToken': self.config['accessToken'],
                'Referer': self.config['Referer']
            })
            self.isLogin = True
        else:
            self.isLogin = False
        self.prizeList = []

    def getReward(self, item):
        """Claim one reward entry previously fetched by userActInfo()."""
        url = 'https://edu.10155.com/wxx-api/Api/XxdggAct/getReward'
        data = {
            "p": "",
            "chc": self.config.get('chc'),
            "jrPlatform": "ACTIVITY",
            "ua": self.useragent.replace(" ", "+"),
            "cookie": "",
            "bonusId": item["xbl_id"],
            "account": self.mobile
        }
        if item['xbl_reward_log']:
            # Rewards that need shipping details are not implemented yet.
            data['extra'] = {
                "name": "",
                "phone": self.mobile,
                "addr": ""
            }
            # TODO
            return
        resp = self.session.post(url=url, data=data)
        result = resp.json()
        print(result)

    def raffle(self):
        """Perform one scratch-card draw and record the prize name."""
        url = 'https://edu.10155.com/wxx-api/Api/XxdggAct/raffle'
        data = {
            "p": "",
            "chc": self.config.get('chc'),
            "jrPlatform": "ACTIVITY",
            "ua": self.useragent.replace(" ", "+"),
            "cookie": ""
        }
        resp = self.session.post(url=url, data=data)
        result = resp.json()
        try:
            reward_name = f"刮卡_{self.now_time}_{result['data']['xbl_reward_name']}"
            self.recordPrize(reward_name)
            print(json.dumps(result, indent=4, ensure_ascii=False))
        except Exception as e:
            print(e)
            print(resp.json())

    def userActInfo(self, debug=False):
        """Query activity state for the current account.

        Returns ``(lottery_times, lottery_chance, possible_chances)`` as ints;
        falls back to ``(0, 1, 5)`` when the session is not logged in.
        """
        url = 'https://edu.10155.com/wxx-api/Api/XxdggAct/userActInfo'
        data = {
            "p": "",
            "chc": self.config.get('chc'),
            "jrPlatform": "ACTIVITY",
            "ua": self.useragent.replace(" ", "+"),
            "cookie": ""
        }
        resp = self.session.post(url=url, data=data)
        result = resp.json()
        print(resp.headers.get('Set-Cookie', None))
        # A message containing "登录" (login) means the session has expired.
        if result['message'].find('登录') > -1:
            print(result['message'])
            self.isLogin = False
            return 0, 1, 5
        try:
            # Keep the latest 10 prize entries for handlePrize(); unless
            # debugging, trim the payload that is printed below to one entry.
            self.prizeList = result['data']['reward'][-10:]
            if not debug:
                result['data']['reward'] = result['data']['reward'][-1:]
        except Exception as e:
            print(str(e))
        print(json.dumps(result, indent=4, ensure_ascii=False))
        lottery_times = result['data']['lottery_times']
        lottery_chance = result['data']['lottery_chance']
        possible_chances = result['data']['possible_chances']
        return (
            int(lottery_times) if lottery_times else 0,
            int(lottery_chance) if lottery_chance else 1,
            int(possible_chances) if possible_chances else 5
        )

    def addRaffleChance(self, orderId, tip):
        """Redeem an ad-watch order id for extra raffle chances."""
        url = 'https://edu.10155.com/wxx-api/Api/XxdggAct/addRaffleChance'
        data = {
            "p": "",
            "chc": self.config.get('chc'),
            "jrPlatform": "ACTIVITY",
            "ua": self.useragent.replace(" ", "+"),
            "phone": self.mobile,
            "cookie": "",
            "orderId": orderId
        }
        if tip:
            data['type'] = tip
        resp = self.session.post(url=url, data=data)
        result = resp.json()
        print(json.dumps(result, indent=4, ensure_ascii=False))

    def handlePrize(self):
        """Claim every pending prize that is not of an excluded reward type."""
        for item in self.prizeList:
            # Skip already-claimed prizes and reward ids 6/7/8/10
            # (presumably non-claimable types — TODO confirm).
            if int(item['xbl_reward_status']) or int(item['xbl_reward_id']) in [6, 7, 8, 10]:
                continue
            else:
                print(item)
                self.getReward(item)
                self.flushTime(3)

    def run(self):
        """Full workflow: (re)login, earn a chance via ad reward, draw, claim."""
        info = lottery_times, lottery_chance, possible_chances = self.userActInfo()
        # Re-login when the session is stale or the stored token is from a
        # previous day.
        if not self.isLogin or self.config['timestamp'][:8] != self.now_date.replace('-', ''):
            self.isLogin = True
            self.openPlatLineNew(
                'https://edu.10155.com/wact/xxdgg-act.html?jrPlatform=SHOUTING&chc=VXADcwYxB35aH1UfVkZTKAAy&vid=-1'
            )
            self.shoutingTicketLogin(self.chc)
            info = lottery_times, lottery_chance, possible_chances = self.userActInfo()
        print(info)
        if not self.isLogin:
            print('登录失败')
            return
        if possible_chances == lottery_times:
            print('抽奖次数用完')
            return
        if lottery_times == 0 or lottery_times == lottery_chance:
            # self.flushTime(randint(10, 15))
            tip = None
            options = {
                'arguments1': '',
                'arguments2': '',
                'codeId': 946246464,
                'channelName': 'android-教育频道刮卡活动-激励视频',
                'remark': '教育频道刮卡活动',
                'ecs_token': self.session.cookies.get('ecs_token')
            }
            orderId = self.toutiao.reward(options)
            if lottery_times == 0:
                tip = 'goodLuck'  # triggers the three extra chances
            self.addRaffleChance(orderId, tip)
        self.raffle()
        self.userActInfo()
        self.handlePrize()


if __name__ == '__main__':
    pass
|
[
"49028484+rhming@users.noreply.github.com"
] |
49028484+rhming@users.noreply.github.com
|
1a9ede66ad121db5b32e7f7d98721ccf15c5ec28
|
f71193c2d542a88e2e128bc7b39e97b75f4e3733
|
/data_prepare/pdf_to_text.py
|
a6c3658fe186d45e721a4951529ad300b2a670f8
|
[
"MIT"
] |
permissive
|
XuJ/CPM-Pretrain
|
cc8b22ad08b0196e3e5d1ee4bed056ed0d35298a
|
44ffc5f553c95d81f54420881150cc117912ba32
|
refs/heads/main
| 2023-04-25T03:56:13.275929
| 2021-04-28T01:11:30
| 2021-04-28T01:11:30
| 357,814,642
| 0
| 1
|
MIT
| 2021-04-14T07:30:46
| 2021-04-14T07:30:45
| null |
UTF-8
|
Python
| false
| false
| 3,725
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/4/16 16:06
# @Author : jiaoxu
# @File : pdf_to_text.py
# @Software: PyCharm
"""
将分布在各个文件夹中的pdf文件,通过pdfminer转化为txt文件
同时剔除长度过短句子,以及按照从后往前的顺序剔除字符直至遇到中文句末标点
"""
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextContainer
import logging
import re
import os
import datetime
# Sentences whose non-whitespace length is not above this are dropped.
MIN_SENTENCE_LENGTH=5
# CHINESE_EOS = ["。", "?", "!", "”", "……", ")", "》", "】"]
# Characters treated as Chinese end-of-sentence punctuation.
CHINESE_EOS = ["。", "?", "!", "”", "……"]
# Input/output locations on the processing host.
pdf_dir = "/home/zjlab/data/ShangjianTech_unzip"
pdf_prefix = "sj_pdf"
txt_dir = "/home/zjlab/data/ShangjianTech_txt"
logging_dir = "/home/zjlab/log"
# File logger with a timestamped filename so each run gets its own log.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
c_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S_%f')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = logging.FileHandler(os.path.join(logging_dir, 'data_prepare_{}.log'.format(c_time)))
handler.setFormatter(formatter)
logger.addHandler(handler)
def truncate_chinese_sentence(text, eos_chars=None):
    """Trim trailing characters from *text* back to the last sentence boundary.

    Parameters
    ----------
    text : str
        Text extracted from a PDF text container.
    eos_chars : iterable of str, optional
        End-of-sentence characters. Defaults to the module-level
        ``CHINESE_EOS`` (backward compatible with the original behavior).

    Returns
    -------
    str
        ``text`` cut just after its right-most end-of-sentence character,
        ``text`` unchanged when it already ends with one, or ``""`` when no
        end-of-sentence character occurs at all.
    """
    if eos_chars is None:
        eos_chars = CHINESE_EOS
    # Walk from the end of the string towards the front looking for the
    # right-most end-of-sentence character.
    for offset, ch in enumerate(reversed(text)):
        if ch in eos_chars:
            # offset == 0 means the text already ends with EOS punctuation.
            return text if offset == 0 else text[:-offset]
    # No sentence boundary at all: drop the fragment entirely.
    return ""
def clean_text(text):
    """Normalise whitespace, truncate at the last sentence boundary, and
    decide whether the cleaned text is long enough to keep.

    Returns a ``(keep, text)`` tuple: ``keep`` is True only when the cleaned
    text, ignoring whitespace, is longer than MIN_SENTENCE_LENGTH.
    """
    collapsed = re.sub("\s+", " ", text.strip())
    truncated = truncate_chinese_sentence(collapsed)
    compact_length = len(re.sub("\s", "", truncated))
    keep = bool(truncated) and compact_length > MIN_SENTENCE_LENGTH
    return keep, truncated
# Main loop: walk the ten "sj_pdf_<0..9>" folders and convert each PDF into a
# cleaned text file named "<folder>_<pdfname>.txt" inside txt_dir.
for pdf_subfix in range(10):
    logger.info("#"* 25)
    print("#"* 25)
    pdf_subfix = str(pdf_subfix)
    folder_dir = "{}_{}".format(pdf_prefix, pdf_subfix)
    logger.info("Processing folder {}".format(folder_dir))
    print("Processing folder {}".format(folder_dir))
    for _h, pdf_file in enumerate(os.listdir(os.path.join(pdf_dir, folder_dir))):
        if pdf_file.endswith(".pdf"):
            file_name = pdf_file.split(".")[0]
            txt_file = "{}.txt".format(file_name)
            # Progress heartbeat every 100 directory entries.
            if _h % 100 == 0:
                print("Current processed {} files".format(str(_h)))
                logger.info("Current processed {} files".format(str(_h)))
            # Skip PDFs already converted by a previous run (resumable).
            if os.path.exists(os.path.join(txt_dir, "{}_{}".format(folder_dir, txt_file))):
                continue
            else:
                try:
                    logger.info("Processing file {}".format(txt_file))
                    with open(os.path.join(txt_dir, "{}_{}".format(folder_dir, txt_file)), "w", encoding="utf8") as output_fh:
                        # One cleaned line per text container that passes clean_text().
                        for i, page_layout in enumerate(extract_pages(os.path.join(pdf_dir, folder_dir, pdf_file))):
                            for j, element in enumerate(page_layout):
                                if isinstance(element, LTTextContainer):
                                    t = element.get_text()
                                    flag, t = clean_text(t)
                                    if flag:
                                        output_fh.write(t)
                                        output_fh.write("\n")
                except Exception as e:
                    # On failure, log and delete the partial output file so the
                    # next run retries this PDF instead of skipping it.
                    logger.error("Error when processing file {}, error message: {}".format(os.path.join(pdf_dir, folder_dir, pdf_file), e))
                    os.remove(os.path.join(txt_dir, "{}_{}".format(folder_dir, txt_file)))
|
[
"jiaoxu@zhejianglab.com"
] |
jiaoxu@zhejianglab.com
|
a953b53045fbf11744d5b650fd5880dd65899122
|
803b7399a0c34c4896be7a2e1ee07fd796dbfad3
|
/QueryAgentClass.py
|
a34da344f4213566c3e69e1e43dc7fc218428da3
|
[] |
no_license
|
vishvardhanreddy/Auto-Remedy
|
cdf2b896d710280d757cc203af080d2bd7067da9
|
e7164977de3f5a9f7ac2b769111a65c433005c58
|
refs/heads/master
| 2016-08-05T22:30:05.144744
| 2015-05-06T02:01:37
| 2015-05-06T02:01:37
| 35,134,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
from pymongo import MongoClient
import datetime
from datetime import date
from time import time, sleep
import re
import os
import time
import json
import socket
import sys
import RemedyAgent
import pConnect
from bson.objectid import ObjectId
sys.path.insert(0, '/home/sharishc')
import Signature
import logging
class QueryAgent
|
[
"vishvardhanreddy"
] |
vishvardhanreddy
|
ad0bf3e872c9058c3bbf4c54ba8bcd066d4b87b4
|
9f7e894bb4000f1e10ee3c970c0659f79965fdc3
|
/botXapp.py
|
8477655120402eafb2f24eb32e6f65bde7414abe
|
[] |
no_license
|
superbotx/manipulation_api
|
81dee9512fe0b05d30f37679e341131ae020141d
|
d916a8c52a3022b5cdf78d6659bf3f365eb24c4c
|
refs/heads/master
| 2020-03-12T03:15:28.851238
| 2018-04-21T00:00:40
| 2018-04-21T00:00:40
| 130,420,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from botXsrc.botXexport import botXexport
"""
botXexport is a dictionary containing all the reusable components you
developed for the project, and you will use them in the main program.
"""
def main():
    """Entry point: instantiate the manipulation_api module and set it up."""
    print('starting app ...')
    api = botXexport['manipulation_api']['module']()
    api.setup()


# This is the only script meant to be launched from a terminal, so that the
# program can gather modules correctly; main() is the explicit entry point.
if __name__ == '__main__':
    main()
|
[
"ashisghosh@live.com"
] |
ashisghosh@live.com
|
a4d5647a8357e0f10ede2a154d0c7d6d0e057021
|
6bdd36ff70922241114f766ce2f702c120d34601
|
/src/totwistimage_service.py
|
0dc2f6ece5b2a799158342207083df6942baf72b
|
[] |
no_license
|
slremy/ros_web_service
|
961f07f4139caf12656f2d2c030c2bc7a787adf1
|
481897111d247ba24a8ba74b76640006bb5f5ebd
|
refs/heads/master
| 2021-05-16T02:37:10.591812
| 2021-04-03T12:50:05
| 2021-04-03T12:50:05
| 42,208,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,359
|
py
|
#!/usr/bin/env python
'''
Copyright (c) 2013 Sekou Remy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import roslib
roslib.load_manifest('geometry_msgs')
roslib.load_manifest('nav_msgs')
from numpy import uint8
import rospy
import web
import sys
from struct import unpack
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
class twist_converter:
    """ROS node bridging HTTP requests to Twist commands and camera images.

    Publishes geometry_msgs/Twist on each robot's cmd_vel topic and caches the
    latest sensor_msgs/Image payload per robot for the web handlers below.
    (Python 2 / rospy code.)
    """

    def __init__(self):
        rospy.init_node('twist_converter', anonymous=True)
        self.num_robots=int(rospy.get_param('~num_robots',1))
        self.publishers = [None]*self.num_robots;
        self.subscribers = [None]*self.num_robots;
        if rospy.has_param('~robot_prefix'): #if there is a robot prefix assume that there is actually one or more
            #full_param_name = rospy.search_param('robot_prefix')
            #robot_prefix = rospy.get_param(full_param_name)
            robot_prefix=rospy.get_param('~robot_prefix')
            # One publisher/subscriber pair per robot, topics namespaced by prefix+index.
            for r in range(self.num_robots):
                self.publishers[r]=rospy.Publisher(robot_prefix+str(r)+'/cmd_vel',Twist);
                self.subscribers[r] = rospy.Subscriber(robot_prefix+str(r)+'/image', Image, self.callback, r)
        else: # if no robot prefix, assume that there is only one robot
            self.publishers[0] = rospy.Publisher('cmd_vel',Twist);rospy.logwarn("assuming /cmd_vel, number of robots actually "
                +str(self.num_robots))
            self.subscribers[0] = rospy.Subscriber("image", Image, self.callback, 0)
        self.data_uri = rospy.get_param("data_uri","/twist");
        # URL -> handler-class-name pairs consumed by web.application().
        self.urls = (self.data_uri,'twist', "/stop","stop","/controller","controller")
        self.data = ['-10']*self.num_robots;
        self.datasize = [0]*self.num_robots;
        self.port=int(rospy.get_param("~port","8080"));
        #self.data_uri2 = rospy.get_param("data_uri","/pose");
        rospy.logwarn("running")

    def callback(self,msg,id):
        """Cache the latest image payload and its byte size for robot *id*."""
        #get the data from the message and store as a string
        try:
            self.data[id] = msg.data;
            self.datasize[id]=msg.width*msg.height*4; #assumes rgba8
        except Exception, err:
            rospy.logwarn("Cannot convert the Image message due to %s, with robot %s" % err, id)
class controller:
    """web.py handler that renders the Stage controller page for GET and POST."""

    def __init__(self):
        self.render = web.template.render('templates/')

    def GET(self):
        return self.render.stage("", None)

    def POST(self):
        # POST behaves exactly like GET.
        return self.GET()
class stop:
    """web.py handler that terminates the service on any request."""

    def GET(self):
        # exit(0) raises SystemExit; the return never actually happens.
        return exit(0)

    def POST(self):
        return self.GET()
class twist:
    """Handler for the twist URI: publishes a Twist built from query
    parameters, then responds with the addressed robot's cached image bytes
    serialized as "<size>,<byte0>,<byte1>,..." plain text."""

    def GET(self):
        return self.process()

    def POST(self):
        return self.process()

    def process(self):
        """Read lx/ly/lz/ax/ay/az/id query params, publish, return image CSV."""
        global tc
        msg=Twist();
        robot_id=0;
        i = web.input();
        try:
            # Each component is optional; absent params leave the Twist at 0.
            if hasattr(i, "lx"):
                msg.linear.x = float(i.lx)
            if hasattr(i, "ly"):
                msg.linear.y = float(i.ly)
            if hasattr(i, "lz"):
                msg.linear.z = float(i.lz)
            if hasattr(i, "ax"):
                msg.angular.x = float(i.ax)
            if hasattr(i, "ay"):
                msg.angular.y = float(i.ay)
            if hasattr(i, "az"):
                msg.angular.z = float(i.az)
            if hasattr(i, "id"):
                robot_id = int(i.id)
            #msg.linear.z = -0.0049
            if robot_id < tc.num_robots: tc.publishers[robot_id].publish(msg);
        except Exception, err:
            rospy.logwarn("Cannot convert/publish due to %s" % err)
        # Unpack the raw image buffer as unsigned bytes and join as CSV,
        # prefixed with the byte count.
        data=str(tc.datasize[robot_id])+","+''.join(str(i)+"," for i in unpack(tc.datasize[robot_id]*"B",tc.data[robot_id])).strip(
            ',');
        size = len(data);
        web.header("Content-Length", str(size)) # Set the Header
        #output to browser
        web.header("Content-Type", "text/plain") # Set the Header
        return data
# Instantiate the ROS node and the web.py application exposing it.
tc = twist_converter()
app = web.application(tc.urls, globals())
if __name__ == "__main__":
    wsgifunc = app.wsgifunc()
    wsgifunc = web.httpserver.StaticMiddleware(wsgifunc);
    #server = web.httpserver.WSGIServer(("0.0.0.0", 8080),wsgifunc)
    server = web.httpserver.WSGIServer(("0.0.0.0", tc.port),wsgifunc)
    print "http://%s:%d/%s" % ("0.0.0.0", tc.port, tc.urls)
    try:
        server.start()
    except (KeyboardInterrupt, SystemExit):
        server.stop()
        print "Shutting down service"
        # On shutdown, command a small descent (z = -0.0049) to every robot.
        msg=Twist();
        msg.linear.z = -0.0049;
        for i in range(tc.num_robots):
            tc.publishers[i].publish(msg)
|
[
"sekouremy@gmail.com@a6724ec2-1828-344f-a6e4-dcee8d45cc22"
] |
sekouremy@gmail.com@a6724ec2-1828-344f-a6e4-dcee8d45cc22
|
d5f98d617a280930d91a0f7c1d9a2c7cc9b05353
|
39e22b705d4c6d0432f797ea0c64917a77622042
|
/src/test_model.py
|
0db2b2a56994ae1504df9840f9b86fb0d8f4c4c0
|
[] |
no_license
|
gnguilherme/cnn_augmentation
|
88807c99a2731f9a0d60ca06b26a05a0b6ed6ed7
|
1671d71085b4fde970c1341d65aad5efe70b4534
|
refs/heads/master
| 2023-07-22T00:16:45.600617
| 2021-08-14T19:47:42
| 2021-08-14T19:47:42
| 396,009,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
import os
import json
import tensorflow as tf
import matplotlib.pyplot as plt
from src.train import create_model, get_dataset
AUTOTUNE = tf.data.AUTOTUNE
# Load configuration once at import time ('rb' works because json.load
# accepts binary file objects; UTF-8 assumed).
with open('src/config.json', 'rb') as file:
    config = json.load(file)
# Directory holding the saved model weights.
MODEL_PATH = config['model_path']
def main():
    """Evaluate the saved model on the test split and preview one batch.

    Loads weights from MODEL_PATH when available (falling back to the freshly
    created, untrained model otherwise), reports evaluation metrics, then
    displays every image of a single test batch with its label.
    """
    dataset = get_dataset(step='test')
    model = create_model()
    weights_path = os.path.join(MODEL_PATH, 'model')
    try:
        model.load_weights(weights_path)
        print("Model weights loaded")
    except Exception as exc:
        print(exc)
        print("Weights not loaded")
    model.evaluate(dataset)
    for batch in dataset.as_numpy_iterator():
        images, labels = batch[0], batch[1]
        for image, label in zip(images, labels):
            plt.imshow(image, interpolation='bicubic')
            plt.suptitle(f"Prediction: {label}")
            plt.tight_layout()
            plt.show()
        # Only the first batch is previewed.
        break


if __name__ == '__main__':
    main()
|
[
"ggaighernt@gmail.com"
] |
ggaighernt@gmail.com
|
0e4fb4d8e6191187de574dfc987ebb9d3c4b2dde
|
1e0203f40d4cffed0d64449edeaea00311f4b732
|
/kth-smallest-element-in-a-bst/solution.py
|
90676be9e1d6a3c29f0e0ddcbb74bdaadf0b6205
|
[] |
no_license
|
childe/leetcode
|
102e87dd8d918877f64e7157d45f3f45a607b9e4
|
d2e8b2dca40fc955045eb62e576c776bad8ee5f1
|
refs/heads/master
| 2023-01-12T01:55:26.190208
| 2022-12-27T13:25:27
| 2022-12-27T13:25:27
| 39,767,776
| 2
| 1
| null | 2020-10-13T01:29:05
| 2015-07-27T10:05:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
# -*- coding: utf-8 -*-
"""
https://leetcode-cn.com/problems/kth-smallest-element-in-a-bst/
Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
Example 1:
Input: root = [3,1,4,null,2], k = 1
3
/ \
1 4
\
2
Output: 1
Example 2:
Input: root = [5,3,6,2,4,null,null,1], k = 3
5
/ \
3 6
/ \
2 4
/
1
Output: 3
Follow up:
What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently? How would you optimize the kthSmallest routine?
Constraints:
The number of elements of the BST is between 1 to 10^4.
You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    """Kth-smallest element in a BST via cached subtree sizes (order statistics)."""

    def kthSmallest(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: int

        Classic order-statistic walk: if the left subtree holds exactly k-1
        nodes the root is the answer; fewer means the target lies in the
        right subtree (with k reduced accordingly); more means it lies in
        the left subtree.
        """
        # BUG FIX: the original referenced self.nodeCountCache before any
        # assignment (AttributeError on first call), and its "initialisation"
        # line `self.nodeCountCache` was a no-op expression. Lazily create
        # the memoisation dict instead.
        if getattr(self, 'nodeCountCache', None) is None:
            self.nodeCountCache = {}
        leftCount = self.nodeCount(root.left)
        if leftCount == k - 1:
            return root.val
        if leftCount < k - 1:
            return self.kthSmallest(root.right, k - leftCount - 1)
        return self.kthSmallest(root.left, k)

    def nodeCount(self, root):
        """Return the node count of the subtree rooted at *root*, memoised per node."""
        if root in self.nodeCountCache:
            return self.nodeCountCache[root]
        if root is None:
            return 0
        r = self.nodeCount(root.left) + self.nodeCount(root.right) + 1
        self.nodeCountCache[root] = r
        return r
|
[
"rmself@qq.com"
] |
rmself@qq.com
|
1e202b85fdedfd6a88dfd652e1045b4d9ba33f44
|
c47290c21da82defae5d7414b1e033c2f7467924
|
/conf/localhost/bhagirath.django.wsgi
|
9951b9692edbd2d5279558f3e9389446bb8716a7
|
[] |
no_license
|
pingali/bhagirath
|
6131f9d7851bf2897dac8e893860c4b338e01b4a
|
3dfebed61ab648f5219f61cc7170288b8265dd6c
|
refs/heads/master
| 2021-01-01T20:00:30.915387
| 2012-06-14T10:47:38
| 2012-06-14T10:47:38
| 3,169,330
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
wsgi
|
import os
import sys
def findpath(path):
    """Return the absolute path of *path* resolved relative to this file's directory."""
    base = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base, path))
# put the Django project on sys.path
sys.path.insert(0, findpath("../.."))
sys.path.insert(0, findpath("../../bhagirath"))
# Point Django at the project settings before importing the WSGI handler.
os.environ["DJANGO_SETTINGS_MODULE"] = "bhagirath.settings"
from django.core.handlers.wsgi import WSGIHandler
# WSGI entry point used by the web server (mod_wsgi looks for `application`).
application = WSGIHandler()
|
[
"pingali@gmail.com"
] |
pingali@gmail.com
|
dbf6571352bec851779cf0163b411cd9fe08a94d
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/tenPercent/rank_4eo5_D.py
|
31ce4561eaa828c73dc0d6070c6dfb3b46635e36
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,386
|
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.

import os
import pandas as pd
import numpy as np
import pickle

os.chdir('/mnt/scratch/tanemur1/')

# Read the model and trainFile
testFile = '4eo5.csv'
identifier = 'D'
# Fraction of highest-magnitude coefficients to keep (10%).
coefFrac = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenPercent/ranks/'
pdbID = testFile[:4]

# Load the trained classifier for this identifier.
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)

result = pd.DataFrame()
scoreList = []

# Load the descriptor CSV, drop bookkeeping columns, and transpose so each
# row is one complex (native + decoys) and each column one descriptor.
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)

# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
# 14028 is the total descriptor count; + 0.5 rounds to the nearest integer.
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)

# Standard scaler fitted on the training data.
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)

for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)

# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)

with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
6e2e5297aadfe98a277c072463a0258f5aaa0bf9
|
079cde666810d916c49a9ac49189a929ad19e72f
|
/qsbk/qsbk/spiders/qsbk_spider.py
|
62ee357ad9cf341e9300040d34c3c4bc1cbae5d8
|
[] |
no_license
|
jokerix/demo
|
82d01582a9882ac361766516d07c9ad700053768
|
7c0d7666f82ec78e9562956bb0f4482af8531ebb
|
refs/heads/master
| 2021-07-14T02:47:28.686827
| 2020-07-03T15:16:32
| 2020-07-03T15:16:32
| 179,784,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from qsbk.items import QsbkItem
class QsbkSpiderSpider(scrapy.Spider):
    """Spider for qiushibaike.com text jokes.

    Yields one QsbkItem (author, content) per post on each listing page and
    follows the pagination link until no "next page" remains.
    """
    name = 'qsbk_spider'
    allowed_domains = ['qiushibaike.com']
    start_urls = ['https://www.qiushibaike.com/text/page/1/']
    base_domain = 'https://www.qiushibaike.com'

    def parse(self, response):
        """Extract author/content pairs from a listing page and queue the next page."""
        duanzidiv = response.xpath("//div[@id = 'content-left']/div")
        for duanzi in duanzidiv:
            author = duanzi.xpath(".//h2/text()").get().strip()
            # Posts can span several text nodes; join them into one string.
            content = duanzi.xpath(".//div[@class= 'content']//text()").getall()
            content = ''.join(content)
            item = QsbkItem(author=author, content=content)
            yield item
        # BUG FIX: the original XPath "//ul[@class =[pagination']..." was
        # malformed (a stray '[' where the opening quote belongs) and would
        # raise a ValueError at runtime instead of selecting the next link.
        next_url = response.xpath("//ul[@class ='pagination']/li[last()]/a/@href").get()
        if not next_url:
            return
        else:
            yield scrapy.Request(self.base_domain + next_url, callback=self.parse)
|
[
"1215774897@qq.com"
] |
1215774897@qq.com
|
0ad84d45a028072cb09d4a2c559789b516455b30
|
e3dc8227b11a0de7f7a21c12b28f4cb7b6d7add1
|
/Real_time_identification_1.4.2_for_imaging_ver.3_gui_fixed/module/cnn_processing.py
|
be7b6e8786977bea67ad9aa2edf3d9b631088f14
|
[] |
no_license
|
yt050297/Realtime-Identification
|
b8d2965e26ac3f405e33da83980ed8f76a289271
|
297b7e4773b3a65d698502db3fccf89aabc0b485
|
refs/heads/main
| 2023-09-02T20:59:44.490194
| 2021-10-12T07:57:12
| 2021-10-12T07:57:12
| 384,337,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,675
|
py
|
import os
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
import keras
from keras.models import Sequential, model_from_json
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
import matplotlib.pyplot as plt
from module.show_infrared_camera import ShowInfraredCamera
class CNN:
def __init__(self, classnum, traindir, im_size_width, im_size_height, flip, epoch, initial_height, initial_side, pixel_height, pixel_side, error):
self.conv1 = 30
self.conv2 = 20
self.conv3 = 10
self.dense1 = 100
self.dense2 = classnum
self.nb_epoch = epoch
self.nb_batch = 64
self.learning_rate = 1e-3
self.classnum = classnum
self.traindir = traindir
self.im_size_width = im_size_width
self.im_size_height = im_size_height
self.flip = flip
self.initial_height = initial_height
self.initial_side = initial_side
self.pixel_height = pixel_height
self.pixel_side = pixel_side
self.error = error
self.model_structure = 'convreluMax' + str(self.conv1) + '_convreluMax' + str(self.conv2) + '_convreluMax' + str(
self.conv3) + '_dense' + str(self.dense1) + 'relu_softmax'
self.f_log = self.traindir + 'width' + str(
self.im_size_width) + 'height' + str(self.im_size_height) + 'flip' + self.flip + '/' + self.model_structure + '_lr' + str(self.learning_rate) + '/Adam_epoch' + str(
self.nb_epoch) + '_batch' + str(self.nb_batch)
self.f_model = self.traindir + 'width' + str(
im_size_width) + 'height' + str(self.im_size_height) + 'flip' + self.flip + '/' + self.model_structure + '_lr' + str(self.learning_rate) + '/Adam_epoch' + str(
self.nb_epoch) + '_batch' + str(self.nb_batch)
os.makedirs(self.f_model, exist_ok=True)
os.makedirs(self.f_log, exist_ok=True)
def cnn_train(self, X_train, Y_train, X_test, Y_test):
conv1 = self.conv1
conv2 = self.conv2
conv3 = self.conv3
dense1 = self.dense1
dense2 = self.dense2
# ニュートラルネットワークで使用するモデル作成
old_session = KTF.get_session()
# old_session = tf.compat.v1.keras.backend.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
model = Sequential()
model.add(Conv2D(conv1, kernel_size=(3, 3), activation='relu', input_shape=(X_train.shape[1:])))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(conv2, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(conv3, (3, 3), activation='relu'))
# model.add(Dropout(0.25))
# model.add(Conv2D(128, (3, 3), padding='same',activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(dense1, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(self.classnum, activation='softmax'))
model.summary()
# optimizer には adam を指定
adam = keras.optimizers.Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
es_cb = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=0,
mode='auto')
tb_cb = keras.callbacks.TensorBoard(log_dir=self.f_log, histogram_freq=1)
# cp_cb = keras.callbacks.ModelCheckpoint(filepath = os.path.join(f_model,'cnn_model{epoch:02d}-loss{loss:.2f}-acc{acc:.2f}-vloss{val_loss:.2f}-vacc{val_acc:.2f}.hdf5'), monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
# cbks = [es_cb, tb_cb, cp_cb]
cbks = [es_cb, tb_cb]
# cbks = [tb_cb]
history = model.fit(X_train, Y_train, batch_size=self.nb_batch, epochs=self.nb_epoch,
validation_data=(X_test, Y_test), callbacks=cbks, verbose=1)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('save the architecture of a model')
json_string = model.to_json()
open(os.path.join(self.f_model, 'cnn_model.json'), 'w').write(json_string)
yaml_string = model.to_yaml()
open(os.path.join(self.f_model, 'cnn_model.yaml'), 'w').write(yaml_string)
print('save weights')
model.save_weights(os.path.join(self.f_model, 'cnn_weights.hdf5'))
KTF.set_session(old_session)
return
def cnn_train_noneval(self, X_train, Y_train):
conv1 = self.conv1
conv2 = self.conv2
conv3 = self.conv3
dense1 = self.dense1
dense2 = self.dense2
# ニュートラルネットワークで使用するモデル作成
old_session = KTF.get_session()
#old_session = tf.compat.v1.keras.backend.get_session()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
KTF.set_learning_phase(1)
model = Sequential()
model.add(Conv2D(conv1, kernel_size=(3, 3), activation='relu', input_shape=(X_train.shape[1:])))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(conv2, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(conv3, (3, 3), activation='relu'))
# model.add(Dropout(0.25))
# model.add(Conv2D(128, (3, 3), padding='same',activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(dense1, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(self.classnum, activation='softmax'))
model.summary()
# optimizer には adam を指定
adam = keras.optimizers.Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
history = model.fit(X_train, Y_train, batch_size=self.nb_batch, epochs=self.nb_epoch,
validation_data=None, callbacks=None, verbose=1)
print('save the architecture of a model')
json_string = model.to_json()
open(os.path.join(self.f_model, 'cnn_model.json'), 'w').write(json_string)
yaml_string = model.to_yaml()
open(os.path.join(self.f_model, 'cnn_model.yaml'), 'w').write(yaml_string)
print('save weights')
model.save_weights(os.path.join(self.f_model, 'cnn_weights.hdf5'))
KTF.set_session(old_session)
return
def cnn_test(self, trigger_type, gain, exp, classnamelist, cvv):
# ニュートラルネットワークで使用するモデル作成
model_filename = 'cnn_model.json'
weights_filename = 'cnn_weights.hdf5'
old_session = KTF.get_session()
#show_infrared_camera = None
#show_infrared_camera = ShowInfraredCamera()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
json_string = open(os.path.join(self.f_model, model_filename)).read()
model = model_from_json(json_string)
model.summary()
adam = keras.optimizers.Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.load_weights(os.path.join(self.f_model, weights_filename))
cvv.realtime_identification(classnamelist,model,trigger_type,gain,exp,self.im_size_width,self.im_size_height,self.flip)
cbks = []
KTF.set_session(old_session)
def cnn_test_color(self, trigger_type, gain, exp, classnamelist, cvv):
# ニュートラルネットワークで使用するモデル作成
model_filename = 'cnn_model.json'
weights_filename = 'cnn_weights.hdf5'
old_session = KTF.get_session()
#show_infrared_camera = None
#show_infrared_camera = ShowInfraredCamera()
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
json_string = open(os.path.join(self.f_model, model_filename)).read()
model = model_from_json(json_string)
model.summary()
adam = keras.optimizers.Adam(lr=self.learning_rate)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.load_weights(os.path.join(self.f_model, weights_filename))
cvv.realtime_identification_color(classnamelist, model, trigger_type, gain, exp,
self.im_size_width, self.im_size_height, self.flip,self.initial_height,self.initial_side,self.pixel_height, self.pixel_side,self.error)
cbks = []
KTF.set_session(old_session)
|
[
"yt050297@gmail.com"
] |
yt050297@gmail.com
|
de32c1537bc409be6ed69442f08c736abee41bbd
|
d13f8cb91c56b30ee886fcba7e28ab3054d50c2f
|
/ProjectQ/projectq/setups/decompositions/time_evolution.py
|
a0897af9433a414fbb7aa5bd8fd1775524cc031c
|
[
"Apache-2.0"
] |
permissive
|
cowanmeg/501-proj
|
05f548a14a895f0eeb8dd544fcec43f1dac9da56
|
109102726db668fbfb963c6f0d884bdef9699bfc
|
refs/heads/master
| 2021-03-22T05:21:55.393992
| 2017-06-09T06:46:02
| 2017-06-09T06:46:02
| 92,208,588
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,290
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers decomposition for the TimeEvolution gates.
An exact straight forward decomposition of a TimeEvolution gate is possible
if the hamiltonian has only one term or if all the terms commute with each
other in which case one can implement each term individually.
"""
import math
from projectq.cengines import DecompositionRule
from projectq.meta import Control, Compute, Uncompute
from projectq.ops import TimeEvolution, QubitOperator, H, Y, CNOT, Rz, Rx, Ry
def _recognize_time_evolution_commuting_terms(cmd):
"""
Recognize all TimeEvolution gates with >1 terms but which all commute.
"""
hamiltonian = cmd.gate.hamiltonian
if len(hamiltonian.terms) == 1:
return False
else:
id_op = QubitOperator((), 0.0)
for term in hamiltonian.terms:
test_op = QubitOperator(term, hamiltonian.terms[term])
for other in hamiltonian.terms:
other_op = QubitOperator(other, hamiltonian.terms[other])
commutator = test_op * other_op - other_op * test_op
if not commutator.isclose(id_op,
rel_tol=1e-9,
abs_tol=1e-9):
return False
return True
def _decompose_time_evolution_commuting_terms(cmd):
qureg = cmd.qubits
eng = cmd.engine
hamiltonian = cmd.gate.hamiltonian
time = cmd.gate.time
with Control(eng, cmd.control_qubits):
for term in hamiltonian.terms:
ind_operator = QubitOperator(term, hamiltonian.terms[term])
TimeEvolution(time, ind_operator) | qureg
def _recognize_time_evolution_individual_terms(cmd):
return len(cmd.gate.hamiltonian.terms) == 1
def _decompose_time_evolution_individual_terms(cmd):
"""
Implements a TimeEvolution gate with a hamiltonian having only one term.
To implement exp(-i * t * hamiltonian), where the hamiltonian is only one
term, e.g., hamiltonian = X0 x Y1 X Z2, we first perform local
transformations to in order that all Pauli operators in the hamiltonian
are Z. We then implement exp(-i * t * (Z1 x Z2 x Z3) and transform the
basis back to the original. For more details see, e.g.,
James D. Whitfield, Jacob Biamonte & Aspuru-Guzik
Simulation of electronic structure Hamiltonians using quantum computers,
Molecular Physics, 109:5, 735-750 (2011).
or
Nielsen and Chuang, Quantum Computation and Information.
"""
assert len(cmd.qubits) == 1
qureg = cmd.qubits[0]
eng = cmd.engine
time = cmd.gate.time
hamiltonian = cmd.gate.hamiltonian
assert len(hamiltonian.terms) == 1
term = list(hamiltonian.terms)[0]
coefficient = hamiltonian.terms[term]
check_indices = set()
# Check that hamiltonian is not identity term,
# Previous or operator should have apply a global phase instead:
assert not term == ()
# hamiltonian has only a single local operator
if len(term) == 1:
with Control(eng, cmd.control_qubits):
if term[0][1] == 'X':
Rx(time * coefficient * 2.) | qureg[term[0][0]]
elif term[0][1] == 'Y':
Ry(time * coefficient * 2.) | qureg[term[0][0]]
else:
Rz(time * coefficient * 2.) | qureg[term[0][0]]
# hamiltonian has more than one local operator
else:
with Control(eng, cmd.control_qubits):
with Compute(eng):
# Apply local basis rotations
for index, action in term:
check_indices.add(index)
if action == 'X':
H | qureg[index]
elif action == 'Y':
Rx(math.pi / 2.) | qureg[index]
# Check that qureg had exactly as many qubits as indices:
assert check_indices == set((range(len(qureg))))
# Compute parity
for i in range(len(qureg)-1):
CNOT | (qureg[i], qureg[i+1])
Rz(time * coefficient * 2.) | qureg[-1]
# Uncompute parity and basis change
Uncompute(eng)
rule_commuting_terms = DecompositionRule(gate_class=TimeEvolution,
gate_decomposer=_decompose_time_evolution_commuting_terms,
gate_recognizer=_recognize_time_evolution_commuting_terms)
rule_individual_terms = DecompositionRule(gate_class=TimeEvolution,
gate_decomposer=_decompose_time_evolution_individual_terms,
gate_recognizer=_recognize_time_evolution_individual_terms)
all_defined_decomposition_rules = [rule_commuting_terms,
rule_individual_terms]
|
[
"cowanmeg@magnesium.dyn.cs.washington.edu"
] |
cowanmeg@magnesium.dyn.cs.washington.edu
|
43251bc844c701e640866b166aa62235285ec8b4
|
e87075083ebda6e736f1b5fbc8d3bbf5b0ed7da0
|
/p4-5/p5p4.py
|
9dfd24e67c5756dfcd7417ea6656cdc7a478e438
|
[] |
no_license
|
kiwi-33/Programming_1_practicals
|
20ec7841cd03d6cf17cae6c4424e377d71cb8675
|
880aa7bc563f69010661fb1f8b404fde5148bd7a
|
refs/heads/master
| 2022-05-30T09:18:22.347044
| 2019-03-23T16:37:17
| 2019-03-23T16:37:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
number = int(input("Enter a number: "))
if number =0:
print ("Number is equal to 0")
elif 0<number>=20:
print ("Number is greater than 0 and less than or equal to 20")
elif 20<number>= 40:
print ("Number is greater than 20 and less than or equal to 40")
elif 40<number>= 60:
print ("Number is greater than 40 and less than or equal to 60")
elif 60<number>= 80:
print ("Number is greater than 60 and less than or equal to 80")
elif 80<number>= 100:
print ("Number is greater than 80 and less than or equal to 100")
elif number>100:
print ("Number is greater than 100")
elif number =0:
print ("Number is 0")
|
[
"noreply@github.com"
] |
kiwi-33.noreply@github.com
|
6bf5263858b5f7cef53615ff86a31774d5e8b2a9
|
40f3cf474854076bbfaefa232f57b99ebc15ae76
|
/Cercle1.py
|
8692e3653cca8c9693bacad4d39368afe551c7fc
|
[] |
no_license
|
Loemba-Dieumerci/ProgrammationPython1
|
686497faaa86dbd237dd8bbcd73088cb0c82e1c1
|
ab2bf8e9b0c95f27f0360edd885eb596034c7180
|
refs/heads/master
| 2023-01-07T03:57:11.660153
| 2020-11-05T07:59:12
| 2020-11-05T07:59:12
| 307,408,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from pylab import *
#LOEMBA
#Cercle de rayon 1
x=linspace(-1,1,100)
#print(x)
y=sqrt(1 - x**2)
plot(x,y, 'r')
#Pour obtenir la symétrie
plot(x,-y, 'r')
#Forcer le graphique d'avoir la mm échelle en x et y
axis("equal")
grid()
show()
|
[
"loembadieumerci1965gmail.com"
] |
loembadieumerci1965gmail.com
|
26abee7d9d7583207b928017fd3a2a412326189d
|
b3db0c8aac2eb3cb098c9b4e811747684617534e
|
/food_rec/models/guideline_directive.py
|
ede432eac6dedab12ff250bc63d67f174717cba9
|
[] |
no_license
|
solashirai/FoodRec
|
814c58431c134a1e561414dbfe2c64f842cd53fd
|
a14fcf2cafaf26c5bb6396485a16052c6925aa6f
|
refs/heads/master
| 2023-04-07T04:40:13.249868
| 2021-04-19T02:29:36
| 2021-04-19T02:29:36
| 359,238,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
from typing import FrozenSet, TypeVar, List, Set, Callable, NamedTuple
from food_rec.models import FoodKgRecipe, FoodKgUser, RecipeCandidate
from frex import DomainObject, Explanation
from frex.models.constraints import ConstraintType
from frex.utils.common import rgetattr
class GuidelineDirective(NamedTuple):
"""
Class to store information about a guideline directive, which takes the form of a type of constraint
(EQ, LEQ, etc) used to compare a recipe's target attribute against some value.
This may be expanded later to check more aspects of recipes, e.g., if some ingredient is in a recipe, but it's
not too flexible right now.
"""
target_value: float
target_attribute: str
directive_type: ConstraintType
def __call__(self, *, candidate: RecipeCandidate):
return self.directive_type(
rgetattr(candidate.domain_object, self.target_attribute), self.target_value
)
|
[
"solashakashirai@gmail.com"
] |
solashakashirai@gmail.com
|
4605e6446b4acdec8d2635b068107bdcb22c73b0
|
288757e602b561c482e0b5053945b42de0ed5692
|
/code/008.py
|
df7560cb6d4ff633e9780e7f48727b497dd6b475
|
[] |
no_license
|
ByoungJoonIm/Algorithm_Practice
|
2cddd0bf353fe386f70bcfdb089f094fabb61d63
|
56f12a66ae8bb924df5fcbc27d3a6bf3365c1e82
|
refs/heads/master
| 2021-06-28T02:13:44.418822
| 2020-10-10T23:24:59
| 2020-10-10T23:24:59
| 174,338,870
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
T = int(input())
# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.
for test_case in range(1, T + 1):
N = float(input())
rs = ''
for i in range(1, 13):
t = 1.0 / (1 << i)
if N - t >= 0:
rs += '1'
N = N - t
else:
rs += '0'
if N == 0:
break
if N != 0:
rs = 'overflow'
print("#{0} {1}".format(test_case, rs))
|
[
"noreply@github.com"
] |
ByoungJoonIm.noreply@github.com
|
37177fa4bbedd726b7f981728d3ae8f145a2d283
|
ac842f300f8799af001bc3262fe208edc5a54eeb
|
/src/Trees/list_tree_implementation.py
|
cdf71e6bf1f1eb28b32fc8802a0f7f5bb9c82022
|
[] |
no_license
|
anatulea/PythonDataStructures
|
ba3b1c0e7dd19a9762d7f5686f053bdb4e02e9fd
|
c20fdbff051eb94ca0a5835927be4e81b10fe60b
|
refs/heads/master
| 2023-01-20T07:53:15.835808
| 2020-11-25T23:41:20
| 2020-11-25T23:41:20
| 300,012,953
| 0
| 0
| null | 2020-11-25T23:41:21
| 2020-09-30T18:18:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
'''
Below is a representation of a Tree using a list of lists.
'''
def BinaryTree(root_node):
return [root_node, [], []]
def insertLeft(root,newBranch):
t = root.pop(1)
if len(t) > 1:
root.insert(1,[newBranch,t,[]])
else:
root.insert(1,[newBranch, [], []])
return root
def insertRight(root,newBranch):
t = root.pop(2)
if len(t) > 1:
root.insert(2,[newBranch,[],t])
else:
root.insert(2,[newBranch,[],[]])
return root
def getRootVal(root):
return root[0]
def setRootVal(root,newVal):
root[0] = newVal
def getLeftChild(root):
return root[1]
def getRightChild(root):
return root[2]
|
[
"anatulea@Anas-MacBook-Pro.local"
] |
anatulea@Anas-MacBook-Pro.local
|
9ac13e29a99269814658650420dcf1a6270aae21
|
a55bf34082c0f1eafe58d4a39d1088963f75575c
|
/Chapter8_recusion_dp/robot.py
|
a3930d685b5048daea0b80abb51df31caac17328
|
[] |
no_license
|
Kaiton7/Cracking_CodeInterview
|
69bf3d4e83feaf75b79d7429bca97c9f9e392cfe
|
eb8a63315a71813a87db360e7a5396bc552e8e5d
|
refs/heads/master
| 2020-04-26T17:47:55.262995
| 2019-03-09T08:47:22
| 2019-03-09T08:47:22
| 173,724,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
'''
def Solve(Matrix):
s_r,s_c=0,0
t_r,t_c=len(Matrix),len(Matrix[0])
dp=[[0]*len(t_c)]*t_r
detect_path(s_r,s_c,t_r,t_c,Matrix,dp)
def detect_path(s_r,s_c,t_r,t_c,Matrix,dp):
if(s_r==t_r and s_c==t_c):
return dp[t_r][t_c]
direction=[[1,0],[0,1]]
for i in direction:
dx=i[0]
dy=i[1]
if(dp[s_r+dx][s_c+dy]>dp[s_r][s_c]+1):
dp[s_r+dx][s_c+dy]=dp[s_r][s_c]+1
'''
def Solve(Matrix):
if(Matrix==None or len(Matrix)==0):
return None
path=[]
fielddp=[]
if findpath(Matrix,len(Matrix)-1,len(Matrix[0])-1,path,fielddp):
print(fielddp)
return path
return None
def findpath(Matrix,t_r,t_c,path,fielddp):
if(t_r<0 or t_c<0 or not(Matrix[t_r][t_c])):
return False
point = (t_r,t_c)
if point in fielddp:
print("failed")
return False
isAtorigin=(t_c==0) and (t_r==0)
#左と上を探しに行って、両方共行き止まりならそのポイントは調べたってことでfielddpに記録するよ
if(isAtorigin or findpath(Matrix,t_r,t_c-1,path,fielddp) or findpath(Matrix,t_r-1,t_c,path,fielddp)):
path.append(point)
print(fielddp)
return True
print("tt")
fielddp.append(point)
return False
print(Solve([ [True, True,True,True],
[True,False,False,True],
[False,True,True,True]]))
|
[
"kituemr@gmail.com"
] |
kituemr@gmail.com
|
b19d5f776040bab37179c45779dbfeb452cb61df
|
ffbf721d15d74ad330b4558328ea60ba2f7ba8f6
|
/前程带.py
|
61c8e5bcf300d0b12b7e8f67747cfff9402ec354
|
[] |
no_license
|
WangXu1997s/store
|
b15dee06cd034175650de68f4d59c7f889f21369
|
f975e8786b98ff1611879ff68b20b8f358f293a1
|
refs/heads/master
| 2023-09-04T13:18:09.630779
| 2021-10-18T01:31:59
| 2021-10-18T01:31:59
| 403,526,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,291
|
py
|
# 实现前程贷项目:
# 注册
# 认证:(实名认证,修改手机号,修改登陆密码)
# 添加银行卡
from time import sleep
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('http://8.129.91.152:8765/')
driver.maximize_window()
driver.implicitly_wait(10)
# 注册
driver.find_element('link text', '免费注册').click()
driver.find_element('id', 'phone').send_keys('13695246311')
sleep(10)
driver.find_element('link text', '获取短信验证码').click()
text = driver.find_element('xpath', '//*[@id="layui-layer1"]/div').text
sleep(1)
driver.find_element('xpath', '//input[@name="code"]').send_keys(text[-4:])
driver.find_element('xpath', '//*[@name="password"]').send_keys('gdk123456')
driver.find_element('xpath', '//*[@name="agree"]').click()
driver.find_element('xpath', '//*[text()="下一步"]').click()
driver.find_element('link text', '加入蜂群').click()
# 修改个人信息
driver.find_element('xpath', '/html/body/div/div[1]/div/div[2]/span[3]/a').click()
driver.find_element('xpath', '/html/body/div[3]/div[1]/div[2]/div[1]/div[2]/div[3]/a').click()
driver.find_element('xpath', '//*[text()="实名认证"]').click()
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[1]/div/input').send_keys('符丹峰')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[2]/div/input'
).send_keys('410106199303132512')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[3]/div/button').click()
# 修改手机号,手机验证码无法获取
# try:
# driver.find_element('xpath', '/html/body/div/div[3]/div[1]/ul/li[2]/a').click()
# driver.find_element('xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[1]/a').click()
# except Exception:
# driver.find_element('xpath', '/html/body/div/div[3]/div[1]/ul/li[2]/a').click()
# driver.find_element('xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[1]/a').click()
# 修改密码
try:
driver.find_element('xpath', '/html/body/div/div[3]/div[1]/ul/li[4]/a').click()
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[1]/input'
).send_keys('gdk123456')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[2]/input'
).send_keys('gbk123456')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[3]/input'
).send_keys('gbk123456')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[5]/button[1]'
).click()
except Exception:
driver.find_element('xpath', '/html/body/div/div[3]/div[1]/ul/li[4]/a').click()
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[1]/input'
).send_keys('gdk123456')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[2]/input'
).send_keys('gbk123456')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[3]/input'
).send_keys('gbk123456')
driver.find_element(
'xpath', '//*[@id="layui-layer1"]/div[2]/div/form/div[5]/button[1]'
).click()
sleep(5)
driver.quit()
|
[
"noreply@github.com"
] |
WangXu1997s.noreply@github.com
|
ae12f240764d9abe6f5a80dabfb0dfe4c7e71b71
|
3e8127b4729d9c9a6cd7e860f3d3393531e4f90e
|
/myenv/bin/easy_install
|
c8d16faf20f20e7bbf427803b6f8b61546d94352
|
[] |
no_license
|
artourkin/djangoblog
|
cf166720677b8cf24bc2a86719b111cecd3a695c
|
4ea7cdafb2a9d24db9a96a6dc56adfe17ebba4b2
|
refs/heads/master
| 2021-03-16T10:26:18.242940
| 2015-04-13T23:00:12
| 2015-04-13T23:00:12
| 33,897,764
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
#!/Users/artur/rnd/pythonProjects/djangoblog/myenv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"artur.kulmukhametov@tuwien.ac.at"
] |
artur.kulmukhametov@tuwien.ac.at
|
|
002575641f813e42d7fbfaaf1ca2c95938890e0a
|
680ac920cb7a9d8ed648b020cfd71e13318dfbed
|
/processors/__init__.py
|
28d922a026e4516fa417ae1f815c054310df2807
|
[] |
no_license
|
wyangla/toyEngine_operator
|
b944302ac09605ca9834522a42d5c25a0c237985
|
049446aa141bb84a9b5c52b280371741b87946b1
|
refs/heads/master
| 2021-06-23T16:46:30.622767
| 2019-08-17T09:50:45
| 2019-08-17T09:50:45
| 153,284,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
# processors provide the raw information of documents
# the advanced information is generated by unit_generator
from processors.Doc_processor import Doc_processor # cls
from processors.processor_plugins import Processor_plugin_ch, Processor_plugin_en # modules
|
[
"wyglauk@gmail.com"
] |
wyglauk@gmail.com
|
2f8d820a94da8660f390c2e97f539e82dd401658
|
32226e72c8cbaa734b2bdee081c2a2d4d0322702
|
/experiments/torch_ddpg_sweep_cheetah.py
|
c95ba21ef1cea5e5d7d3a9402afdea9802e7c4a3
|
[
"MIT"
] |
permissive
|
Asap7772/rail-rl-franka-eval
|
2b1cbad7adae958b3b53930a837df8a31ab885dc
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
refs/heads/master
| 2022-11-15T07:08:33.416025
| 2020-07-12T22:05:32
| 2020-07-12T22:05:32
| 279,155,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
"""
Run PyTorch DDPG on HalfCheetah.
"""
import random
from railrl.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from railrl.exploration_strategies.ou_strategy import OUStrategy
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.networks import FeedForwardQFunction, FeedForwardPolicy
from railrl.torch.ddpg.ddpg import DDPG
import railrl.torch.pytorch_util as ptu
from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv
from rllab.envs.normalized_env import normalize
def example(variant):
env = HalfCheetahEnv()
if variant['normalize']:
env = normalize(env)
es = OUStrategy(action_space=env.action_space)
qf = FeedForwardQFunction(
int(env.observation_space.flat_dim),
int(env.action_space.flat_dim),
32,
32,
)
policy = FeedForwardPolicy(
int(env.observation_space.flat_dim),
int(env.action_space.flat_dim),
32,
32,
)
exploration_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=es,
policy=policy,
)
algorithm = DDPG(
env,
qf=qf,
policy=policy,
exploration_policy=exploration_policy,
**variant['algo_params']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algo_params=dict(
num_epochs=99,
num_steps_per_epoch=10000,
num_steps_per_eval=1000,
use_soft_update=True,
tau=1e-2,
batch_size=128,
max_path_length=1000,
discount=0.99,
qf_learning_rate=1e-3,
policy_learning_rate=1e-4,
),
version="PyTorch - bigger networks",
normalize=False,
size='32',
)
for _ in range(3):
seed = random.randint(0, 999999)
run_experiment(
example,
exp_prefix="ddpg-half-cheetah-check-clean",
seed=seed,
mode='ec2',
variant=variant,
use_gpu=False,
)
|
[
"asap7772@berkeley.edu"
] |
asap7772@berkeley.edu
|
b01ed93edf28ecb19f6574b1fd27d9de19182b42
|
dd1795abeffc5994b619af8182d536315116e619
|
/Don/migrations/0003_auto_20201113_2034.py
|
d78a4e770489b5b9ac5c4d6941e99c06d0d5fb65
|
[] |
no_license
|
clinton01-cr7/GestDonCovid
|
3c5c0f43f95b5d87c504961ad81b6d795dc123ed
|
a4822a9695c41b3f13729fe5f3ef042293b061ef
|
refs/heads/master
| 2023-02-17T04:37:55.103131
| 2020-12-22T20:48:25
| 2020-12-22T20:48:25
| 311,063,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,872
|
py
|
# Generated by Django 3.1.2 on 2020-11-13 19:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Don', '0002_hopital'),
]
operations = [
migrations.CreateModel(
name='Utilisateur',
fields=[
('Utilisateur_id', models.AutoField(primary_key=True, serialize=False)),
('nom', models.CharField(max_length=25)),
('email', models.CharField(max_length=50)),
('passwd', models.CharField(max_length=250)),
('role', models.CharField(max_length=10)),
('datej', models.DateField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Donateur',
fields=[
('utilisateur_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='Don.utilisateur')),
('dateNais', models.DateField()),
],
bases=('Don.utilisateur',),
),
migrations.CreateModel(
name='Don',
fields=[
('Utilisateur_id', models.AutoField(primary_key=True, serialize=False)),
('montant', models.FloatField()),
('dateD', models.DateField(auto_now_add=True)),
('hopital_FK', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Don.hopital')),
('donateur_FK', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Don.donateur')),
],
),
migrations.AddField(
model_name='hopital',
name='donateur_FK',
field=models.ManyToManyField(through='Don.Don', to='Don.Donateur'),
),
]
|
[
"cmambou06@gmail.com"
] |
cmambou06@gmail.com
|
804069f5e618909d089754daea39f446f16aaf0e
|
dd501feff05b5e4378804d4ee6942ee5dac32512
|
/simple_controller/scripts/topic/topic_list.py
|
999e039cb9ad17289ffb7ee24982db4938e5513d
|
[] |
no_license
|
OkDoky/capstone
|
9dea48ee55a63bbe433f7fc33048a2e2d9c01afc
|
d9e359dbbb0593c32ae8714c4836944eb6d74382
|
refs/heads/master
| 2020-05-31T05:42:59.814780
| 2019-06-04T03:54:11
| 2019-06-04T03:54:11
| 190,124,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
from geometry_msgs.msg import Point, Twist
from math import atan2
from capstone_msgs.msg import *
rospy.init_node("topic_list")
pub = rospy.Publisher("/point_list",Smartfactory,queue_size=1)
Plist = Smartfactory()
r = rospy.Rate(4)
ppp = [1.2,2.0]
#ppp = [0.1,0.1,0.2,0.1,0.25,0.25]
#ppp = [[1.0,2.2],[2.3,3.5],[3.12,4.00]]
while not rospy.is_shutdown():
Plist.point_list = list(ppp)
pub.publish(Plist)
r.sleep()
|
[
"ie5630jk@naver.com"
] |
ie5630jk@naver.com
|
66d162ad84e8d022e18dc5d934f72d4611d318ac
|
5bf60b36f8f9c88556c74194939061654c16c7ab
|
/ops.py
|
292f87e6dad14c4605b675967f0a8694c5db73f7
|
[] |
no_license
|
Sangwon91/ESGAN
|
938446852650f86425f1838ebae7f6123baeb7b1
|
0ff14cf431022e0837790e85ae00ab8c06d90c2d
|
refs/heads/master
| 2022-10-21T17:44:49.353645
| 2020-06-17T04:51:16
| 2020-06-17T04:51:16
| 114,627,613
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,133
|
py
|
import math
import functools
import tensorflow as tf
kernel_initializer = tf.random_normal_initializer(0.0, 0.02)
dense = functools.partial(
tf.layers.dense,
activation=None,
use_bias=False,
kernel_initializer=kernel_initializer,
)
def pbc_pad3d(x, lp, rp, name="PBC"):
with tf.variable_scope(name):
if lp == 0 and rp == 0:
x = tf.identity(x)
elif lp == 0 and rp != 0:
x = tf.concat([x, x[:, :rp, :, :, :]], axis=1)
x = tf.concat([x, x[:, :, :rp, :, :]], axis=2)
x = tf.concat([x, x[:, :, :, :rp, :]], axis=3)
elif lp != 0 and rp != 0:
x = tf.concat(
[x[:, -lp:, :, :, :], x, x[:, :rp, :, :, :]], axis=1)
x = tf.concat(
[x[:, :, -lp:, :, :], x, x[:, :, :rp, :, :]], axis=2)
x = tf.concat(
[x[:, :, :, -lp:, :], x, x[:, :, :, :rp, :]], axis=3)
else:
raise Exception("lp != 0 and rp == 0")
return x
def pbc_conv3d(x, pbc=True, **kwargs):
if pbc:
# Calculate padding size.
s = kwargs["strides"]
k = kwargs["kernel_size"]
# i = input size.
i = x.get_shape().as_list()[1]
if i % s == 0:
p = max(k-s, 0)
else:
p = max(k - (i%s), 0)
# calc left padding = lp and right padding = rp
lp = p // 2
rp = p - lp
# Pad.
x = pbc_pad3d(x, lp, rp)
kwargs["padding"] = "VALID"
# Do convolution.
x = tf.layers.conv3d(x, **kwargs)
return x
conv3d = functools.partial(
pbc_conv3d,
pbc=True,
kernel_size=5,
strides=2,
padding="SAME",
activation=None,
use_bias=False,
kernel_initializer=kernel_initializer,
)
conv3d_transpose = functools.partial(
tf.layers.conv3d_transpose,
kernel_size=5,
strides=2,
padding="SAME",
activation=None,
use_bias=False,
kernel_initializer=kernel_initializer,
)
# Source: https://github.com/maxorange/voxel-dcgan/blob/master/ops.py
# Automatic updator version of batch normalization.
def batch_normalization(
        x,
        training,
        name="batch_normalization",
        decay=0.99,
        epsilon=1e-5,
        global_norm=True):
    """Batch normalization with automatically-updated moving statistics.

    Source: https://github.com/maxorange/voxel-dcgan/blob/master/ops.py

    Args:
        x: input tensor.
        training: boolean tensor; True selects batch statistics (and
            updates the moving averages), False uses the stored ones.
        decay: exponential moving-average decay for the stored stats.
        epsilon: variance floor for numerical stability.
        global_norm: channel-wise statistics if True, else pixel-wise.
    """
    # Get input shape as python list.
    shape = x.get_shape().as_list()
    if global_norm:
        # Channel-wise statistics.
        size = shape[-1:]
        axes = list(range(len(shape)-1))
        keep_dims = False
    else:
        # Pixel-wise statistics.
        size = [1] + shape[1:]
        axes = [0]
        keep_dims = True
    with tf.variable_scope(name):
        # Learnable shift (beta) and scale (gamma).
        beta = tf.get_variable(
            name="beta",
            shape=size,
            initializer=tf.constant_initializer(0.0),
        )
        gamma = tf.get_variable(
            name="gamma",
            shape=size,
            initializer=tf.random_normal_initializer(1.0, 0.02),
        )
        # Non-trainable running statistics, updated only in train mode.
        moving_mean = tf.get_variable(
            name="moving_mean",
            shape=size,
            initializer=tf.constant_initializer(0.0),
            trainable=False,
        )
        moving_var = tf.get_variable(
            name="moving_var",
            shape=size,
            initializer=tf.constant_initializer(1.0),
            trainable=False,
        )
        # Add moving vars to the tf collection.
        # The list of moving vars can be obtained with
        # tf.moving_average_variables().
        if moving_mean not in tf.moving_average_variables():
            collection = tf.GraphKeys.MOVING_AVERAGE_VARIABLES
            tf.add_to_collection(collection, moving_mean)
            tf.add_to_collection(collection, moving_var)

        def train_mode():
            # execute at training time
            batch_mean, batch_var = tf.nn.moments(
                x,
                axes=axes,
                keep_dims=keep_dims,
            )
            # Exponentially decay the stored statistics towards the batch's.
            update_mean = tf.assign_sub(
                moving_mean, (1-decay) * (moving_mean-batch_mean)
            )
            update_var = tf.assign_sub(
                moving_var, (1-decay) * (moving_var-batch_var)
            )
            # Automatically update global means and variances.
            with tf.control_dependencies([update_mean, update_var]):
                return tf.nn.batch_normalization(
                    x, batch_mean, batch_var, beta, gamma, epsilon)

        def test_mode():
            # execute at test time
            return tf.nn.batch_normalization(
                x, moving_mean, moving_var, beta, gamma, epsilon)

        return tf.cond(training, train_mode, test_mode)
def minibatch_discrimination(x, num_kernels, dim_per_kernel, name="minibatch"):
    """Minibatch discrimination (Salimans et al., 2016).

    Appends per-sample features that measure similarity (exp of negative
    L1 distance) to the other samples in the batch, helping a GAN
    discriminator detect mode collapse.
    """
    input_x = x
    with tf.variable_scope(name):
        # Project each sample to num_kernels vectors of dim_per_kernel.
        x = dense(x, units=num_kernels*dim_per_kernel)
        x = tf.reshape(x, [-1, num_kernels, dim_per_kernel])
        # Pairwise differences across the batch.
        diffs = (
            tf.expand_dims(x, axis=-1) -
            tf.expand_dims(tf.transpose(x, [1, 2, 0]), axis=0)
        )
        l1_dists = tf.reduce_sum(tf.abs(diffs), axis=2)
        # Total similarity of each sample to the rest of the batch.
        minibatch_features = tf.reduce_sum(tf.exp(-l1_dists), axis=2)
        return tf.concat([input_x, minibatch_features], axis=1)
def match_shape_with_dense(x, target, name="match_shape"):
    """Project `x` through one dense layer so it can be reshaped to `target`'s shape."""
    # Replace unknown (None) dims, e.g. the batch axis, with -1 for tf.reshape.
    target_shape = [dim if dim else -1 for dim in target.get_shape().as_list()]
    # Number of units = product of all non-batch dimensions.
    units = functools.reduce(lambda acc, dim: acc * dim, target_shape[1:], 1)
    with tf.variable_scope(name):
        flat = tf.layers.flatten(x)
        projected = dense(flat, units=units, use_bias=True)
        # Same size as input
        return tf.reshape(projected, shape=target_shape)
if __name__ == "__main__":
    # Smoke test: load a 32^3 grid file, apply asymmetric periodic padding,
    # and dump the padded result to disk.
    import numpy as np
    data = np.fromfile("/home/FRAC32/RWY/RWY.griddata", dtype=np.float32)
    data = data.reshape([1, 32, 32, 32, 1])
    v = tf.Variable(data)
    v = pbc_pad3d(v, 22, 15)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        data = sess.run(v)
    data.tofile("test.times")
|
[
"integratedmailsystem@gmail.com"
] |
integratedmailsystem@gmail.com
|
202bb1a25b00db27401572fe9e313abab8437597
|
ea2616ba021c04e704eaba635d208b51282e695f
|
/tools/math.py
|
137b2400e98d9e3db6a313ca6f7b9cb4b8336161
|
[
"MIT"
] |
permissive
|
ZhangAllen98/DAPPER
|
c5a8f1dbf606c0dae92f9da6e01576ac8224f965
|
a60b02ff1934af36fa4807429de65c7103087553
|
refs/heads/master
| 2020-04-09T23:50:14.715721
| 2018-11-17T16:40:20
| 2018-11-17T16:50:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,168
|
py
|
# Misc math
from common import *
########################
# Array manip
########################
def is1d(a):
    """True if `a` has at most one axis longer than 1 (lists, row/column arrays, matrices)."""
    dims_beyond_one = asarray(asarray(a).shape) > 1
    return np.sum(dims_beyond_one) <= 1

# stackoverflow.com/q/37726830
def is_int(a):
    """True for any builtin or numpy integer scalar."""
    return np.issubdtype(type(a), np.integer)

def tp(a):
    """Transpose a 1d vector into a column."""
    return a[np.newaxis].T

def exactly_1d(a):
    """Coerce to ndarray and insist on exactly one dimension."""
    arr = np.atleast_1d(a)
    assert arr.ndim == 1
    return arr

def exactly_2d(a):
    """Coerce to ndarray and insist on exactly two dimensions."""
    arr = np.atleast_2d(a)
    assert arr.ndim == 2
    return arr
def ccat(*args, axis=0):
    """Concatenate, promoting every argument to at least 1d first."""
    parts = [np.atleast_1d(part) for part in args]
    return np.concatenate(parts, axis=axis)

def roll_n_sub(arr, item, i_repl=0):
    """
    Roll `arr` so slot `i_repl` frees up, then write `item` there.

    Example:
    In: roll_n_sub(arange(4),99,0)
    Out: array([99, 0, 1, 2])
    In: roll_n_sub(arange(4),99,-1)
    Out: array([ 1, 2, 3, 99])
    """
    # Negative replacement indices count from the right and need no extra shift.
    shift = i_repl + (0 if i_repl < 0 else 1)
    rolled = np.roll(arr, shift, axis=0)
    rolled[i_repl] = item
    return rolled
########################
# Ensemble matrix manip
########################
def ens_compatible(func):
    """Transpose before and after, so `func` sees ensemble members as columns."""
    @functools.wraps(func)
    def transposed(x, *args, **kwargs):
        return func(x.T, *args, **kwargs).T
    return transposed

def anom(E, axis=0):
    """Split ensemble `E` into (anomalies, mean)."""
    mu = mean(E, axis=axis, keepdims=True)
    return E - mu, mu.squeeze()

def center(E, rescale=True):
    """
    Center the sample; optionally rescale to maintain its (expected) variance.

    Note: one could similarly correct a sample's 2nd moment, but that is
    typically not worth it.
    """
    N = E.shape[0]
    A = E - mean(E, 0)
    # Centering consumes one degree of freedom; compensate.
    return A * sqrt(N/(N-1)) if rescale else A

def inflate_ens(E, factor):
    """Scale the ensemble spread about its mean by `factor`."""
    A, mu = anom(E)
    return mu + factor * A

def weight_degeneracy(w, prec=1e-10):
    """True when a single weight carries (essentially) all of the mass."""
    return (1 - w.max()) < prec

def unbias_var(w=None, N_eff=None, avoid_pathological=False):
    """
    Compute the unbias-ing factor for weighted variance estimation.
    wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights
    """
    if N_eff is None:
        N_eff = 1 / (w @ w)
    if avoid_pathological and weight_degeneracy(w):
        # With collapsed weights the correction would blow up.
        return 1
    return 1 / (1 - 1/N_eff)  # = N/(N-1) when w == ones(N)/N
########################
# Time stepping (integration)
########################
def rk4(f, x, t, dt, order=4):
    """Runge-Kutta N-th order (explicit, non-adaptive) numerical ODE solvers."""
    k1 = dt * f(t, x)
    if order == 1:
        return x + k1
    k2 = dt * f(t + dt/2, x + k1/2)
    if order == 2:
        return x + k2
    if order == 3:
        k3 = dt * f(t + dt, x + k2*2 - k1)
        return x + (k1 + 4*k2 + k3)/6
    if order == 4:
        k3 = dt * f(t + dt/2, x + k2/2)
        k4 = dt * f(t + dt, x + k3)
        return x + (k1 + 2*(k2 + k3) + k4)/6
    raise NotImplementedError

def with_rk4(dxdt, autonom=False, order=4):
    """Wrap dxdt in rk4, yielding a step(x0, t0, dt) function."""
    integrator = functools.partial(rk4, order=order)
    if autonom:
        # Ignore the time argument for autonomous systems.
        step = lambda x0, t0, dt: integrator(lambda t, x: dxdt(x), x0, np.nan, dt)
    else:
        step = lambda x0, t0, dt: integrator(dxdt, x0, t0, dt)
    name = "rk{} integration of {!r} from {}".format(order, dxdt, dxdt.__module__)
    return NamedFunc(step, name)

def make_recursive(func, prog=False):
    """
    Return a version of func() whose 2nd argument (k)
    is the number of times to apply func to its own output.

    Example:
      def step(x,t,dt): ...
      step_k = make_recursive(step)
      x[k]   = step_k(x0,k,t=NaN,dt)[-1]
    """
    def fun_k(x0, k, *args, **kwargs):
        xx = zeros((k+1,) + x0.shape)
        xx[0] = x0
        steps = range(k)
        # Optional progress bar, with a custom or default label.
        if isinstance(prog, str):
            steps = progbar(steps, prog)
        elif prog:
            steps = progbar(steps, 'Recurs.')
        for i in steps:
            xx[i+1] = func(xx[i], *args, **kwargs)
        return xx
    return fun_k
def integrate_TLM(M, dt, method='approx'):
    """
    Return the resolvent, i.e. (equivalently)
     - the Jacobian of the step function;
     - the integral of dU/dt = M@U with U0 = eye.
    M (the TLM) is assumed constant.

    method:
     - 'analytic': exact (assuming the TLM is constant).
     - 'approx'  : derived from the forward-Euler scheme.
     - 'rk4'     : higher-precision approximation.
    NB: 'analytic' typically requires higher inflation in the ExtKF.
    """
    if method == 'analytic':
        # Diagonalize, exponentiate the eigenvalues, transform back.
        Lambda, V = np.linalg.eig(M)
        resolvent = (V * exp(dt*Lambda)) @ np.linalg.inv(V)
        return np.real_if_close(resolvent, tol=10**5)
    I = eye(M.shape[0])
    if method == 'rk4':
        return rk4(lambda t, U: M@U, I, np.nan, dt)
    if method.lower().startswith('approx'):
        return I + dt*M
    raise ValueError
########################
# Rounding
########################
def round2(num, prec=1.0):
    """Round `num` to the nearest multiple of `prec`."""
    return np.multiply(prec, np.rint(np.divide(num, prec)))

def round2sigfig(x, nfig=1):
    """Round `x` to `nfig` significant figures.

    Fix: the previous implementation did `x *= signs`, which sign-flipped
    array arguments IN PLACE, silently corrupting the caller's data (and
    round2nice's intermediate results) for negative entries. Magnitudes
    are now computed into a fresh object instead.
    """
    if np.all(array(x) == 0):
        return x
    signs = np.sign(x)
    mags = signs * x  # |x|, without writing back into the caller's array
    return signs * round2(mags, 10**floor(log10(mags) - nfig + 1))

def round2nice(xx):
    "Rounds (ordered) array to nice numbers"
    r1 = round2sigfig(xx, nfig=1)
    r2 = round2sigfig(xx, nfig=2)
    # Assign r2 to duplicate entries in r1:
    dup = np.isclose(0, np.diff(r1))
    r1[1:-1][dup[:-1]] = r2[1:-1][dup[:-1]]
    if dup[-1]:
        r1[-2] = r2[-2]
    return r1

def validate_int(x):
    """Cast to int, asserting that no information is lost."""
    x_int = int(x)
    assert np.isclose(x, x_int)
    return x_int
# import decimal
# def round2(num,prec=1.0):
# """
# Round with specific precision.
# """
#
# rr = prec * np.round(num/prec).astype(int)
#
# # Yes, it's a finite-prec world. But esthetics are emphasized.
# # Example of uglyness to avoid:
# # >>> prec=1e-2; num=0.899;
# # >>> prec*np.round(num/prec).astype(int) # --> 0.9000000000002
# # Using non-numpy int() is better: would yield 0.9.
# # But it still does not fully avoid this effect. Example:
# # >>> prec = 1e-1; num = 0.31;
# # >>> prec * int(np.round(num/prec)) # --> 0.30000000000000004
# # The following module avoids this uglyness:
# decimal.getcontext().prec = max(1,-int(ceil(log10(prec))))
#
# if hasattr(rr,'__iter__'):
# rr = array([float(decimal.Decimal(str(r))) for r in rr])
# else:
# rr = float(decimal.Decimal(str(rr)))
# return rr
#
# def round2nice(xx,expo=None,irreg=0.0,v=False):
# """
# Rounds (ordered) array to nice numbers,
# without introducing any duplicates.
#
# irreg: float between 0 and 1 controlling the prefererence
# between (0) regular spacing and (1) less sigfig.
# """
#
# # # Init
# # if expos is None:
# # expos = array([int(x) if x!=0 else 0 for x in floor(log10(xx))])
#
# # N = len(xx)
#
# # # Round array with prec=10**expo
# # rr = [round2(x,10**e) for x,e in zip(xx,expos)]
# # rr1 = [round2(x,10**e+1) for x,e in zip(xx,expos)]
#
# # Init
# if expo is None:
# expo = int(floor(log10(xx.max())))-1
#
# N = len(xx)
#
# # Round array with prec=10**expo
# rr = round2(xx,10**expo)
# rr1 = round2(xx,10**(expo+1))
#
#
# if irreg:
# i = np.argmin(np.abs(xx-rr1[0]))
# if i==0 or i==N-1:
# # Edge cases not easy to handle,
# # because they would need to be compared to the "outer" array.
# # We're opting to set them to less-sigfic.
# #rr[i] = rr1[i]
# pass
# else:
# irreg2 = irreg**2 # more 'human readable'
# maxratio = 1 + irreg
# a = rr1[i] - rr[i-1]
# b = rr[i+1] - rr1[i]
# if max(a/b, b/a) > 1/irreg2:
# rr[i] = rr1[i]
#
#
# # Find intervals of duplicates
# edges = [] # boundries of intervals
# dups = [False] + np.isclose(0,np.diff(rr)).tolist()
# for i in arange(N-1):
# if (not dups[i]) and dups[i+1]:
# edges += [ [i,'pending'] ]
# if dups[i] and (not dups[i+1]):
# edges[-1][1] = i+1
#
# if v:
# spell_out(expo)
# print(np.vstack([rr1,rr,xx,arange(N)]))
# spell_out(edges,"\n")
#
# if len(edges)==0:
# return rr
#
# # Sub-arrays
# arrs = [ rr[:edges[0][0]] ]
# for i,(a,b) in enumerate(edges):
# d1_next = edges[i+1][0] if i<(len(edges)-1) else N
# # Recurse
# arrs += [ round2nice(xx[a:b], expo-1, irreg, v) ]
# # Add interval of non-duplicates
# arrs += [ rr[b:d1_next] ]
# #spell_out(arrs)
# return np.concatenate(arrs)
########################
# Misc
########################
def LogSp(start, stop, num=50, **kwargs):
    """Log-spaced values, specified via the (non-logged) endpoints."""
    assert 'base' not in kwargs, "The base is irrelevant."
    return np.logspace(log10(start), log10(stop), num=num, base=10)

def CurvedSpace(start, end, curve, N):
    "Monotonic series (space). Set 'curve' param between 0,1."
    x0 = 1/curve - 1
    span = end - start
    return start + span * (LogSp(x0, 1 + x0, N) - x0)

def circulant_ACF(C, do_abs=False):
    """
    Compute the ACF of C, treated as the cov/corr matrix
    of a 1D periodic domain.
    """
    m = len(C)
    cols = sla.circulant(arange(m))
    acf = zeros(m)
    for i in range(m):
        row = C[i, cols[i]]
        # Note: this also accesses masked values in C.
        acf += abs(row) if do_abs else row
    return acf / m
########################
# Linear Algebra
########################
def mrdiv(b, A):
    """Matrix right division: solve x @ A = b for x."""
    xT = nla.solve(A.T, b.T)
    return xT.T

def mldiv(A, b):
    """Matrix left division: solve A @ x = b for x."""
    return nla.solve(A, b)

def truncate_rank(s, threshold, avoid_pathological):
    "Find r such that s[:r] contains the threshold proportion of s."
    assert isinstance(threshold, float)
    if threshold == 1.0:
        return len(s)
    if not threshold < 1.0:
        raise ValueError
    # Strict '<' followed by +1 ensures the retained energy reaches threshold.
    r = np.sum(np.cumsum(s)/np.sum(s) < threshold) + 1
    if avoid_pathological:
        # Keep singular values numerically tied with s[r-1], e.g. so that
        # reconst(*tsvd(eye(400), 0.99)) does not zero its trailing entries.
        r += np.sum(np.isclose(s[r-1], s[r:]))
    return r
def tsvd(A, threshold=0.99999, avoid_pathological=True):
    """
    Truncated svd. Also automates the 'full_matrices' flag.

    threshold:
     - float, < 1.0: rank = lowest number such that the
       "energy" retained >= threshold
     - int, >= 1   : rank = threshold
    avoid_pathological: avoid truncating (e.g.) the identity matrix.
    NB: only applies for float threshold.
    """
    m, n = A.shape
    full_matrices = False

    if is_int(threshold):
        # A specific rank was requested directly.
        r = threshold
        assert 1 <= r <= max(m, n)
        full_matrices = r > min(m, n)

    U, s, VT = sla.svd(A, full_matrices)

    if isinstance(threshold, float):
        # A proportion of the energy was requested.
        r = truncate_rank(s, threshold, avoid_pathological)

    return U[:, :r], s[:r], VT[:r]

def svd0(A):
    """
    Compute the full svd if nrows > ncols, the reduced svd otherwise.

    As in Matlab: svd(A,0) — except input/output are transposed, per
    DAPPER convention. Contrasts with scipy.linalg's
    svd(full_matrices=False) and Matlab's svd(A,'econ'), which always
    compute the reduced svd. For reduction down to rank, see tsvd().
    """
    m, n = A.shape
    return sla.svd(A, full_matrices=(m > n))

def pad0(ss, N):
    """Zero-pad the 1d array `ss` out to length N."""
    padded = zeros(N)
    padded[:len(ss)] = ss
    return padded

def reconst(U, s, VT):
    """
    Reconstruct a matrix from its (possibly truncated) svd:
    A == reconst(*tsvd(A,1.0)). See also sla.diagsvd().
    """
    scaled = U * s
    return scaled @ VT

def tinv(A, *kargs, **kwargs):
    """
    Inverse based on truncated svd. See also sla.pinv2().
    """
    U, s, VT = tsvd(A, *kargs, **kwargs)
    return (VT.T * s**(-1.0)) @ U.T
########################
# Setup facilation
########################
def Id_op():
    # Identity operator: returns its first argument unchanged.
    return NamedFunc(lambda *args: args[0], "Id operator")

def Id_mat(m):
    # Constant identity-matrix "jacobian" of size m (ignores x, t).
    I = np.eye(m)
    return NamedFunc(lambda x,t: I, "Id("+str(m)+") matrix")

def linear_model_setup(M):
    "M is normalized wrt step length dt."
    # Wrap a constant matrix M into DAPPER's model/jacobian dict format.
    M = np.asarray(M) # sparse or matrix classes not supported
    m = len(M)
    @ens_compatible
    def model(x,t,dt): return dt*(M@x)
    def jacob(x,t,dt): return dt*M
    f = {
        'm'    : m,
        'model': model,
        'jacob': jacob,
    }
    return f
def equi_spaced_integers(m, p):
    """Provide a range of p equispaced integers between 0 and m-1"""
    lo = floor(m/p/2)
    hi = ceil(m - m/p/2 - 1)
    return np.round(linspace(lo, hi, p)).astype(int)

def direct_obs_matrix(m, obs_inds):
    """Matrix that "picks" state elements obs_inds out of range(m)"""
    p = len(obs_inds)
    H = zeros((p, m))
    # One 1 per row, placed at that row's observed index.
    H[np.arange(p), obs_inds] = 1
    return H
def partial_direct_obs_setup(m,obs_inds):
    # Observation-operator dict observing the subset obs_inds of an m-dim state.
    p = len(obs_inds)
    H = direct_obs_matrix(m,obs_inds)
    @ens_compatible
    def model(x,t): return x[obs_inds]
    def jacob(x,t): return H
    h = {
        'm'    : p,
        'model': model,
        'jacob': jacob,
    }
    return h
|
[
"patrick.n.raanes@gmail.com"
] |
patrick.n.raanes@gmail.com
|
a72b82041d226086037c79f94f44aa365b69f193
|
12cb93fbc6dc83f2574df4c5b612e8a701aed310
|
/sprint12/1/121H.py
|
4e3245d3565d21f81d7a90192f00e8a07f00c238
|
[] |
no_license
|
dzanto/algorithmics
|
85b871ea72069b42d2771b021d0ba02adbd2f021
|
462f4837818eb4f654228e051fcbff0902cad658
|
refs/heads/master
| 2023-01-09T04:58:06.280813
| 2020-11-11T10:41:38
| 2020-11-11T10:41:38
| 303,316,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# Read a line, strip punctuation/spaces, and report whether it is a palindrome.
text = input().lower()
# str.translate removes every separator character in a single pass,
# replacing the former loop of repeated str.replace calls (and the
# needless list() conversion before the comparison).
text = text.translate(str.maketrans('', '', ';:!* ,.'))
if text == text[::-1]:
    print('True')
else:
    print('False')
|
[
"dzanto@gmail.com"
] |
dzanto@gmail.com
|
83ad61d2619bb1ca26fad05f73878abaf6e94822
|
57cf004cb7b63dd371753608b51261a56e830780
|
/anthracite/model.py
|
a280fbf99820895f0c78669a55e1b115cc59a527
|
[] |
no_license
|
seanbhart/anthracite-python
|
7cc88f660f98e1f13fcc7738768df82ccd6bdb55
|
50a6d028545d46f62ee94a188f40cc33cb2e996d
|
refs/heads/main
| 2023-03-10T10:55:23.597728
| 2021-02-28T16:25:21
| 2021-02-28T16:25:21
| 332,944,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,660
|
py
|
import logging
from google.cloud import firestore
from db_local import db
from language_analysis.language_analysis import sentiment_analysis
from utils import settings
class Ticker:
    """A db object to retain settings regarding
    which and how to search for Ticker symbols in text.

    Fix: the class previously declared two __init__ methods; the first
    (field-by-field) one was dead code, silently shadowed by the second.
    Only the dict-based constructor — the one that actually took effect —
    is kept, so caller behavior is unchanged.
    """

    def __init__(self, data: dict):
        # Expected keys: 'ticker', 'status', 'name', 'exchange'.
        self.ticker = data['ticker']
        self.status = data['status']
        self.name = data['name']
        self.exchange = data['exchange']
class Notion:
    """A db object mirroring key data components in a praw subreddit
    submission response, plus fields (with defaults) for storing
    analysis values.

    Fix: the list-typed parameters previously used mutable default
    arguments ([]), so every instance constructed without them shared
    one list object. They now default to None and each instance gets
    its own fresh list.
    """

    def __init__(self,
                 host: str,
                 host_id: str,
                 text: str,
                 created: float,
                 upvotes: int,
                 downvotes: int = 0,
                 award_count: int = 0,
                 response_count: int = 0,
                 media_link: str = None,
                 categories: list = None,
                 parent: str = None,
                 associated: list = None,
                 sentiment: float = 0.0,
                 magnitude: float = 0.0,
                 tickers: list = None,
                 confidence: float = 0.0,
                 db_id: str = None,
                 ):
        self.host = host
        self.host_id = host_id
        self.text = text
        self.created = created
        self.upvotes = upvotes
        self.downvotes = downvotes
        self.award_count = award_count
        self.response_count = response_count
        self.media_link = media_link
        # Fresh lists per instance (see class docstring).
        self.categories = [] if categories is None else categories
        self.parent = parent
        self.associated = [] if associated is None else associated
        self.sentiment = sentiment
        self.magnitude = magnitude
        self.tickers = [] if tickers is None else tickers
        self.confidence = confidence
        self.db_id = db_id

    def upload(self):
        """Upload a Notion to the db, but since it might already
        exist, query for the submission based on the stored id
        the host passed for this data. Use the local db to store
        a record of existing Notions - reduce read requests to Firestore.
        """
        # DO NOT update old data - this leads to unnecessary writes.
        if not db.reddit_submission_exists(self.host_id):
            # Store the submission in the local db first to prevent
            # another thread from uploading this submission while
            # it is still being processed (below).
            db.reddit_insert(self.host_id)

            # Run language analysis ONLY AFTER knowing this entry will be
            # uploaded - Google Natural Language analysis can get expensive.
            sentiment = sentiment_analysis(self.text)
            self.sentiment = sentiment.sentiment
            self.magnitude = sentiment.magnitude

            client = firestore.Client()
            notion_doc_ref = client.collection(settings.Firestore.collection_notion).document()
            # Remove all None values from the dict before uploading
            filtered = {k: v for k, v in self.__dict__.items() if v is not None}
            notion_doc_ref.set(filtered)  # , merge=True)

            # # Update the tickers associated with the Notion to show new data was added
            # NOTE: Jan 29, 2021: No need to update Ticker db objects for now - no client
            # is currently listening to Ticker objects for updates.
            # self.update_tickers()

    def update_tickers(self):
        """Update all Tickers found for this Notion to indicate new data
        has been added for this Ticker. This is useful if a client is
        listening for changes to Ticker documents.
        """
        client = firestore.Client()
        ticker_docs = client.collection(settings.Firestore.collection_ticker) \
            .where(u'ticker', u'in', self.tickers) \
            .get()
        logging.info(f"(update_tickers) FIREBASE READ TICKER DOC COUNT: {len(ticker_docs)}")

        # For each ticker in the Notion, update the ticker's latest timestamp
        # with the Notion's created time
        for td in ticker_docs:
            ticker_doc_ref = client.collection(settings.Firestore.collection_ticker).document(td.id)
            ticker_doc_ref.set({
                'latest_notion': self.created
            }, merge=True)
|
[
"sean@tangojlabs.com"
] |
sean@tangojlabs.com
|
f32d758b89677311a4b99f70012e646fba66a3b3
|
589227ce144742ec2bc1ee3bfdb9aad5ac446a9e
|
/market/models.py
|
440740691d747226920d62d2855ed507633e07a4
|
[] |
no_license
|
amrtito91/Web-Market-by-Flask
|
112a5d30c1244efe3735d342df812f5297e9e732
|
8640755323461d7465ed11002b3ecccb8cd354f3
|
refs/heads/main
| 2023-08-02T01:57:03.091293
| 2021-09-26T10:23:37
| 2021-09-26T10:23:37
| 410,519,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
from market import db, login_manager
from market import bcrypt
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the id stored in the session back to a User row.
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Market user account: owns Items, a budget, and a bcrypt-hashed password."""
    id = db.Column(db.Integer(), primary_key=True)
    username = db.Column(db.String(length=30), nullable=False, unique=True)
    email_address = db.Column(db.String(length=50), nullable=False, unique=True)
    password_hash = db.Column(db.String(length=60), nullable=False)
    budget = db.Column(db.Integer(), nullable=False, default=1000)
    items = db.relationship('Item', backref='owned_user', lazy=True)

    @property
    def prettier_budget(self):
        """Budget formatted with a thousands separator, e.g. '1,000€'."""
        if len(str(self.budget)) >= 4:
            return f'{str(self.budget)[:-3]},{str(self.budget)[-3:]}€'
        else:
            return f'{self.budget}€'

    @property
    def password(self):
        # Fix: `return self.password` re-entered this property and caused
        # infinite recursion (RecursionError) on any read. The plaintext
        # password is write-only by design.
        raise AttributeError('password is write-only')

    @password.setter
    def password(self, password_plain_text):
        # Store only the bcrypt hash, never the plaintext.
        self.password_hash = bcrypt.generate_password_hash(password_plain_text).decode('utf-8')

    def check_password_correction(self, attempted_password):
        """True if attempted_password matches the stored hash."""
        return bcrypt.check_password_hash(self.password_hash, attempted_password)

    def can_purchase(self, item_obj):
        """True if the user can afford item_obj."""
        return self.budget >= item_obj.price

    def can_sell(self, item_obj):
        """True if the user currently owns item_obj."""
        return item_obj in self.items
class Item(db.Model):
    """A sellable market item, optionally owned by a User."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(length=30), nullable=False, unique=True)
    price = db.Column(db.Integer(), nullable=False)
    barcode = db.Column(db.String(length=12), nullable=False, unique=True)
    description = db.Column(db.String(length=1024), nullable=False, unique=True)
    owner = db.Column(db.Integer(), db.ForeignKey('user.id'))

    def __repr__(self):
        return f"Item {self.name}"

    def buy(self, user):
        # Transfer ownership to `user` and deduct the price from their budget.
        self.owner = user.id
        user.budget -= self.price
        db.session.commit()

    def sell(self, user):
        # Release ownership and refund the price to `user`.
        self.owner = None
        user.budget += self.price
        db.session.commit()
|
[
"noreply@github.com"
] |
amrtito91.noreply@github.com
|
19e5e6f78c097f1a9d427f933c66f574af8e9f14
|
e5989154160f6317ad3edc1a046119500a25bfd2
|
/torrent/bencode.py
|
1d9f7d4ce460dbee17acbcd987a0f5fa757dfb5d
|
[] |
no_license
|
terasaur/pyterasaur
|
e77c178a8c9e95d0c80c249e1c5bf90831841b31
|
bbd5821aa9c952d18421a88b2ec73b8129061a9b
|
refs/heads/master
| 2021-01-13T02:08:39.491272
| 2013-03-20T15:03:34
| 2013-03-20T15:03:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
# Written by Petru Paler
# see LICENSE.txt for license information
def decode_int(x, f):
    """Parse a bencoded integer "i<digits>e" at offset f; return (value, next_offset)."""
    f += 1
    end = x.index('e', f)
    try:
        n = int(x[f:end])
    except (OverflowError, ValueError):
        n = long(x[f:end])
    # Canonical form: reject "-0" and any number with leading zeros.
    if x[f] == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and end != f + 1:
        raise ValueError
    return (n, end + 1)

def decode_string(x, f):
    """Parse a bencoded string "<len>:<chars>" at offset f; return (value, next_offset)."""
    colon = x.index(':', f)
    try:
        n = int(x[f:colon])
    except (OverflowError, ValueError):
        n = long(x[f:colon])
    # Lengths may not carry leading zeros.
    if x[f] == '0' and colon != f + 1:
        raise ValueError
    start = colon + 1
    return (x[start:start + n], start + n)
def decode_list(x, f):
    # Parse a bencoded list "l...e" starting at offset f, dispatching each
    # element through the module-level decode_func table.
    r, f = [], f+1
    while x[f] != 'e':
        v, f = decode_func[x[f]](x, f)
        r.append(v)
    return (r, f + 1)

def decode_dict(x, f):
    # Parse a bencoded dict "d...e": alternating string keys and values,
    # with keys required to be strictly sorted (canonical form).
    r, f = {}, f+1
    lastkey = None
    # NOTE(review): `None >= k` works only on Python 2; on Python 3 the
    # first iteration's comparison would raise TypeError.
    while x[f] != 'e':
        k, f = decode_string(x, f)
        if lastkey >= k:
            raise ValueError
        lastkey = k
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
def bdecode(x):
    """Decode one complete bencoded value; ValueError on malformed or trailing input."""
    try:
        r, l = decode_func[x[0]](x, 0)
    except (IndexError, KeyError):
        # Empty input or unknown type marker.
        raise ValueError
    # The whole input must be consumed exactly.
    if l != len(x):
        raise ValueError
    return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
    """Wrapper marking a value as already-bencoded text."""
    __slots__ = ['bencoded']

    def __init__(self, s):
        self.bencoded = s

def encode_bencached(x, r):
    """Emit pre-encoded data verbatim."""
    r.append(x.bencoded)

def encode_int(x, r):
    """Emit an integer as i<digits>e."""
    r.extend(('i', str(x), 'e'))

def encode_string(x, r):
    """Emit a string as <len>:<chars>."""
    r.extend((str(len(x)), ':', x))

def encode_list(x, r):
    """Emit a list as l<items>e, dispatching on each item's type."""
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')

def encode_dict(x, r):
    """Emit a dict as d<pairs>e with keys in sorted (canonical) order."""
    r.append('d')
    for k, v in sorted(x.items()):
        r.extend((str(len(k)), ':', k))
        encode_func[type(v)](v, r)
    r.append('e')
encode_func = {}
encode_func[type(Bencached(0))] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
try:
from types import BooleanType
encode_func[BooleanType] = encode_int
except ImportError:
pass
def bencode(x):
    """Bencode x (int/long, str, list/tuple, dict, bool, Bencached) into a string."""
    r = []
    # Dispatch on the value's exact type via the module-level encode_func table.
    encode_func[type(x)](x, r)
    return ''.join(r)
try:
import psyco
psyco.bind(bdecode)
psyco.bind(bencode)
except ImportError:
pass
|
[
"john@ibiblio.org"
] |
john@ibiblio.org
|
463d11f29fc762a44190a6bedf244c7519e0506f
|
c97f544e5d6b06288cf84d12fb627601ee97e6b3
|
/03templates.py
|
44b75d67cc96b0ac35412193eaa4b718853462f7
|
[] |
no_license
|
13794521695/tornado_kj
|
667c68efce3fd9a6b32fdc3b729dd6fbffe9cc49
|
5ddcba1dbaa2aa1b45f3e1122b4843c86a2948c5
|
refs/heads/master
| 2020-04-07T13:21:23.237277
| 2018-11-20T15:20:16
| 2018-11-20T15:20:16
| 158,401,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,599
|
py
|
import time
import tornado.web
import tornado.ioloop
import tornado.options
import tornado.httpserver
from tornado.options import define, options
define('port', default=8000, help='run port', type=int)
class TemplatesHandler(tornado.web.RequestHandler):
    """Template demo: GET renders a static page; POST renders a template
    fed with a variable, a module, a list, and raw HTML."""

    def get(self):
        # NOTE: write() before render() still prints 'templates' at the top
        # of the rendered page.
        self.write('templates')
        # self.render('01in_out.html')
        self.render('test.html')

    def post(self, *args, **kwargs):
        user = self.get_argument('name', 'no')
        urllist = [
            ('https://www.shiguangkey.com/', '时光课堂'),
            ('https://www.baidu.com/', '百度'),
            ('https://www.zhihu.com/', '知乎'),
        ]
        atga = "<a href='https://www.baidu.com' target='_blank'>___百度___</a><br>"  # raw HTML, subject to (auto)escaping
        self.render('02templates.html',
                    username=user,  # the template can use this variable
                    time=time,      # modules passed in can be called directly from the template
                    urllist=urllist,
                    atga=atga,
                    )
if __name__ == '__main__':
    # Parse --port etc., then serve the template demo on /tem.
    tornado.options.parse_command_line()
    app = tornado.web.Application(
        handlers=[
            (r'/tem', TemplatesHandler),
        ],
        template_path='templates',
        static_path='static',
        autoescape=None,  # disables autoescaping for ALL template files
        debug=True
    )
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
[
"704213848@qq.com"
] |
704213848@qq.com
|
2ee4eb2074f9a863cac205aa5fa3493321175eb7
|
91bbc1674278092f912d9a336f294ec585512616
|
/commodity forecast/DCMecha_temp/StockCalculation.py
|
f029d96bf4ae9e21d3b26a843cd1d5d60b08dc31
|
[
"Apache-2.0"
] |
permissive
|
qingyinji/python_tool
|
5688273290df5313ffa37f208663b6cf9b537369
|
a06be8c8dc45055be4a9426585837465b40b7f91
|
refs/heads/main
| 2023-04-03T13:49:36.406981
| 2021-04-09T13:19:11
| 2021-04-09T13:19:11
| 355,482,430
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
from DataPond import Pond
import DataChange as Change
from DataPoly import Poly
from MysqlApl import DB
from copy import deepcopy
def cache(fun):
    """Buffer deep copies of incoming samples; invoke `fun` once 3+ are held.

    A sample whose first element is 0 acts as a reset signal: it clears
    the buffer and is not stored. NOTE(review): the wrapper always returns
    None, discarding `fun`'s return value — confirm this is intended.
    """
    buffered = []

    def wrapper(self, data):
        if data[0] != 0:
            # Deep-copy so later mutation of `data` cannot alter the buffer.
            buffered.append(deepcopy(data))
            if len(buffered) > 2:
                fun(self, buffered)
            return None
        buffered.clear()
        return None
    return wrapper
class StockCal:
    """Pipeline: buffer incoming samples (Pond), clean/transform them,
    fit a polynomial model (Poly), and extrapolate future values."""

    def __init__(self):
        self.pond = Pond(pond_size=8)  # initialize the data pool
        self.poly = Poly(mode='ave')   # initialize the fitter

    def fit(self, data):
        # Push into the data pool, then clean and transform before fitting.
        self.pond.append(data)  # data pool
        data_x, data_y = self.pond.get()
        data_x = Change.data_filter(data_x, mode='kal')
        data_x = Change.data_change(data_x)
        self.poly.fit(data_x, data_y)

    @cache
    def predict(self, data):
        # NOTE(review): the @cache wrapper discards this return value, so
        # callers receive None — confirm whether that is intended.
        data = Change.data_filter(data, mode='kal')  # data cleaning - filtering
        data = Change.data_change(data)[-3:]         # data transform
        ret = self.poly.predict(data, mode='o')      # estimate
        return ret
|
[
"js.liang@tuya.com"
] |
js.liang@tuya.com
|
8932948d9d1d5868adc9b5aa6521f1a754b82aac
|
79eae6db52c55c1946c01dead460422904b003c7
|
/msgs_stack/kraken_msgs/src/kraken_msgs/msg/_orientActionFeedback.py
|
8d1e3c07a148865152437c75c8c8b72e4ef5cf7e
|
[] |
no_license
|
VishnuDuttSharma/kraken_3.0
|
8fed84f31d73968258623d30f65e0790e7c5f5dc
|
69ccddaa9b226d03b2c67ea0e7a3bd0be581899c
|
refs/heads/master
| 2021-01-20T21:52:47.681347
| 2014-05-23T20:11:01
| 2014-05-23T20:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,660
|
py
|
"""autogenerated by genpy from kraken_msgs/orientActionFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import kraken_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class orientActionFeedback(genpy.Message):
_md5sum = "2167757dbef5e654b8ae17e47d0d389c"
_type = "kraken_msgs/orientActionFeedback"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
orientFeedback feedback
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: kraken_msgs/orientFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#feedback
float32 percent_done
"""
__slots__ = ['header','status','feedback']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','kraken_msgs/orientFeedback']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       header,status,feedback

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      # Let genpy.Message assign positional/keyword values first, then
      # backfill any field it left as None with a default instance.
      super(orientActionFeedback, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = kraken_msgs.msg.orientFeedback()
    else:
      # No arguments: default-construct every field.
      self.header = std_msgs.msg.Header()
      self.status = actionlib_msgs.msg.GoalStatus()
      self.feedback = kraken_msgs.msg.orientFeedback()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``

    Wire layout: Header (seq, stamp, frame_id), GoalStatus (goal_id stamp/id,
    status byte, text), then the float32 percent_done feedback. Strings are
    length-prefixed (uint32) UTF-8.
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      # Encode to UTF-8 bytes before measuring the final on-wire length.
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_f.pack(self.feedback.percent_done))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    Mirrors serialize(): reads Header, GoalStatus and the float32
    percent_done in order, advancing a start/end cursor over the buffer.
    """
    try:
      # Ensure sub-message containers exist before filling them in place.
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = kraken_msgs.msg.orientFeedback()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      start = end
      end += 4
      (self.feedback.percent_done,) = _struct_f.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    This message has no array fields, so the body is identical to
    serialize(); genpy still generates the numpy variant for API symmetry.
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_f.pack(self.feedback.percent_done))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    No array fields in this message, so this matches deserialize() exactly.
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = kraken_msgs.msg.orientFeedback()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      start = end
      end += 4
      (self.feedback.percent_done,) = _struct_f.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Precompiled little-endian struct packers shared by the (de)serializers above.
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")  # Header seq + stamp secs/nsecs
_struct_B = struct.Struct("<B")    # GoalStatus.status byte
_struct_2I = struct.Struct("<2I")  # GoalID stamp secs/nsecs
_struct_f = struct.Struct("<f")    # orientFeedback.percent_done
|
[
"dharmana.prudhvi@gmail.com"
] |
dharmana.prudhvi@gmail.com
|
d4f41b77c08a549a9e2c0ff2fe0e97fb8cc167dc
|
202bb7c5e37d3f117315e8bba3bd21e84b48fe6b
|
/alpha/WHSTINKER14.py
|
ecf708db7cd0ffe9bbb46120197ab0f16ebfd832
|
[] |
no_license
|
haishuowang/work_whs
|
897cd10a65035191e702811ed650061f7109b9fa
|
b6a17aefc5905ad9c11dba4d745591ed92b1e386
|
refs/heads/master
| 2020-07-03T10:30:14.231858
| 2020-06-09T08:47:18
| 2020-06-09T08:47:18
| 201,877,822
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39,569
|
py
|
import numpy as np
import pandas as pd
import os
import sys
from itertools import product, permutations, combinations
from datetime import datetime
import time
import matplotlib.pyplot as plt
from collections import OrderedDict
import sys
sys.path.append("/mnt/mfs/LIB_ROOT")
import open_lib.shared_paths.path as pt
from open_lib.shared_tools import send_email
def plot_send_result(pnl_df, sharpe_ratio, subject, text=''):
    """Plot the cumulative pnl curve, save it as tmp_figure/<subject>.png and
    e-mail the image (subject line = ``subject``) to the hard-coded recipient."""
    save_dir = os.path.join('/mnt/mfs/dat_whs', 'tmp_figure')
    png_file = os.path.join(save_dir, '{}.png'.format(subject))
    plt.figure(figsize=[16, 8])
    plt.plot(pnl_df.index, pnl_df.cumsum(), label='sharpe_ratio={}'.format(sharpe_ratio))
    plt.grid()
    plt.legend()
    plt.savefig(png_file)
    plt.close()
    send_email.send_email(text, ['whs@yingpei.com'], [png_file], subject)
class BackTest:
    """Vectorized back-test utility toolbox (sharpe, drawdown, z-scores,
    turnover, pot, IC/IR, VaR ...) shared by the factor-test scripts.

    Fixes vs. the previous revision:
      * ``AZ_split_stock`` called the non-existent ``str.endwith`` and would
        raise AttributeError on every use; corrected to ``endswith``.
      * ``AZ_Time_cost`` measured time at decoration instead of per call.
      * ``AZ_Catch_error`` narrowed from a bare ``except:`` to
        ``except Exception`` so KeyboardInterrupt/SystemExit propagate.
    """

    @staticmethod
    def AZ_Load_csv(target_path, index_time_type=True):
        """Load a '|'-separated table; parse the index as dates unless
        index_time_type is False."""
        if index_time_type:
            target_df = pd.read_table(target_path, sep='|', index_col=0, low_memory=False, parse_dates=True)
        else:
            target_df = pd.read_table(target_path, sep='|', index_col=0, low_memory=False)
        return target_df

    @staticmethod
    def AZ_Catch_error(func):
        """Decorator: run func; on failure print and return sys.exc_info()."""
        def _deco(*args, **kwargs):
            try:
                ret = func(*args, **kwargs)
            except Exception:
                ret = sys.exc_info()
                print(ret[0], ":", ret[1])
            return ret
        return _deco

    @staticmethod
    def AZ_Time_cost(func):
        """Decorator printing the wall-clock duration of each call.
        (Bug fix: timing now brackets the call instead of the decoration.)"""
        def _deco(*args, **kwargs):
            t1 = time.time()
            ret = func(*args, **kwargs)
            t2 = time.time()
            print(f'cost_time: {t2-t1}')
            return ret
        return _deco

    @staticmethod
    def AZ_Sharpe_y(pnl_df):
        """Annualized sharpe (250 trading days), rounded to 4 decimals."""
        return round((np.sqrt(250) * pnl_df.mean()) / pnl_df.std(), 4)

    @staticmethod
    def AZ_MaxDrawdown(asset_df):
        """Drawdown series: asset minus its running maximum (<= 0)."""
        return asset_df - np.maximum.accumulate(asset_df)

    def AZ_Col_zscore(self, df, n, cap=None, min_periods=1):
        """Per-column rolling z-score over window n, optionally capped at ±cap."""
        df_mean = self.AZ_Rolling_mean(df, n, min_periods=min_periods)
        df_std = df.rolling(window=n, min_periods=min_periods).std()
        target = (df - df_mean) / df_std
        if cap is not None:
            target[target > cap] = cap
            target[target < -cap] = -cap
        return target

    @staticmethod
    def AZ_Row_zscore(df, cap=None):
        """Cross-sectional (per-row) z-score, optionally capped at ±cap."""
        df_mean = df.mean(axis=1)
        df_std = df.std(axis=1)
        target = df.sub(df_mean, axis=0).div(df_std, axis=0)
        if cap is not None:
            target[target > cap] = cap
            target[target < -cap] = -cap
        return target

    @staticmethod
    def AZ_Rolling(df, n, min_periods=1):
        """Shorthand for df.rolling(window=n, min_periods=min_periods)."""
        return df.rolling(window=n, min_periods=min_periods)

    @staticmethod
    def AZ_Rolling_mean(df, n, min_periods=1):
        """Rolling mean with the first n-1 rows forced to NaN (full-window only)."""
        target = df.rolling(window=n, min_periods=min_periods).mean()
        target.iloc[:n - 1] = np.nan
        return target

    @staticmethod
    def AZ_Rolling_sharpe(pnl_df, roll_year=1, year_len=250, min_periods=1, cut_point_list=None, output=False):
        """Rolling annualized sharpe over roll_year years; returns quantiles at
        cut_point_list (and the rolling series too when output=True)."""
        if cut_point_list is None:
            cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
        rolling_sharpe = pnl_df.rolling(int(roll_year * year_len), min_periods=min_periods) \
            .apply(lambda x: np.sqrt(year_len) * x.mean() / x.std(), raw=True)
        rolling_sharpe.iloc[:int(roll_year * year_len) - 1] = np.nan
        cut_sharpe = rolling_sharpe.quantile(cut_point_list)
        if output:
            return rolling_sharpe, cut_sharpe.round(4)
        else:
            return cut_sharpe.round(4)

    @staticmethod
    def AZ_Pot(pos_df, asset_last):
        """
        Pnl per unit of turnover x 10000 — gauges sensitivity to trading cost.
        :param pos_df: position matrix
        :param asset_last: final cumulative pnl
        :return: 0 when there was no trading, else round(pnl/turnover*1e4, 2)
        """
        trade_times = pos_df.diff().abs().sum().sum()
        if trade_times == 0:
            return 0
        else:
            pot = asset_last / trade_times * 10000
            return round(pot, 2)

    @staticmethod
    def AZ_Normal_IC(signal, pct_n, min_valids=None, lag=0):
        """Daily cross-sectional IC: row-wise correlation of (lagged, zero-
        masked) signal with forward returns; optionally NaN out thin rows."""
        signal = signal.shift(lag)
        signal = signal.replace(0, np.nan)
        corr_df = signal.corrwith(pct_n, axis=1).dropna()
        if min_valids is not None:
            signal_valid = signal.count(axis=1)
            signal_valid[signal_valid < min_valids] = np.nan
            signal_valid[signal_valid >= min_valids] = 1
            corr_signal = corr_df * signal_valid
        else:
            corr_signal = corr_df
        return round(corr_signal, 6)

    def AZ_Normal_IR(self, signal, pct_n, min_valids=None, lag=0):
        """IR = mean(IC)/std(IC); also returns the IC series."""
        corr_signal = self.AZ_Normal_IC(signal, pct_n, min_valids, lag)
        ic_mean = corr_signal.mean()
        ic_std = corr_signal.std()
        ir = ic_mean / ic_std
        return ir, corr_signal

    @staticmethod
    def AZ_Leverage_ratio(asset_df):
        """
        Ratio of the 250-day return to the (negated) worst 20-day move —
        a crude leverage-capacity measure.
        """
        asset_20 = asset_df - asset_df.shift(20)
        asset_250 = asset_df - asset_df.shift(250)
        if asset_250.mean() > 0:
            return asset_250.mean() / (-asset_20.min())
        else:
            return asset_250.mean() / (-asset_20.max())

    @staticmethod
    def AZ_Locked_date_deal(position_df, locked_df):
        """
        Freeze positions on days a name cannot trade (suspension / limit
        up-down): tradable days are 1 in locked_df, untradable are NaN, so
        multiplying then forward-filling carries the last tradable position.
        :param position_df: position matrix
        :param locked_df: tradability mask (1 tradable, NaN locked)
        """
        position_df_adj = (position_df * locked_df).dropna(how='all', axis=0) \
            .fillna(method='ffill')
        return position_df_adj

    @staticmethod
    def AZ_Path_create(target_path):
        """Create target_path (and parents) if it does not exist yet."""
        if not os.path.exists(target_path):
            os.makedirs(target_path)

    @staticmethod
    def AZ_split_stock(stock_list):
        """
        Keep only A-share codes: 0*/3* with .SZ suffix or 6* with .SH suffix.
        (Bug fix: was calling the non-existent str.endwith.)
        """
        eqa = [x for x in stock_list if (x.startswith('0') or x.startswith('3')) and x.endswith('SZ')
               or x.startswith('6') and x.endswith('SH')]
        return eqa

    @staticmethod
    def AZ_add_stock_suffix(stock_list):
        """
        Append the exchange suffix to bare A-share codes:
        '600000' -> '600000.SH', '000001' -> '000001.SZ'.
        """
        return list(map(lambda x: x + '.SH' if x.startswith('6') else x + '.SZ', stock_list))

    @staticmethod
    def AZ_Delete_file(target_path, except_list=None):
        """Delete every file in target_path except the names in except_list."""
        if except_list is None:
            except_list = []
        assert type(except_list) == list
        file_list = os.listdir(target_path)
        file_list = list(set(file_list) - set(except_list))
        for file_name in sorted(file_list):
            os.remove(os.path.join(target_path, file_name))

    @staticmethod
    def AZ_turnover(pos_df):
        """Total traded volume divided by total gross position; 0 if flat."""
        diff_sum = pos_df.diff().abs().sum().sum()
        pos_sum = pos_df.abs().sum().sum()
        if pos_sum == 0:
            return .0
        return diff_sum / float(pos_sum)

    @staticmethod
    def AZ_annual_return(pos_df, return_df):
        """Annualized return on gross exposure (x250); 0 when never positioned."""
        temp_pnl = (pos_df * return_df).sum().sum()
        temp_pos = pos_df.abs().sum().sum()
        if temp_pos == 0:
            return .0
        else:
            return temp_pnl * 250.0 / temp_pos

    def AZ_fit_ratio(self, pos_df, return_df):
        """
        Given positions and daily returns, return
        sharpe * sqrt(abs(annual_return) / turnover), or 0 when turnover is 0.
        """
        sharp_ratio = self.AZ_Sharpe_y((pos_df * return_df).sum(axis=1))
        ann_return = self.AZ_annual_return(pos_df, return_df)
        turnover = self.AZ_turnover(pos_df)
        if turnover == 0:
            return .0
        else:
            return round(sharp_ratio * np.sqrt(abs(ann_return) / turnover), 2)

    def AZ_fit_ratio_rolling(self, pos_df, pnl_df, roll_year=1, year_len=250, min_periods=1, cut_point_list=None,
                             output=False):
        """Rolling version of AZ_fit_ratio; returns quantiles of the rolling
        fit ratio at cut_point_list."""
        if cut_point_list is None:
            cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
        rolling_sharpe, cut_sharpe = self.AZ_Rolling_sharpe(pnl_df, roll_year=roll_year, year_len=year_len,
                                                            min_periods=min_periods, cut_point_list=cut_point_list,
                                                            output=True)
        rolling_return = pnl_df.rolling(int(roll_year * year_len), min_periods=min_periods).apply(
            lambda x: 250.0 * x.sum().sum())
        rolling_diff_pos = pos_df.diff().abs().sum(axis=1).rolling(int(roll_year * year_len),
                                                                   min_periods=min_periods).apply(
            lambda x: x.sum().sum())
        rolling_return.iloc[:int(roll_year * year_len) - 1] = np.nan
        rolling_diff_pos.iloc[:int(roll_year * year_len) - 1] = np.nan
        rolling_fit_ratio = rolling_sharpe * np.sqrt(abs(rolling_return) / rolling_diff_pos)
        # Zero turnover windows produce +/-inf; treat them as missing.
        rolling_fit_ratio = rolling_fit_ratio.replace(np.inf, np.nan)
        rolling_fit_ratio = rolling_fit_ratio.replace(-np.inf, np.nan)
        cut_fit = rolling_fit_ratio.quantile(cut_point_list)
        return cut_fit.round(4)

    @staticmethod
    def AZ_VAR(pos_df, return_df, confidence_level, backward_len=500, forwward_len=250):
        """Historical VaR quantiles of the portfolio pnl distribution around
        each trade day. (Parameter name 'forwward_len' kept, typo and all,
        for backward compatibility with existing callers.)"""
        tradeDayList = pos_df.index[:-forwward_len]
        col01 = return_df.columns[0]
        varList = []
        cut_point_list = [0.05, 0.33, 0.5, 0.66, 0.95]
        if len(tradeDayList) == 0:
            print('数据量太少')
        else:
            for tradeDay in tradeDayList:
                tempPos = pos_df.loc[tradeDay, :]
                dayIndex = list(return_df.loc[:tradeDay, col01].index[-backward_len:]) + list(
                    return_df.loc[tradeDay:, col01].index[:forwward_len])
                return_df_c = return_df[list(tempPos.index)]
                historyReturn = list(return_df_c.mul(tempPos, axis=1).loc[dayIndex[0]:dayIndex[-1], :].sum(axis=1))
                historyReturn.sort()
                varList.append(historyReturn[int(len(historyReturn) * confidence_level)])
        var = pd.DataFrame({'var': varList}, index=tradeDayList)
        var = var.dropna()
        var_fit = var.quantile(cut_point_list)
        return list(var_fit['var'])
# Shared toolbox instance used by the module-level helper functions below.
bt = BackTest()
def filter_all(cut_date, pos_df_daily, pct_n, if_return_pnl=False, if_only_long=False):
    """Evaluate a daily position matrix against forward returns, split at
    cut_date into in-sample / out-of-sample.

    Returns (in_condition, out_condition, ic, sharpe quantiles u/m/d, pot,
    fit_ratio, leve_ratio, sp_in, sharpe_q_out[, pnl_df]); the trade
    direction for the out-of-sample check follows the in-sample median
    sharpe sign."""
    pnl_df = (pos_df_daily * pct_n).sum(axis=1)
    pnl_df = pnl_df.replace(np.nan, 0)
    # pnl_df = pd.Series(pnl_df)
    # In-sample performance
    return_in = pct_n[pct_n.index < cut_date]

    pnl_df_in = pnl_df[pnl_df.index < cut_date]
    asset_df_in = pnl_df_in.cumsum()
    last_asset_in = asset_df_in.iloc[-1]

    pos_df_daily_in = pos_df_daily[pos_df_daily.index < cut_date]
    pot_in = AZ_Pot(pos_df_daily_in, last_asset_in)

    leve_ratio = AZ_Leverage_ratio(asset_df_in)
    # Negative leverage ratio means no meaningful drawdown bound; cap it.
    if leve_ratio < 0:
        leve_ratio = 100
    sharpe_q_in_df = bt.AZ_Rolling_sharpe(pnl_df_in, roll_year=1, year_len=250, min_periods=1,
                                          cut_point_list=[0.3, 0.5, 0.7], output=False)
    sp_in = bt.AZ_Sharpe_y(pnl_df_in)
    fit_ratio = bt.AZ_fit_ratio(pos_df_daily_in, return_in)
    ic = round(bt.AZ_Normal_IC(pos_df_daily_in, pct_n, min_valids=None, lag=0).mean(), 6)
    sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d = sharpe_q_in_df.values
    in_condition_u = sharpe_q_in_df_u > 0.9 and leve_ratio > 1
    in_condition_d = sharpe_q_in_df_d < -0.9 and leve_ratio > 1
    # Long-only vs. long-short acceptance criterion
    if if_only_long:
        in_condition = in_condition_u
    else:
        in_condition = in_condition_u | in_condition_d

    if sharpe_q_in_df_m > 0:
        way = 1
    else:
        way = -1

    # Out-of-sample performance
    pnl_df_out = pnl_df[pnl_df.index >= cut_date]
    out_condition, sharpe_q_out = out_sample_perf_c(pnl_df_out, way=way)
    if if_return_pnl:
        return in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
               fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df
    else:
        return in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
               fit_ratio, leve_ratio, sp_in, sharpe_q_out
def mul_fun(a, b):
    """Sign-aware elementwise product: positive where both inputs are
    positive, negative where both are negative, zero on mixed signs."""
    pos_leg = a.where(a > 0, 0).mul(b.where(b > 0, 0))
    neg_leg = a.where(a < 0, 0).mul(b.where(b < 0, 0))
    return pos_leg.sub(neg_leg)
def sub_fun(a, b):
    """Elementwise difference of two aligned series/frames."""
    return a - b
def add_fun(a, b):
    """Elementwise sum of two aligned series/frames."""
    return a + b
def AZ_Cut_window(df, begin_date, end_date=None, column=None):
    """Slice df to rows strictly after begin_date (and strictly before
    end_date when given), comparing on the index or on `column`."""
    key = df.index if column is None else df[column]
    mask = key > begin_date
    if end_date is not None:
        mask = mask & (key < end_date)
    return df[mask]
def AZ_Leverage_ratio(asset_df):
    """
    Ratio of the mean 250-day return to the negated extreme 20-day move,
    rounded to 2 decimals (denominator uses -min when the yearly drift is
    positive, -max otherwise).
    """
    monthly_move = asset_df - asset_df.shift(20)
    yearly_move = asset_df - asset_df.shift(250)
    yearly_mean = yearly_move.mean()
    denom = -monthly_move.min() if yearly_mean > 0 else -monthly_move.max()
    return round(yearly_mean / denom, 2)
def pos_daily_fun(df, n=5):
    """Rolling n-day sum (min_periods=1): spreads one-day orders into an
    n-day held position."""
    return df.rolling(n, min_periods=1).sum()
def AZ_Pot(pos_df_daily, last_asset):
    """Pnl per unit of turnover x 10000 (rounded to 2); 0 when nothing traded."""
    turnover = pos_df_daily.diff().abs().sum().sum()
    if turnover == 0:
        return 0
    return round(last_asset / turnover * 10000, 2)
def out_sample_perf_c(pnl_df_out, way=1):
    """Out-of-sample acceptance: annualized sharpe of way*pnl must exceed
    0.8. Returns (passed, round(sharpe * way, 2))."""
    oriented_pnl = pnl_df_out if way == 1 else -pnl_df_out
    sharpe_out = bt.AZ_Sharpe_y(oriented_pnl)
    out_condition = sharpe_out > 0.8
    return out_condition, round(sharpe_out * way, 2)
def create_fun_set_2(fun_set):
    """Build every two-step composition fun_2(fun_1(a, b), c) over fun_set
    (Cartesian square) and return them as a list. Uses exec so each generated
    function gets a readable name such as ``add_mul_fun``; the template body
    refers to the base ``*_fun`` names, which must be importable here."""
    mix_fun_set = []
    for fun_1, fun_2 in product(fun_set, repeat=2):
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
    mix_1 = {0}_fun(a, b)
    mix_2 = {1}_fun(mix_1, c)
    return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        exec('mix_fun_set += [{0}_{1}_fun]'.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
def create_fun_set_2_(fun_set):
    """Same as create_fun_set_2 but returns a dict keyed by the generated
    function name (e.g. 'add_mul_fun') instead of a list."""
    mix_fun_set = {}
    for fun_1, fun_2 in product(fun_set, repeat=2):
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
    mix_1 = {0}_fun(a, b)
    mix_2 = {1}_fun(mix_1, c)
    return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        exec('mix_fun_set[\'{0}_{1}_fun\'] = {0}_{1}_fun'
             .format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
def create_fun_set_2_crt():
    """Restricted variant: outer step fixed to mul_fun, inner step drawn from
    add/sub/mul; returns the three compositions as a list."""
    fun_2 = mul_fun
    mix_fun_set = []
    for fun_1 in [add_fun, sub_fun, mul_fun]:
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
    mix_1 = {0}_fun(a, b)
    mix_2 = {1}_fun(mix_1, c)
    return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        exec('mix_fun_set += [{0}_{1}_fun]'.format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
def create_fun_set_2_crt_():
    """Dict-returning twin of create_fun_set_2_crt (keys are the generated
    function names)."""
    fun_2 = mul_fun
    mix_fun_set = dict()
    for fun_1 in [add_fun, sub_fun, mul_fun]:
        exe_str_1 = """def {0}_{1}_fun(a, b, c):
    mix_1 = {0}_fun(a, b)
    mix_2 = {1}_fun(mix_1, c)
    return mix_2
""".format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0])
        exec(compile(exe_str_1, '', 'exec'))
        exec('mix_fun_set[\'{0}_{1}_fun\'] = {0}_{1}_fun'
             .format(fun_1.__name__.split('_')[0], fun_2.__name__.split('_')[0]))
    return mix_fun_set
class FactorTest:
def __init__(self, root_path, if_save, if_new_program, begin_date, cut_date, end_date, time_para_dict, sector_name,
hold_time, lag, return_file, if_hedge, if_only_long, if_weight=0.5, ic_weight=0.5,
para_adj_set_list=None):
self.root_path = root_path
self.if_save = if_save
self.if_new_program = if_new_program
self.begin_date = begin_date
self.cut_date = cut_date
self.end_date = end_date
self.time_para_dict = time_para_dict
self.sector_name = sector_name
self.hold_time = hold_time
self.lag = lag
self.return_file = return_file
self.if_hedge = if_hedge
self.if_only_long = if_only_long
self.if_weight = if_weight
self.ic_weight = ic_weight
if para_adj_set_list is None:
self.para_adj_set_list = [
{'pot_in_num': 50, 'leve_ratio_num': 2, 'sp_in': 1.5, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 40, 'leve_ratio_num': 2, 'sp_in': 1.5, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 50, 'leve_ratio_num': 2, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1},
{'pot_in_num': 50, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 2},
{'pot_in_num': 50, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1},
{'pot_in_num': 40, 'leve_ratio_num': 1, 'sp_in': 1, 'ic_num': 0.0, 'fit_ratio': 1}]
return_choose = self.load_return_data()
self.xinx = return_choose.index
sector_df = self.load_sector_data()
self.xnms = sector_df.columns
return_choose = return_choose.reindex(columns=self.xnms)
self.sector_df = sector_df.reindex(index=self.xinx)
# print('Loaded sector DataFrame!')
if if_hedge:
if ic_weight + if_weight != 1:
exit(-1)
else:
if_weight = 0
ic_weight = 0
index_df_1 = self.load_index_data('000300').fillna(0)
# index_weight_1 = self.load_index_weight_data('000300')
index_df_2 = self.load_index_data('000905').fillna(0)
# index_weight_2 = self.load_index_weight_data('000905')
#
# weight_df = if_weight * index_weight_1 + ic_weight * index_weight_2
hedge_df = if_weight * index_df_1 + ic_weight * index_df_2
self.return_choose = return_choose.sub(hedge_df, axis=0)
# print('Loaded return DataFrame!')
suspendday_df, limit_buy_sell_df = self.load_locked_data()
limit_buy_sell_df_c = limit_buy_sell_df.shift(-1)
limit_buy_sell_df_c.iloc[-1] = 1
suspendday_df_c = suspendday_df.shift(-1)
suspendday_df_c.iloc[-1] = 1
self.suspendday_df_c = suspendday_df_c
self.limit_buy_sell_df_c = limit_buy_sell_df_c
# print('Loaded suspendday_df and limit_buy_sell DataFrame!')
def reindex_fun(self, df):
return df.reindex(index=self.xinx, columns=self.xnms)
@staticmethod
def create_log_save_path(target_path):
top_path = os.path.split(target_path)[0]
if not os.path.exists(top_path):
os.mkdir(top_path)
if not os.path.exists(target_path):
os.mknod(target_path)
@staticmethod
def row_extre(raw_df, sector_df, percent):
raw_df = raw_df * sector_df
target_df = raw_df.rank(axis=1, pct=True)
target_df[target_df >= 1 - percent] = 1
target_df[target_df <= percent] = -1
target_df[(target_df > percent) & (target_df < 1 - percent)] = 0
return target_df
@staticmethod
def pos_daily_fun(df, n=5):
return df.rolling(window=n, min_periods=1).sum()
def check_factor(self, name_list, file_name):
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + self.sector_name)
exist_factor = set([x[:-4] for x in os.listdir(load_path)])
print()
use_factor = set(name_list)
a = use_factor - exist_factor
if len(a) != 0:
print('factor not enough!')
print(a)
print(len(a))
send_email.send_email(f'{file_name} factor not enough!', ['whs@yingpei.com'], [], 'Factor Test Warning!')
@staticmethod
def create_all_para(tech_name_list, funda_name_list):
target_list_1 = []
for tech_name in tech_name_list:
for value in combinations(funda_name_list, 2):
target_list_1 += [[tech_name] + list(value)]
target_list_2 = []
for funda_name in funda_name_list:
for value in combinations(tech_name_list, 2):
target_list_2 += [[funda_name] + list(value)]
target_list = target_list_1 + target_list_2
return target_list
# 获取剔除新股的矩阵
def get_new_stock_info(self, xnms, xinx):
new_stock_data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Tab01/CDSY_SECUCODE/LISTSTATE.csv'))
new_stock_data.fillna(method='ffill', inplace=True)
# 获取交易日信息
return_df = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv')).astype(float)
trade_time = return_df.index
new_stock_data = new_stock_data.reindex(index=trade_time).fillna(method='ffill')
target_df = new_stock_data.shift(40).notnull().astype(int)
target_df = target_df.reindex(columns=xnms, index=xinx)
return target_df
# 获取剔除st股票的矩阵
def get_st_stock_info(self, xnms, xinx):
data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Tab01/CDSY_CHANGEINFO/CHANGEA.csv'))
data = data.reindex(columns=xnms, index=xinx)
data.fillna(method='ffill', inplace=True)
data = data.astype(str)
target_df = data.applymap(lambda x: 0 if 'ST' in x or 'PT' in x else 1)
return target_df
def load_return_data(self):
return_choose = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv'))
return_choose = return_choose[(return_choose.index >= self.begin_date) & (return_choose.index < self.end_date)]
return return_choose
# 获取sector data
def load_sector_data(self):
market_top_n = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_10/' + self.sector_name + '.csv'))
market_top_n = market_top_n.reindex(index=self.xinx)
market_top_n.dropna(how='all', axis='columns', inplace=True)
xnms = market_top_n.columns
xinx = market_top_n.index
new_stock_df = self.get_new_stock_info(xnms, xinx)
st_stock_df = self.get_st_stock_info(xnms, xinx)
sector_df = market_top_n * new_stock_df * st_stock_df
sector_df.replace(0, np.nan, inplace=True)
return sector_df
def load_index_weight_data(self, index_name):
index_info = bt.AZ_Load_csv(self.root_path + f'/EM_Funda/IDEX_YS_WEIGHT_A/SECURITYNAME_{index_name}.csv')
index_info = self.reindex_fun(index_info)
index_mask = (index_info.notnull() * 1).replace(0, np.nan)
mkt_cap = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/LICO_YS_STOCKVALUE/AmarketCapExStri.csv'))
mkt_roll = mkt_cap.rolling(250, min_periods=0).mean()
mkt_roll = self.reindex_fun(mkt_roll)
mkt_roll_qrt = np.sqrt(mkt_roll)
mkt_roll_qrt_index = mkt_roll_qrt * index_mask
index_weight = mkt_roll_qrt_index.div(mkt_roll_qrt_index.sum(axis=1), axis=0)
return index_weight
# 涨跌停都不可交易
def load_locked_data(self):
raw_suspendday_df = bt.AZ_Load_csv(
os.path.join(self.root_path, 'EM_Funda/TRAD_TD_SUSPENDDAY/SUSPENDREASON.csv'))
suspendday_df = raw_suspendday_df.isnull().astype(int)
suspendday_df = suspendday_df.reindex(columns=self.xnms, index=self.xinx, fill_value=True)
suspendday_df.replace(0, np.nan, inplace=True)
return_df = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/DERIVED_14/aadj_r.csv')).astype(float)
limit_buy_sell_df = (return_df.abs() < 0.095).astype(int)
limit_buy_sell_df = limit_buy_sell_df.reindex(columns=self.xnms, index=self.xinx, fill_value=1)
limit_buy_sell_df.replace(0, np.nan, inplace=True)
return suspendday_df, limit_buy_sell_df
# 获取index data
def load_index_data(self, index_name):
data = bt.AZ_Load_csv(os.path.join(self.root_path, 'EM_Funda/INDEX_TD_DAILYSYS/CHG.csv'))
target_df = data[index_name].reindex(index=self.xinx)
return target_df * 0.01
# 读取部分factor
def load_part_factor(self, sector_name, xnms, xinx, file_list):
factor_set = OrderedDict()
for file_name in file_list:
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + sector_name)
target_df = pd.read_pickle(os.path.join(load_path, file_name + '.pkl'))
factor_set[file_name] = target_df.reindex(columns=xnms, index=xinx).fillna(0)
return factor_set
# 读取factor
def load_factor(self, file_name):
factor_set = OrderedDict()
load_path = os.path.join('/mnt/mfs/dat_whs/data/new_factor_data/' + self.sector_name)
target_df = pd.read_pickle(os.path.join(load_path, file_name + '.pkl'))
factor_set[file_name] = target_df.reindex(columns=self.xnms, index=self.xinx).fillna(0)
return factor_set
def deal_mix_factor(self, mix_factor):
if self.if_only_long:
mix_factor = mix_factor[mix_factor > 0]
# 下单日期pos
order_df = mix_factor.replace(np.nan, 0)
# 排除入场场涨跌停的影响
order_df = order_df * self.sector_df * self.limit_buy_sell_df_c * self.suspendday_df_c
order_df = order_df.div(order_df.abs().sum(axis=1).replace(0, np.nan), axis=0)
order_df[order_df > 0.05] = 0.05
order_df[order_df < -0.05] = -0.05
daily_pos = pos_daily_fun(order_df, n=self.hold_time)
daily_pos.fillna(0, inplace=True)
# 排除出场涨跌停的影响
daily_pos = daily_pos * self.limit_buy_sell_df_c * self.suspendday_df_c
daily_pos.fillna(method='ffill', inplace=True)
return daily_pos
def save_load_control(self, tech_name_list, funda_name_list, suffix_name, file_name):
# 参数存储与加载的路径控制
result_save_path = '/mnt/mfs/dat_whs/result'
if self.if_new_program:
now_time = datetime.now().strftime('%Y%m%d_%H%M')
if self.if_only_long:
file_name = '{}_{}_{}_hold_{}_{}_{}_long.txt' \
.format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
else:
file_name = '{}_{}_{}_hold_{}_{}_{}.txt' \
.format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
log_save_file = os.path.join(result_save_path, 'log', file_name)
result_save_file = os.path.join(result_save_path, 'result', file_name)
para_save_file = os.path.join(result_save_path, 'para', file_name)
para_dict = dict()
para_ready_df = pd.DataFrame(list(self.create_all_para(tech_name_list, funda_name_list)))
total_para_num = len(para_ready_df)
if self.if_save:
self.create_log_save_path(log_save_file)
self.create_log_save_path(result_save_file)
self.create_log_save_path(para_save_file)
para_dict['para_ready_df'] = para_ready_df
para_dict['tech_name_list'] = tech_name_list
para_dict['funda_name_list'] = funda_name_list
pd.to_pickle(para_dict, para_save_file)
else:
log_save_file = os.path.join(result_save_path, 'log', file_name)
result_save_file = os.path.join(result_save_path, 'result', file_name)
para_save_file = os.path.join(result_save_path, 'para', file_name)
para_tested_df = pd.read_table(log_save_file, sep='|', header=None, index_col=0)
para_all_df = pd.read_pickle(para_save_file)
total_para_num = len(para_all_df)
para_ready_df = para_all_df.loc[sorted(list(set(para_all_df.index) - set(para_tested_df.index)))]
print(file_name)
print(f'para_num:{len(para_ready_df)}')
return para_ready_df, log_save_file, result_save_file, total_para_num
@staticmethod
def create_all_para_(change_list, ratio_list, tech_list):
target_list = list(product(change_list, ratio_list, tech_list))
return target_list
    def save_load_control_(self, change_list, ratio_list, tech_list, suffix_name, file_name):
        # Path control for saving and loading the parameter grid.
        # Variant of save_load_control for the (change, ratio, tech) grid built
        # by create_all_para_; the save/resume logic is otherwise identical.
        result_save_path = '/mnt/mfs/dat_whs/result'
        if self.if_new_program:
            now_time = datetime.now().strftime('%Y%m%d_%H%M')
            if self.if_only_long:
                file_name = '{}_{}_{}_hold_{}_{}_{}_long.txt' \
                    .format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
            else:
                file_name = '{}_{}_{}_hold_{}_{}_{}.txt' \
                    .format(self.sector_name, self.if_hedge, now_time, self.hold_time, self.return_file, suffix_name)
            log_save_file = os.path.join(result_save_path, 'log', file_name)
            result_save_file = os.path.join(result_save_path, 'result', file_name)
            para_save_file = os.path.join(result_save_path, 'para', file_name)
            para_dict = dict()
            para_ready_df = pd.DataFrame(list(self.create_all_para_(change_list, ratio_list, tech_list)))
            total_para_num = len(para_ready_df)
            if self.if_save:
                self.create_log_save_path(log_save_file)
                self.create_log_save_path(result_save_file)
                self.create_log_save_path(para_save_file)
                para_dict['para_ready_df'] = para_ready_df
                para_dict['change_list'] = change_list
                para_dict['ratio_list'] = ratio_list
                para_dict['tech_list'] = tech_list
                pd.to_pickle(para_dict, para_save_file)
        else:
            # Resume mode: skip rows whose index already appears in the log.
            log_save_file = os.path.join(result_save_path, 'log', file_name)
            result_save_file = os.path.join(result_save_path, 'result', file_name)
            para_save_file = os.path.join(result_save_path, 'para', file_name)
            para_tested_df = pd.read_table(log_save_file, sep='|', header=None, index_col=0)
            para_all_df = pd.read_pickle(para_save_file)
            total_para_num = len(para_all_df)
            para_ready_df = para_all_df.loc[sorted(list(set(para_all_df.index) - set(para_tested_df.index)))]
        print(file_name)
        print(f'para_num:{len(para_ready_df)}')
        return para_ready_df, log_save_file, result_save_file, total_para_num
class FactorTestSector(FactorTest):
    """FactorTest specialised to per-sector Remy factor files under DERIVED_F3."""

    def __init__(self, *args):
        super(FactorTestSector, self).__init__(*args)

    def load_remy_factor_2(self, file_name, sector_name):
        # Choose the factor directory matching the sector universe
        # (T300P for top-300-plus, T500P for everything else).
        if sector_name.startswith('market_top_300plus'):
            factor_path = '/mnt/mfs/DAT_EQT/EM_Funda/DERIVED_F3/T300P'
        elif sector_name.startswith('market_top_300to800plus'):
            factor_path = '/mnt/mfs/DAT_EQT/EM_Funda/DERIVED_F3/T500P'
        else:
            factor_path = '/mnt/mfs/DAT_EQT/EM_Funda/DERIVED_F3/T500P'
        raw_df = bt.AZ_Load_csv(f'{factor_path}/{file_name}')
        # Heuristic: sample the distinct values in the last row's first 100
        # columns to decide whether the factor is continuous or discrete.
        a = list(set(raw_df.iloc[-1, :100].dropna().values))
        tmp_df = raw_df.reindex(index=self.xinx, columns=self.xnms)
        if len(a) > 5:
            # Continuous factor: convert to top/bottom-30% extreme scores.
            target_df = self.row_extre(tmp_df, self.sector_df, 0.3)
        else:
            # Already a discrete signal: use as-is.
            target_df = tmp_df
            pass
        if self.if_only_long:
            # Long-only mode keeps positive signals only.
            target_df = target_df[target_df > 0]
        return target_df

    def single_test(self, name_1):
        """Backtest one factor and return its sign: +1 if its PnL Sharpe is
        positive, -1 otherwise (used as the buy/sell direction downstream)."""
        factor_1 = self.load_remy_factor_2(name_1, self.sector_name)
        daily_pos = self.deal_mix_factor(factor_1).shift(2)
        in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
        fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = filter_all(self.cut_date, daily_pos, self.return_choose,
                                                                        if_return_pnl=True,
                                                                        if_only_long=self.if_only_long)
        if bt.AZ_Sharpe_y(pnl_df) > 0:
            return 1
        else:
            return -1

    def single_test_c(self, name_list):
        """Combine several factors into one signal, orienting each by the sign
        of its standalone backtest."""
        mix_factor = pd.DataFrame()
        for i in range(len(name_list)):
            tmp_name = name_list[i]
            buy_sell_way = self.single_test(tmp_name)
            tmp_factor = self.load_remy_factor_2(tmp_name, self.sector_name)
            mix_factor = mix_factor.add(tmp_factor * buy_sell_way, fill_value=0)
        # daily_pos = self.deal_mix_factor(mix_factor).shift(2)
        # in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d, pot_in, \
        # fit_ratio, leve_ratio, sp_in, sharpe_q_out, pnl_df = \
        #     filter_all(self.cut_date, daily_pos, self.return_choose, if_return_pnl=True, if_only_long=False)
        # print(in_condition, out_condition, ic, sharpe_q_in_df_u, sharpe_q_in_df_m, sharpe_q_in_df_d,
        #       pot_in, fit_ratio, leve_ratio, sp_in, sharpe_q_out)
        return mix_factor
def load_index_data(index_name, xinx):
    """Return daily fractional returns of one index, aligned to the dates in xinx.

    The source file stores percentage changes, hence the 0.01 scaling.
    """
    chg_path = os.path.join('/mnt/mfs/DAT_EQT', 'EM_Tab09/INDEX_TD_DAILYSYS/CHG.csv')
    chg_df = bt.AZ_Load_csv(chg_path)
    index_series = chg_df[index_name].reindex(index=xinx)
    return index_series * 0.01
def get_corr_matrix(cut_date=None):
    """Rebuild the hedged daily PnL series of every live WHS position file.

    Scans /mnt/mfs/AAPOS for WHS* position files, infers each file's hedge
    (IF01 -> 000300, IC01 -> 000905, both -> 50/50 blend), recomputes its
    hedged PnL from stock returns, and returns one PnL column per file.
    If cut_date is given, only rows after it are kept.
    """
    pos_file_list = [x for x in os.listdir('/mnt/mfs/AAPOS') if x.startswith('WHS')]
    # NOTE(review): positions are read from /mnt/mfs but returns from
    # /media/hdd1 — confirm both mounts point at the same data snapshot.
    return_df = bt.AZ_Load_csv('/media/hdd1/DAT_EQT/EM_Funda/DERIVED_14/aadj_r.csv').astype(float)
    index_df_1 = load_index_data('000300', return_df.index).fillna(0)
    index_df_2 = load_index_data('000905', return_df.index).fillna(0)
    sum_pnl_df = pd.DataFrame()
    for pos_file_name in pos_file_list:
        pos_df = bt.AZ_Load_csv('/mnt/mfs/AAPOS/{}'.format(pos_file_name))
        cond_1 = 'IF01' in pos_df.columns
        cond_2 = 'IC01' in pos_df.columns
        if cond_1 and cond_2:
            hedge_df = 0.5 * index_df_1 + 0.5 * index_df_2
            return_df_c = return_df.sub(hedge_df, axis=0)
        elif cond_1:
            hedge_df = index_df_1
            return_df_c = return_df.sub(hedge_df, axis=0)
        elif cond_2:
            hedge_df = index_df_2
            return_df_c = return_df.sub(hedge_df, axis=0)
        else:
            # Position file without a recognised hedge leg: skip it.
            print('alpha hedge error')
            continue
        # 2-day shift: positions take effect with the usual execution lag.
        pnl_df = (pos_df.shift(2) * return_df_c).sum(axis=1)
        pnl_df.name = pos_file_name
        sum_pnl_df = pd.concat([sum_pnl_df, pnl_df], axis=1)
        # plot_send_result(pnl_df, bt.AZ_Sharpe_y(pnl_df), 'mix_factor')
    if cut_date is not None:
        sum_pnl_df = sum_pnl_df[sum_pnl_df.index > cut_date]
    return sum_pnl_df
def get_all_pnl_corr(pnl_df, col_name):
    """Correlate pnl_df against the stored benchmark PnL table.

    Uses the last 600 rows, prints every correlation above 0.5, and returns
    the subset above 0.65.
    """
    stored_pnls = pd.read_csv('/mnt/mfs/AATST/corr_tst_pnls', sep='|', index_col=0, parse_dates=True)
    combined = pd.concat([stored_pnls, pnl_df], axis=1)
    corr_col = combined.iloc[-600:].corr()[col_name]
    print(corr_col[corr_col > 0.5])
    return corr_col[corr_col > 0.65]
def corr_test_fun(pnl_df, alpha_name):
    """Check a candidate alpha's PnL correlation against live and stored alphas,
    then email a PASS/FAIL verdict.

    Threshold note: the candidate's correlation with itself is 1.0 and is one
    of the rows, so ">= 2" means at least one *other* series exceeds 0.65.
    """
    sum_pnl_df = get_corr_matrix(cut_date=None)
    sum_pnl_df_c = pd.concat([sum_pnl_df, pnl_df], axis=1)
    corr_self = sum_pnl_df_c.corr()[[alpha_name]]
    other_corr = get_all_pnl_corr(pnl_df, alpha_name)
    # print(other_corr)
    self_corr = corr_self[corr_self > 0.65].dropna(axis=0)
    print(corr_self[corr_self > 0.5].dropna(axis=0))
    if len(self_corr) >= 2 or len(other_corr) >= 2:
        print('FAIL!')
        send_email.send_email('FAIL!\n' + self_corr.to_html(),
                              ['whs@yingpei.com'],
                              [],
                              '[RESULT DEAL]' + alpha_name)
    else:
        print('SUCCESS!')
        send_email.send_email('SUCCESS!\n' + self_corr.to_html(),
                              ['whs@yingpei.com'],
                              [],
                              '[RESULT DEAL]' + alpha_name)
    print('______________________________________')
    return 0
def config_test():
    """Build the WHSTINKER14 alpha from a fixed factor list, run the
    correlation acceptance test, and write the final position file.

    Returns the final position DataFrame (including the hedge columns).
    """
    # Factor names and run configuration are hard-coded for this alpha.
    factor_str = 'REMRATIO.ADJV02.034|REMRATIO.ADCH01.082|REMRATIO.VAGR02.020|REMRATIO.ADCH02.023|REMRATIO.ADCH02.024'
    info_str = 'market_top_300to800plus|5|False'
    factor_name_list = factor_str.split('|')
    alpha_name = 'WHSTINKER14'
    sector_name, hold_time, if_only_long = info_str.split('|')
    hold_time = int(hold_time)
    if if_only_long == 'True':
        if_only_long = True
    else:
        if_only_long = False
    cut_date = '20180601'
    begin_date = pd.to_datetime('20130101')
    end_date = datetime.now()
    root_path = '/media/hdd1/DAT_EQT'
    # root_path = '/mnt/mfs/DAT_EQT'
    if_save = False
    if_new_program = True
    lag = 2
    return_file = ''
    if_hedge = True
    # Hedge weights follow the universe: IF (000300) for top-300-plus,
    # IC (000905) for 300-to-800-plus, a 50/50 blend otherwise.
    if sector_name.startswith('market_top_300plus'):
        if_weight = 1
        ic_weight = 0
    elif sector_name.startswith('market_top_300to800plus'):
        if_weight = 0
        ic_weight = 1
    else:
        if_weight = 0.5
        ic_weight = 0.5
    time_para_dict = dict()
    main = FactorTestSector(root_path, if_save, if_new_program, begin_date, cut_date, end_date, time_para_dict,
                            sector_name, hold_time, lag, return_file, if_hedge, if_only_long, if_weight, ic_weight)
    mix_factor = main.single_test_c(factor_name_list)
    sum_pos_df_new = main.deal_mix_factor(mix_factor)
    # NOTE(review): if both weights are nonzero, the IC01 leg below is sized on
    # a sum that already includes IF01 — confirm that is intended.
    if if_weight != 0:
        sum_pos_df_new['IF01'] = -if_weight * sum_pos_df_new.sum(axis=1)
    if ic_weight != 0:
        sum_pos_df_new['IC01'] = -ic_weight * sum_pos_df_new.sum(axis=1)
    pnl_df = (sum_pos_df_new.shift(2) * main.return_choose).sum(axis=1)
    pnl_df.name = alpha_name
    corr_test_fun(pnl_df, alpha_name)
    plot_send_result(pnl_df, bt.AZ_Sharpe_y(pnl_df), alpha_name)
    sum_pos_df_new.round(10).fillna(0).to_csv(f'/mnt/mfs/AAPOS/{alpha_name}.pos', sep='|', index_label='Date')
    return sum_pos_df_new
if __name__ == '__main__':
    # Run the full alpha build and report wall-clock time in seconds.
    started = time.time()
    sum_pos_df = config_test()
    print(round(time.time() - started, 4))
|
[
"1612255875@qq.com"
] |
1612255875@qq.com
|
d7852839eba89e6a94266098418f2bc2f8fc6002
|
df9f83a5134d0eac8f259ebd3243d5494adde21d
|
/app/main.py
|
387ddcd17e595f33befd93b58ac088ae6b774841
|
[] |
no_license
|
alexgorin/coolapp
|
e40af20bbd96567092de1766e9e611178b6abba0
|
32e8462a44d1b3eabe45556b46d99366cf75800f
|
refs/heads/master
| 2021-08-28T11:22:42.376675
| 2017-12-12T04:01:50
| 2017-12-12T04:01:50
| 113,939,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
from flask import Flask

# Flask application instance for this module.
app = Flask(__name__)
def do_something():
    """Produce the greeting served at the site root."""
    greeting = 'Hi!'
    return greeting
@app.route('/')
def index():
    # Root endpoint: the response body comes from do_something().
    return do_something()
|
[
"saniagorin@gmail.com"
] |
saniagorin@gmail.com
|
3cade65d4539e0f2b810f0bf64bcc272eb017652
|
df927bcec54171297b04ffab0dbffe79d0ca46c2
|
/tests/conftest.py
|
40cb50c13d36cf7228924db93666defdea2eba23
|
[] |
no_license
|
nickmflorin/portfolio-drf
|
d7645f31033320811af1b83c83ed9610331e1332
|
995549569e320d8f3deec020dc68baceef5b436e
|
refs/heads/master
| 2022-12-23T20:58:18.611580
| 2020-04-29T23:30:28
| 2020-04-29T23:30:28
| 247,204,266
| 1
| 0
| null | 2022-12-08T09:32:01
| 2020-03-14T03:26:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
import pytest
from rest_framework.test import APIClient
from tests.factories import factories
@pytest.fixture
def api_client(monkeypatch):
    """
    Custom API Client that we will use to monkeypatch around edge cases if we
    need it. We will most likely need to do this so this is a good starting
    point.
    """
    # NOTE(review): `monkeypatch` is requested but currently unused — kept so
    # patches can be added here later without changing test signatures.
    class CustomApiClient(APIClient):
        pass

    return CustomApiClient()
def inject_factory_fixtures():
    """
    This method will dynamically create factory fixtures for all of the
    factories present in the factories module.

    If we have a factory:

    >>> class SomeFactory():
    >>> ...

    Then, a fixture will be generated with the name `create_some` which can
    be used as:

    >>> def test_something(create_some):
    >>> model = create_some(attr1='foo', attr2='bar')
    """
    def model_factory_fixture_generator(factory_cls):
        # Module-scoped fixture whose value is a factory function; the
        # closure binds factory_cls at generation time.
        @pytest.fixture(scope='module')
        def factory_fixture():
            def factory_generator(**kwargs):
                return factory_cls(**kwargs)
            return factory_generator
        return factory_fixture

    for factory_name in factories.__dict__['__all__']:
        factory_cls = factories.__dict__[factory_name]
        name = f"create_{factory_name.split('Factory')[0].lower()}"
        # Installing into this conftest module's globals lets pytest
        # discover the generated fixtures by name.
        globals()[name] = model_factory_fixture_generator(factory_cls)


inject_factory_fixtures()
|
[
"nickmflorin@gmail.com"
] |
nickmflorin@gmail.com
|
41fb7efe3801ee32639dc732194460721a482b4f
|
6ff66d3c3fbd10bbd7b6d07ddc83c486eabc2cc2
|
/paluck-networks-load.py
|
530a63b0fd04902e2f9412ffb736788c08d38565
|
[] |
no_license
|
anuragdutt/informationDiffusion
|
c75f74b1bbf478832cd6154438116b5f075831ce
|
372d993bea6283504527af0fb409560ad2ebb121
|
refs/heads/master
| 2023-04-06T09:27:13.130947
| 2021-04-06T09:47:20
| 2021-04-06T09:47:20
| 341,443,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
import networkx as nx
import pandas as pd
# Build an undirected friendship graph from the Paluck study edge list
# (one row per ID -> PEERID nomination) and report its connectivity.
edgelist = pd.read_csv('paluck-edgelist.csv')
G = nx.from_pandas_edgelist(edgelist, source='ID', target='PEERID')
# NOTE(review): nx.info() was removed in networkx >= 3.0 — confirm the
# pinned networkx version supports it.
print(nx.info(G))
print(f'connected?\t{nx.is_connected(G)}')
print(f'# of connected components:\t{nx.number_connected_components(G)}')
components = nx.connected_components(G)
for i,c in enumerate(components):
    print(f'# of nodes in {i}th component is {len(c)}')
|
[
"anurag2709@gmail.com"
] |
anurag2709@gmail.com
|
458891471f25b96114bd5c3c58984aa705e66b96
|
9ddac00c5c9e51bc4426f58a7b829896cd52b13a
|
/Forums/stores.py
|
ed3e2fac82f22722381501123527691a90331d5e
|
[
"MIT"
] |
permissive
|
omarkamel02/firstBEproject
|
ed401167b58aa898c03e43ebf0d274efb35d6216
|
47dd1d2841348dd2fe9f8a0bd6208593847f9d8f
|
refs/heads/master
| 2020-03-06T21:00:21.544531
| 2018-04-03T11:19:59
| 2018-04-03T11:19:59
| 127,067,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
import copy
class BaseStore(object):
    """In-memory CRUD store over a backing list, with auto-incrementing ids.

    By convention get_by_id returns 0 (not None) when no entity matches, and
    callers compare against 0 — that contract is preserved here.
    """

    def __init__(self, data_provider, last_id):
        # data_provider: the list that actually holds the entities.
        # last_id: the id assigned to the next added entity.
        self._data_provider = data_provider
        self.last_id = last_id

    def get_all(self):
        """Return the backing list of all stored entities."""
        return self._data_provider

    def add(self, item_instance):
        """Assign the next id to item_instance and append it to the store."""
        item_instance.id = self.last_id
        self._data_provider.append(item_instance)
        self.last_id += 1

    def get_by_id(self, id):
        """Return the entity with the given id, or 0 if none exists."""
        for mem in self.get_all():
            if mem.id == id:
                return mem
        return 0

    def update(self, instance):
        """Replace the stored entity having instance.id with instance."""
        current = self.get_by_id(instance.id)
        index = self._data_provider.index(current)
        self._data_provider[index] = instance

    def delete(self, id):
        """Remove the entity with the given id from the store."""
        entity = self.get_by_id(id)
        self._data_provider.remove(entity)

    def entity_exists(self, instance):
        """Return True if an entity with instance.id is stored.

        Bug fix: the original referenced an undefined name `member`,
        raising NameError on every call; it now checks `instance.id`.
        """
        return self.get_by_id(instance.id) != 0
class MemberStore(BaseStore):
    """Store for forum members.

    NOTE: `members` and `last_id` are class attributes, so every
    MemberStore instance shares the same backing list (existing behavior,
    preserved).
    """
    members = []
    last_id = 1

    def __init__(self):
        super(MemberStore, self).__init__(MemberStore.members, MemberStore.last_id)

    def get_members_with_posts(self, all_posts):
        """Attach each post in all_posts to its owning member (matched on
        post.member_id) and return all members.

        Mutates member.posts in place, so repeated calls with the same posts
        append duplicates (existing behavior, preserved). The dead
        `return_list` accumulator from the original was removed.
        """
        for mem in self.get_all():
            for post in all_posts:
                if post.member_id == mem.id:
                    mem.posts.append(post)
        return self.get_all()

    def get_top_two(self, all_posts):
        """Return the two members with the most posts, after attaching
        all_posts. Sorts the shared members list in place."""
        ranked = self.get_members_with_posts(all_posts)
        ranked.sort(key=lambda member: len(member.posts), reverse=True)
        return ranked[:2]

    def get_by_name(self, name):
        """Yield every member whose name equals `name`.

        Bug fix: the original compared against an undefined `member_name`,
        raising NameError when the generator was consumed.
        """
        for member in self.get_all():
            if member.name == name:
                yield member
class PostsStore(BaseStore):
    """Store for forum posts; `posts` and `last_id` are shared class state
    (existing behavior, preserved)."""
    posts = []
    last_id = 1

    def __init__(self):
        super(PostsStore, self).__init__(PostsStore.posts, PostsStore.last_id)

    def get_posts_by_date(self):
        """Return all posts sorted ascending by date (sorts the backing
        list in place).

        Bug fix: the original returned an undefined `posts_sorted_byDate`,
        raising NameError; it now returns the sorted list.
        """
        all_posts = self.get_all()
        all_posts.sort(key=lambda post: post.date)
        return all_posts
|
[
"omarko.kamel@gmail.com"
] |
omarko.kamel@gmail.com
|
d033d51fe930009bccfebf3e180769e52865363c
|
248147c355e452e069eb19723b5653cdfab69830
|
/semlog_mongo/semlog_mongo/analysis.py
|
f6c68d90b388e63e4e0e63a6040be0f7204ccd68
|
[
"BSD-3-Clause"
] |
permissive
|
robcog-iai/semlog_web
|
2c8bea5e317826dca034be45ae946ac553efcc90
|
dde8478f8fc2a273c94ce8f55faf8b0b80a8213c
|
refs/heads/master
| 2023-03-12T18:22:47.617150
| 2021-02-28T14:51:00
| 2021-02-28T14:51:00
| 192,709,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
import pprint
def get_image_information(client):
    """This function is used for data analysis.

    Retrieve image information (num_entities, average linear/angular distance and timestamp).

    Args:
        client: A collection-level object exposing `.aggregate`, `.name` and
            `.database` (e.g. a pymongo Collection), queried for documents
            that contain `camera_views`.

    Returns:
        A list of dicts, one per camera view, with averaged distances and
        provenance fields.
    """
    # Pipeline: keep docs with camera_views, explode one doc per view, derive
    # per-view averages plus provenance, then promote the view to the root and
    # project only the reporting fields.
    pipeline = [{"$match": {"camera_views": {"$exists": 1}}}, {"$unwind": {"path": "$camera_views"}}, {"$addFields": {
        "camera_views.average_linear_distance": {
            "$divide": [
                "$camera_views.total_linear_distance",
                "$camera_views.num_entities"
            ]
        },
        "camera_views.average_angular_distance": {
            "$divide": [
                "$camera_views.total_angular_distance",
                "$camera_views.num_entities"
            ]
        },
        "camera_views.timestamp": "$timestamp",
        "camera_views._id": "$_id",
        "camera_views.database": client.database.name,
        "camera_views.collection": client.name,
        'camera_views.file_id':"$camera_views.images.file_id", #Add the Color image id for downloading and testing
    }}, {"$replaceRoot": {"newRoot": "$camera_views"}}, {"$project": {
        "_id": 1,
        "num_entities": 1,
        "average_linear_distance": 1,
        "average_angular_distance": 1,
        "timestamp": 1,
        "duplicate": 1,
        "database":1,
        "collection":1,
        "file_id":{"$arrayElemAt":["$images.file_id",0]}, # Only keep the first file id (The Color image)
    }}]
    pprint.pprint(pipeline)
    result = list(client.aggregate(pipeline))
    return result
|
[
"584827526@qq.com"
] |
584827526@qq.com
|
65fe0b5c35c39628317e1ccdffd321edb9536df9
|
ba81110b54feba9e33127ed6f04c08cbe6a8252e
|
/sloth/parsers/SLComp18Visitor.py
|
a58d39778ac82c1eb5d9ee83eed7324ef4afa24d
|
[
"MIT"
] |
permissive
|
katelaan/sloth
|
cd7285a8e64981c15b327707f552243e9e6f38a1
|
f487911c4e6850253c592cf65280390f39e24a87
|
refs/heads/master
| 2021-04-09T10:19:28.233758
| 2019-02-05T12:22:00
| 2019-02-05T12:22:00
| 125,380,933
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,051
|
py
|
from antlr4 import *
if __name__ is not None and "." in __name__:
from .SLComp18Parser import SLComp18Parser
else:
from SLComp18Parser import SLComp18Parser
from functools import partial
from itertools import groupby
from .representation import *
def cmd_not_supported(cmd, args):
    """Signal that the SMT-LIB command *cmd* is not handled by this tool."""
    message = 'No support for {}'.format(cmd)
    raise NotImplementedError(message)
def no_support(cmd):
    # Return a one-argument handler that always raises NotImplementedError
    # naming *cmd*; used to populate cmdTypeToCmd for unhandled commands.
    return partial(cmd_not_supported, cmd)
def declare_datatypes(args):
    # Split args into the leading run of SortDecls and the following run of
    # datatype bodies, then pair them positionally into DataTypeDecls.
    # NOTE(review): relies on args arriving as all SortDecls first, then all
    # bodies — groupby only groups *consecutive* items, so a different order
    # would break the two next() calls.
    it = groupby(args, key = lambda arg: isinstance(arg, SortDecl))
    decls = list(next(it)[1])
    terms = list(next(it)[1])
    assert len(decls) == len(terms)
    return [DataTypeDecl(*pair) for pair in zip(decls, terms)]
def declare_heap(args):
    """Build a HeapDecl mapping each location sort to its data sort."""
    mapping = {}
    for pair in args:
        mapping[pair[0]] = pair[1]
    return HeapDecl(mapping)
def define_funs_rec(args):
    # Split args into the leading run of FunDecls and the following run of
    # bodies, then pair them positionally into FunDefs. Same consecutive-run
    # assumption as declare_datatypes (see note there).
    it = groupby(args, key = lambda arg: isinstance(arg, FunDecl))
    decls = list(next(it)[1])
    terms = list(next(it)[1])
    assert len(decls) == len(terms)
    return [FunDef(*pair) for pair in zip(decls, terms)]
# Dispatch table from SMT-LIB command name to a handler taking the list of
# visited argument values; unhandled commands raise via no_support().
cmdTypeToCmd = {
    'assert': lambda args: Assert(args[0]),
    'check-sat': lambda _: Task('check-sat', []),
    'check-sat-assuming': no_support('check-sat-assuming'),
    'check-unsat': lambda _: Task('check-unsat', []),
    'declare-const': lambda args: ConstDecl(*args),
    'declare-fun': no_support('declare-fun'),
    'declare-datatype': lambda args: [SortDecl(args[0], 0), args[1]],
    'declare-datatypes': declare_datatypes,
    'declare-heap': declare_heap,
    'declare-sort': lambda args: SortDecl(*args),
    'define-fun': no_support('define-fun'),
    'define-fun-rec': lambda args: args[0],
    'define-funs-rec': define_funs_rec,
    'define-sort': no_support('define-sort'),
    'echo': no_support('echo'),
    'exit': no_support('exit'),
    'get-assertions': no_support('get-assertions'),
    'get-assignment': no_support('get-assignment'),
    'get-info': no_support('get-info'),
    'get-model': lambda _: Task('get-model', []),
    'get-option': no_support('get-option'),
    'get-proof': no_support('get-proof'),
    'get-unsat-assumptions': no_support('get-unsat-assumptions'),
    'get-unsat-core': no_support('get-unsat-core'),
    'get-value': no_support('get-value'),
    'pop': no_support('pop'),
    'push': no_support('push'),
    'reset': no_support('reset'),
    'reset-assertions': no_support('reset-assertions'),
    'set-info': lambda args: Meta('set-info', args),
    'set-logic': lambda args: Meta('set-logic', args),
    'set-option': no_support('set-option'),
}
class SLComp18Visitor(ParseTreeVisitor):
def aggregateResult(self, aggregate, nextResult):
if aggregate is None:
return nextResult
elif nextResult is None:
return aggregate
elif isinstance(aggregate, list):
return aggregate + [nextResult]
else:
return [aggregate, nextResult]
def fail(self, ctx):
print('Text: {}'.format(ctx.getText))
print('Children: {}'.format(visitChildren(ctx)))
raise NotImplementedError()
    # --- Start rule, symbols and reserved words -----------------------------
    # Methods that call self.fail() cover rules not needed for benchmark
    # parsing; they abort loudly if ever reached.

    # Visit a parse tree produced by SLComp18Parser#start.
    def visitStart(self, ctx:SLComp18Parser.StartContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by SLComp18Parser#response.
    def visitResponse(self, ctx:SLComp18Parser.ResponseContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#generalReservedWord.
    def visitGeneralReservedWord(self, ctx:SLComp18Parser.GeneralReservedWordContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#simpleSymbol.
    def visitSimpleSymbol(self, ctx:SLComp18Parser.SimpleSymbolContext):
        # Wrap the raw token text in the representation-level Symbol type.
        return Symbol(ctx.getText())

    # Visit a parse tree produced by SLComp18Parser#quotedSymbol.
    def visitQuotedSymbol(self, ctx:SLComp18Parser.QuotedSymbolContext):
        return Symbol(ctx.getText())

    # Visit a parse tree produced by SLComp18Parser#predefSymbol.
    def visitPredefSymbol(self, ctx:SLComp18Parser.PredefSymbolContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#predefKeyword.
    def visitPredefKeyword(self, ctx:SLComp18Parser.PredefKeywordContext):
        return Keyword(ctx.getText())

    # Visit a parse tree produced by SLComp18Parser#symbol.
    def visitSymbol(self, ctx:SLComp18Parser.SymbolContext):
        # Just propagate the simple/quoted symbol constructed from the children
        return self.visitChildren(ctx)
    # --- Literals, keywords and constants -----------------------------------

    # Visit a parse tree produced by SLComp18Parser#numeral.
    def visitNumeral(self, ctx:SLComp18Parser.NumeralContext):
        return int(ctx.getText())

    # Visit a parse tree produced by SLComp18Parser#decimal.
    def visitDecimal(self, ctx:SLComp18Parser.DecimalContext):
        return float(ctx.getText())

    # Visit a parse tree produced by SLComp18Parser#hexadecimal.
    def visitHexadecimal(self, ctx:SLComp18Parser.HexadecimalContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#binary.
    def visitBinary(self, ctx:SLComp18Parser.BinaryContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#string.
    def visitString(self, ctx:SLComp18Parser.StringContext):
        return ctx.getText()

    # Visit a parse tree produced by SLComp18Parser#keyword.
    def visitKeyword(self, ctx:SLComp18Parser.KeywordContext):
        # Avoid another level of wrapping by propagating predefined keywords
        return self.visitChildren(ctx)

    # Visit a parse tree produced by SLComp18Parser#spec_constant.
    def visitSpec_constant(self, ctx:SLComp18Parser.Spec_constantContext):
        return Constant(self.visitChildren(ctx))

    # Visit a parse tree produced by SLComp18Parser#s_expr.
    def visitS_expr(self, ctx:SLComp18Parser.S_exprContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#index.
    def visitIndex(self, ctx:SLComp18Parser.IndexContext):
        # Just pass on the symbol/index rather than introducing another level of wrapping
        return self.visitChildren(ctx)
    # --- Identifiers, attributes and sorts ----------------------------------

    # Visit a parse tree produced by SLComp18Parser#identifier.
    def visitIdentifier(self, ctx:SLComp18Parser.IdentifierContext):
        child_res = self.visitChildren(ctx)
        # If aggregateResult produced a list of >= 2 results, the identifier
        # is indexed (symbol plus indices); a bare Symbol has no len().
        is_indexed = False
        try:
            is_indexed = len(child_res) >= 2
        except:
            pass
        if is_indexed:
            return IndexedIdentifier(child_res[0], child_res[1:])
        else:
            assert isinstance(child_res, Symbol)
            return child_res

    # Visit a parse tree produced by SLComp18Parser#attribute_value.
    def visitAttribute_value(self, ctx:SLComp18Parser.Attribute_valueContext):
        child_res = self.visitChildren(ctx)
        assert type(child_res) in (Symbol, Constant)
        return AttributeValue(child_res)

    # Visit a parse tree produced by SLComp18Parser#attribute.
    def visitAttribute(self, ctx:SLComp18Parser.AttributeContext):
        return Attribute(*self.visitChildren(ctx))

    # Visit a parse tree produced by SLComp18Parser#sort.
    def visitSort(self, ctx:SLComp18Parser.SortContext):
        child_res = self.visitChildren(ctx)
        assert isinstance(child_res, Symbol), \
            "Received {} of type {} instead of Symbol".format(child_res, type(child_res))
        return Sort(child_res)

    # Visit a parse tree produced by SLComp18Parser#qual_identifer.
    def visitQual_identifer(self, ctx:SLComp18Parser.Qual_identiferContext):
        child_res = self.visitChildren(ctx)
        # Multiple child results unpack into a QualifiedIdentifier; a single
        # Symbol/IndexedIdentifier fails the unpacking and passes through.
        try:
            return QualifiedIdentifier(*child_res)
        except:
            assert type(child_res) in (Symbol, IndexedIdentifier), \
                "Received {} of type {} instead of Symbol/IndexedIdentifier".format(child_res, type(child_res))
            return child_res
    # --- Bindings, patterns and terms ---------------------------------------

    # Visit a parse tree produced by SLComp18Parser#var_binding.
    def visitVar_binding(self, ctx:SLComp18Parser.Var_bindingContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#sorted_var.
    def visitSorted_var(self, ctx:SLComp18Parser.Sorted_varContext):
        return SortedVar(*self.visitChildren(ctx))

    # Visit a parse tree produced by SLComp18Parser#pattern.
    def visitPattern(self, ctx:SLComp18Parser.PatternContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#match_case.
    def visitMatch_case(self, ctx:SLComp18Parser.Match_caseContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#term.
    def visitTerm(self, ctx:SLComp18Parser.TermContext):
        #child = ctx.getChild(1)
        #if child:
        # A compound term "(op ...)" has a second child; a bare term does not
        # and getChild(1) raises, in which case we just propagate the child.
        try:
            child = ctx.getChild(1)
        except:
            return self.visitChildren(ctx)
        else:
            txt = child.getText()
            if txt == 'exists':
                # (exists (vars...) body): all but the last child result are
                # the bound variables, the last is the body term.
                child_res = self.visitChildren(ctx)
                vars_ = child_res[:-1]
                term = child_res[-1]
                return Exists(vars_, term)
            elif txt == 'forall':
                raise NotImplementedError()
            elif txt == '!':
                raise NotImplementedError()
            elif txt == 'match':
                raise NotImplementedError()
            else:
                return self.visitChildren(ctx)
    # --- Declarations (sorts, datatypes, functions) -------------------------

    # Visit a parse tree produced by SLComp18Parser#sort_symbol_decl.
    def visitSort_symbol_decl(self, ctx:SLComp18Parser.Sort_symbol_declContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#meta_spec_constant.
    def visitMeta_spec_constant(self, ctx:SLComp18Parser.Meta_spec_constantContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#fun_symbol_decl.
    def visitFun_symbol_decl(self, ctx:SLComp18Parser.Fun_symbol_declContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#par_fun_symbol_decl.
    def visitPar_fun_symbol_decl(self, ctx:SLComp18Parser.Par_fun_symbol_declContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#theory_attribute.
    def visitTheory_attribute(self, ctx:SLComp18Parser.Theory_attributeContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#theory_decl.
    def visitTheory_decl(self, ctx:SLComp18Parser.Theory_declContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#logic_attribue.
    def visitLogic_attribue(self, ctx:SLComp18Parser.Logic_attribueContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#logic.
    def visitLogic(self, ctx:SLComp18Parser.LogicContext):
        self.fail(ctx)

    # Visit a parse tree produced by SLComp18Parser#sort_dec.
    def visitSort_dec(self, ctx:SLComp18Parser.Sort_decContext):
        return SortDecl(*self.visitChildren(ctx))

    # Visit a parse tree produced by SLComp18Parser#selector_dec.
    def visitSelector_dec(self, ctx:SLComp18Parser.Selector_decContext):
        return Selector(*self.visitChildren(ctx))

    # Visit a parse tree produced by SLComp18Parser#constructor_dec.
    def visitConstructor_dec(self, ctx:SLComp18Parser.Constructor_decContext):
        # First child result is the constructor name, the rest its selectors.
        child_res = self.visitChildren(ctx)
        return ConstructorDecl(child_res[0], child_res[1:], None)

    # Visit a parse tree produced by SLComp18Parser#datatype_dec.
    def visitDatatype_dec(self, ctx:SLComp18Parser.Datatype_decContext):
        # The datatype will be assembled in the visitors for declare-datatype(s)
        return self.visitChildren(ctx)

    # Visit a parse tree produced by SLComp18Parser#function_dec.
    def visitFunction_dec(self, ctx:SLComp18Parser.Function_decContext):
        # name, params..., result-sort
        child_res = self.visitChildren(ctx)
        return FunDecl(child_res[0], child_res[1:-1], child_res[-1])

    # Visit a parse tree produced by SLComp18Parser#function_def.
    def visitFunction_def(self, ctx:SLComp18Parser.Function_defContext):
        # Sort the child results by type: Symbol -> name, SortedVar -> param,
        # Sort -> result type, anything else -> the body term.
        name = None
        params = []
        resultType = None
        term = None
        child_res = self.visitChildren(ctx)
        for arg in child_res:
            t = type(arg)
            if t == Symbol:
                name = arg
            elif t == SortedVar:
                params.append(arg)
            elif t == Sort:
                resultType = arg
            else:
                term = arg
        return FunDef(FunDecl(name, params, resultType), term)

    # Visit a parse tree produced by SLComp18Parser#prop_literal.
    def visitProp_literal(self, ctx:SLComp18Parser.Prop_literalContext):
        self.fail(ctx)
    # Visit a parse tree produced by SLComp18Parser#script.
    def visitScript(self, ctx:SLComp18Parser.ScriptContext):
        # Collect all top-level command results into the buckets a Script
        # expects, dispatching on the result's Python type. Lists come from
        # declare-datatype(s)/define-funs-rec and are either a batch of
        # DataTypeDecls or a batch of FunDefs.
        sorts = []
        types = []
        heap = None
        consts = []
        funs = []
        asserts = []
        meta = []
        tasks = []
        def process_list(args):
            nonlocal types
            if isinstance(args[0], DataTypeDecl):
                types = args
            else:
                assert isinstance(args[0], FunDef)
                funs.extend(args)
        def set_heap(arg):
            nonlocal heap
            if heap is not None:
                raise Exception('Duplicate heap declaration')
            heap = arg
        process_type = {
            SortDecl: sorts.append,
            list: process_list,
            HeapDecl: set_heap,
            ConstDecl: consts.append,
            FunDef: funs.append,
            Assert: asserts.append,
            Meta: meta.append,
            Task: tasks.append
        }
        for arg in self.visitChildren(ctx):
            process_type[type(arg)](arg)
        # A well-formed SL-COMP script must declare a heap and assert something.
        if heap is None:
            raise Exception('No heap declaration')
        if not asserts:
            raise Exception('No assert => Nothing to check')
        return Script(sorts, types, heap, consts, funs, asserts, meta, tasks)
    # --- Command-name visitors ----------------------------------------------
    # Each cmd_* rule simply yields its command name as a string; visitCommand
    # later uses that string as the key into cmdTypeToCmd.

    # Visit a parse tree produced by SLComp18Parser#cmd_assert.
    def visitCmd_assert(self, ctx:SLComp18Parser.Cmd_assertContext):
        return 'assert'

    # Visit a parse tree produced by SLComp18Parser#cmd_checkSat.
    def visitCmd_checkSat(self, ctx:SLComp18Parser.Cmd_checkSatContext):
        return 'check-sat'

    # Visit a parse tree produced by SLComp18Parser#cmd_checkUnsat.
    def visitCmd_checkUnsat(self, ctx:SLComp18Parser.Cmd_checkUnsatContext):
        return 'check-unsat'

    # Visit a parse tree produced by SLComp18Parser#cmd_checkSatAssuming.
    def visitCmd_checkSatAssuming(self, ctx:SLComp18Parser.Cmd_checkSatAssumingContext):
        return 'check-sat-assuming'

    # Visit a parse tree produced by SLComp18Parser#cmd_declareConst.
    def visitCmd_declareConst(self, ctx:SLComp18Parser.Cmd_declareConstContext):
        return 'declare-const'

    # Visit a parse tree produced by SLComp18Parser#cmd_declareDatatype.
    def visitCmd_declareDatatype(self, ctx:SLComp18Parser.Cmd_declareDatatypeContext):
        return 'declare-datatype'

    # Visit a parse tree produced by SLComp18Parser#cmd_declareDatatypes.
    def visitCmd_declareDatatypes(self, ctx:SLComp18Parser.Cmd_declareDatatypesContext):
        return 'declare-datatypes'

    # Visit a parse tree produced by SLComp18Parser#cmd_declareFun.
    def visitCmd_declareFun(self, ctx:SLComp18Parser.Cmd_declareFunContext):
        return 'declare-fun'

    # Visit a parse tree produced by SLComp18Parser#cmd_declareHeap.
    def visitCmd_declareHeap(self, ctx:SLComp18Parser.Cmd_declareHeapContext):
        return 'declare-heap'

    # Visit a parse tree produced by SLComp18Parser#cmd_declareSort.
    def visitCmd_declareSort(self, ctx:SLComp18Parser.Cmd_declareSortContext):
        return 'declare-sort'

    # Visit a parse tree produced by SLComp18Parser#cmd_defineFun.
    def visitCmd_defineFun(self, ctx:SLComp18Parser.Cmd_defineFunContext):
        return 'define-fun'

    # Visit a parse tree produced by SLComp18Parser#cmd_defineFunRec.
    def visitCmd_defineFunRec(self, ctx:SLComp18Parser.Cmd_defineFunRecContext):
        return 'define-fun-rec'

    # Visit a parse tree produced by SLComp18Parser#cmd_defineFunsRec.
    def visitCmd_defineFunsRec(self, ctx:SLComp18Parser.Cmd_defineFunsRecContext):
        return 'define-funs-rec'

    # Visit a parse tree produced by SLComp18Parser#cmd_defineSort.
    def visitCmd_defineSort(self, ctx:SLComp18Parser.Cmd_defineSortContext):
        return 'define-sort'

    # Visit a parse tree produced by SLComp18Parser#cmd_echo.
    def visitCmd_echo(self, ctx:SLComp18Parser.Cmd_echoContext):
        return 'echo'

    # Visit a parse tree produced by SLComp18Parser#cmd_exit.
    def visitCmd_exit(self, ctx:SLComp18Parser.Cmd_exitContext):
        return 'exit'

    # Visit a parse tree produced by SLComp18Parser#cmd_getAssertions.
    def visitCmd_getAssertions(self, ctx:SLComp18Parser.Cmd_getAssertionsContext):
        return 'get-assertions'

    # Visit a parse tree produced by SLComp18Parser#cmd_getAssignment.
    def visitCmd_getAssignment(self, ctx:SLComp18Parser.Cmd_getAssignmentContext):
        return 'get-assignment'

    # Visit a parse tree produced by SLComp18Parser#cmd_getInfo.
    def visitCmd_getInfo(self, ctx:SLComp18Parser.Cmd_getInfoContext):
        return 'get-info'

    # Visit a parse tree produced by SLComp18Parser#cmd_getModel.
    def visitCmd_getModel(self, ctx:SLComp18Parser.Cmd_getModelContext):
        return 'get-model'

    # Visit a parse tree produced by SLComp18Parser#cmd_getOption.
    def visitCmd_getOption(self, ctx:SLComp18Parser.Cmd_getOptionContext):
        return 'get-option'

    # Visit a parse tree produced by SLComp18Parser#cmd_getProof.
    def visitCmd_getProof(self, ctx:SLComp18Parser.Cmd_getProofContext):
        return 'get-proof'

    # Visit a parse tree produced by SLComp18Parser#cmd_getUnsatAssumptions.
    def visitCmd_getUnsatAssumptions(self, ctx:SLComp18Parser.Cmd_getUnsatAssumptionsContext):
        return 'get-unsat-assumptions'

    # Visit a parse tree produced by SLComp18Parser#cmd_getUnsatCore.
    def visitCmd_getUnsatCore(self, ctx:SLComp18Parser.Cmd_getUnsatCoreContext):
        return 'get-unsat-core'

    # Visit a parse tree produced by SLComp18Parser#cmd_getValue.
    def visitCmd_getValue(self, ctx:SLComp18Parser.Cmd_getValueContext):
        return 'get-value'

    # Visit a parse tree produced by SLComp18Parser#cmd_pop.
    def visitCmd_pop(self, ctx:SLComp18Parser.Cmd_popContext):
        return 'pop'

    # Visit a parse tree produced by SLComp18Parser#cmd_push.
    def visitCmd_push(self, ctx:SLComp18Parser.Cmd_pushContext):
        return 'push'

    # Visit a parse tree produced by SLComp18Parser#cmd_reset.
    def visitCmd_reset(self, ctx:SLComp18Parser.Cmd_resetContext):
        return 'reset'

    # Visit a parse tree produced by SLComp18Parser#cmd_resetAssertions.
    def visitCmd_resetAssertions(self, ctx:SLComp18Parser.Cmd_resetAssertionsContext):
        return 'reset-assertions'

    # Visit a parse tree produced by SLComp18Parser#cmd_setInfo.
    def visitCmd_setInfo(self, ctx:SLComp18Parser.Cmd_setInfoContext):
        return 'set-info'

    # Visit a parse tree produced by SLComp18Parser#cmd_setLogic.
    def visitCmd_setLogic(self, ctx:SLComp18Parser.Cmd_setLogicContext):
        return 'set-logic'

    # Visit a parse tree produced by SLComp18Parser#cmd_setOption.
    def visitCmd_setOption(self, ctx:SLComp18Parser.Cmd_setOptionContext):
        return 'set-option'

    # Visit a parse tree produced by SLComp18Parser#heap_dec.
    def visitHeap_dec(self, ctx:SLComp18Parser.Heap_decContext):
        return self.visitChildren(ctx)
# Visit a parse tree produced by SLComp18Parser#command.
def visitCommand(self, ctx:SLComp18Parser.CommandContext):
    """Build a command object for this parse-tree node.

    The child visitors return either a bare keyword string (argument-less
    command) or a sequence whose first element is the keyword and whose
    remaining elements are the command's arguments.
    """
    child_res = self.visitChildren(ctx)
    if isinstance(child_res, str):
        # Argument-less command: the keyword visitor returned just the name.
        return cmdTypeToCmd[child_res]([])
    # BUG FIX: the original called self.visitChildren(ctx) a second time
    # here, re-walking the whole subtree (and doubling any visitor side
    # effects). Reuse the result already computed above.
    cmd, *args = child_res
    return cmdTypeToCmd[cmd](args)
# --- Unsupported productions ----------------------------------------------
# The productions below (option/info flags and all solver-response forms)
# are not consumed by this visitor; each delegates to self.fail so that an
# unexpected occurrence is reported rather than silently aggregated.
# Visit a parse tree produced by SLComp18Parser#b_value.
def visitB_value(self, ctx:SLComp18Parser.B_valueContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#option.
def visitOption(self, ctx:SLComp18Parser.OptionContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#info_flag.
def visitInfo_flag(self, ctx:SLComp18Parser.Info_flagContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#error_behaviour.
def visitError_behaviour(self, ctx:SLComp18Parser.Error_behaviourContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#reason_unknown.
def visitReason_unknown(self, ctx:SLComp18Parser.Reason_unknownContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#model_response.
def visitModel_response(self, ctx:SLComp18Parser.Model_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#info_response.
def visitInfo_response(self, ctx:SLComp18Parser.Info_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#valuation_pair.
def visitValuation_pair(self, ctx:SLComp18Parser.Valuation_pairContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#t_valuation_pair.
def visitT_valuation_pair(self, ctx:SLComp18Parser.T_valuation_pairContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#check_sat_response.
def visitCheck_sat_response(self, ctx:SLComp18Parser.Check_sat_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#echo_response.
def visitEcho_response(self, ctx:SLComp18Parser.Echo_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_assertions_response.
def visitGet_assertions_response(self, ctx:SLComp18Parser.Get_assertions_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_assignment_response.
def visitGet_assignment_response(self, ctx:SLComp18Parser.Get_assignment_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_info_response.
def visitGet_info_response(self, ctx:SLComp18Parser.Get_info_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_model_response.
def visitGet_model_response(self, ctx:SLComp18Parser.Get_model_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_option_response.
def visitGet_option_response(self, ctx:SLComp18Parser.Get_option_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_proof_response.
def visitGet_proof_response(self, ctx:SLComp18Parser.Get_proof_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_unsat_assump_response.
def visitGet_unsat_assump_response(self, ctx:SLComp18Parser.Get_unsat_assump_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_unsat_core_response.
def visitGet_unsat_core_response(self, ctx:SLComp18Parser.Get_unsat_core_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#get_value_response.
def visitGet_value_response(self, ctx:SLComp18Parser.Get_value_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#specific_success_response.
def visitSpecific_success_response(self, ctx:SLComp18Parser.Specific_success_responseContext):
    self.fail(ctx)
# Visit a parse tree produced by SLComp18Parser#general_response.
def visitGeneral_response(self, ctx:SLComp18Parser.General_responseContext):
    self.fail(ctx)
del SLComp18Parser
|
[
"jkatelaan@forsyte.at"
] |
jkatelaan@forsyte.at
|
be1157870c26bc3810058e7505760fd7df952ee4
|
4b5f417838de6480211ec16c0494c5aec563a2bb
|
/rpsGame.py
|
85a3531aa49fb11b10e5c04f9ee7da8045b81aaa
|
[] |
no_license
|
srujrs/tkinter-progs
|
6bf55df48887a284b69d8a73fa714124443b4467
|
4108a84aa9248502b1c79472ab0dcaa543d2bb13
|
refs/heads/main
| 2023-03-13T22:00:34.411169
| 2021-03-08T16:35:40
| 2021-03-08T16:35:40
| 345,722,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,235
|
py
|
# Author : Srujan
# Rock Paper Scissor game with computer
import random
from tkinter import *

userScore = 0
userChoice = ""
compScore = 0
compChoice = ""
# Choice name -> ranking number. Renamed from `dict`, which shadowed the
# builtin type of the same name.
CHOICE_TO_NUM = {'rock': 0, 'paper': 1, 'scissor': 2}


def getNum(choice):
    """Return the numeric code (0/1/2) for a choice name."""
    return CHOICE_TO_NUM[choice]


def result(user, comp):
    """Score one round between `user` and `comp` and show the outcome."""
    textArea.delete(0.0, END)
    userNum = getNum(user)
    compNum = getNum(comp)
    global userScore
    global compScore
    if userNum == compNum:
        textArea.insert(INSERT, "It's a Tie!\n")
    elif (userNum + 1) % 3 == compNum:
        # Each code is beaten by its successor modulo 3.
        textArea.insert(INSERT, "Computer wins!\n")
        compScore += 1
    else:
        textArea.insert(INSERT, "You win!\n")
        userScore += 1
    ans = "\nYour choice : {uc} \nComputer's choice : {cc} \n\nYour score : {u} \nComputer's score : {c}".format(uc=user, cc=comp, u=userScore, c=compScore)
    textArea.insert(END, ans)


def randCompChoice():
    """Return the computer's uniformly random move."""
    return random.choice(['rock', 'paper', 'scissor'])


def rock():
    """Play a round with the user choosing rock."""
    global userChoice
    global compChoice
    userChoice = 'rock'
    compChoice = randCompChoice()
    result(userChoice, compChoice)


def paper():
    """Play a round with the user choosing paper."""
    global userChoice
    global compChoice
    userChoice = 'paper'
    compChoice = randCompChoice()
    result(userChoice, compChoice)


def scissors():
    """Play a round with the user choosing scissor."""
    global userChoice
    global compChoice
    userChoice = 'scissor'
    compChoice = randCompChoice()
    result(userChoice, compChoice)


# --- GUI setup: three choice buttons plus a result text area ---
root = Tk()
root.title("Rock Paper Scissor Game")
button1 = Button(text=" Rock ", bg="skyblue", font="Helvetica 13 bold italic", command=rock)
button1.grid(column=0, row=1)
button2 = Button(text=" Paper ", bg="lightgreen", font="Helvetica 13 bold italic", command=paper)
button2.grid(column=0, row=2)
button3 = Button(text=" Scissor ", bg="pink", font="Helvetica 13 bold italic", command=scissors)
button3.grid(column=0, row=3)
textArea = Text(root, height=12, width=60, bg="yellow")
textArea.insert(INSERT, "Let's play Rock-Paper-Scissor with the computer.\nCan you outsmart a computer?\nClick on the above buttons for your choice.\nLet's see what happens.")
textArea.grid(column=0, row=4)
root.mainloop()
|
[
"noreply@github.com"
] |
srujrs.noreply@github.com
|
e4173d511f0f3aa063805b42819eb5acad11131f
|
2d1649a7a00d49b72ed7e53afa4abb3c9281ce03
|
/.history/ParticleFilter/go_to_goal_20190420213415.py
|
bf727139b9c19455bfebad0f1f1137e7d248627c
|
[] |
no_license
|
joshzhang5/CS3630Lab6
|
9547dc6c89198e9bb4aebd8359d4feb974082d20
|
69e6df12829e18a211ae850236d74b4d728046ef
|
refs/heads/master
| 2020-05-15T13:59:51.906195
| 2019-04-22T18:21:42
| 2019-04-22T18:21:42
| 182,317,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,909
|
py
|
# Jiaxi Zhang
# George McAlear
# If you run into an "[NSApplication _setup] unrecognized selector" problem on macOS,
# try uncommenting the following snippet
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import cozmo
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
from cozmo.util import distance_mm, degrees, speed_mmps
from rrt import *
#particle filter functionality
class ParticleFilter:
def __init__(self, grid):
self.particles = Particle.create_random(PARTICLE_COUNT, grid)
self.grid = grid
def update(self, odom, r_marker_list):
# ---------- Motion model update ----------
self.particles = motion_update(self.particles, odom)
# ---------- Sensor (markers) model update ----------
self.particles = measurement_update(self.particles, r_marker_list, self.grid)
# ---------- Show current state ----------
# Try to find current best estimate for display
m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
return (m_x, m_y, m_h, m_confident)
# tmp cache
# last_pose: pose at the previous odometry reading. flag_odom_init is set
# here but never written elsewhere in this file -- TODO confirm it is used.
last_pose = cozmo.util.Pose(0,0,0,angle_z=cozmo.util.Angle(degrees=0))
flag_odom_init = False
# map
Map_filename = "map_arena.json"
grid = CozGrid(Map_filename)
gui = GUIWindow(grid, show_camera=True)
pf = ParticleFilter(grid)


def compute_odometry(curr_pose, cvt_inch=True):
    '''
    Compute the odometry given the current pose of the robot (use robot.pose)
    Input:
        - curr_pose: a cozmo.robot.Pose representing the robot's current location
        - cvt_inch: converts the odometry into grid units
    Returns:
        - 3-tuple (dx, dy, dh) representing the odometry
    '''
    global last_pose, flag_odom_init
    last_x, last_y, last_h = last_pose.position.x, last_pose.position.y, \
        last_pose.rotation.angle_z.degrees
    curr_x, curr_y, curr_h = curr_pose.position.x, curr_pose.position.y, \
        curr_pose.rotation.angle_z.degrees
    # Rotate the displacement by -last_h -- presumably into the previous
    # pose's local frame; verify against rotate_point's convention.
    dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
    if cvt_inch:
        # Convert from mm to grid units.
        dx, dy = dx / grid.scale, dy / grid.scale
    return (dx, dy, diff_heading_deg(curr_h, last_h))
async def marker_processing(robot, camera_settings, show_diagnostic_image=False):
    '''
    Obtain the visible markers from the current frame from Cozmo's camera.
    Since this is an async function, it must be called using await, for example:
        markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
    Input:
        - robot: cozmo.robot.Robot object
        - camera_settings: 3x3 matrix representing the camera calibration settings
        - show_diagnostic_image: if True, shows what the marker detector sees after processing
    Returns:
        - a list of detected markers, each being a 3-tuple (rx, ry, rh)
          (as expected by the particle filter's measurement update)
        - a PIL Image of what Cozmo's camera sees with marker annotations
    '''
    global grid
    # Wait for the latest image from Cozmo
    image_event = await robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
    # Convert the image to grayscale
    image = np.array(image_event.image)
    image = color.rgb2gray(image)
    # Detect the markers
    markers, diag = detect.detect_markers(image, camera_settings, include_diagnostics=True)
    # Measured marker list for the particle filter, scaled by the grid scale
    marker_list = [marker['xyh'] for marker in markers]
    marker_list = [(x/grid.scale, y/grid.scale, h) for x,y,h in marker_list]
    # Annotate the camera image with the markers (2x upscale for visibility)
    if not show_diagnostic_image:
        annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
        annotator.annotate_markers(annotated_image, markers, scale=2)
    else:
        # Show the detector's filtered view instead of the raw camera frame.
        diag_image = color.gray2rgb(diag['filtered_image'])
        diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
        annotator.annotate_markers(diag_image, markers, scale=2)
        annotated_image = diag_image
    return marker_list, annotated_image
global camera_settings  # NOTE(review): a module-level `global` is a no-op; kept for fidelity
# pick up location for the robot to drive to, (x, y, theta)
goal_pose = cozmo.util.Pose(6, 12, 0, angle_z=cozmo.util.Angle(degrees=0))
current_pose = None
# Constant correction subtracted after each odometry update -- presumably
# compensates a systematic +6 drift on y; TODO confirm units/sign.
ERROR_OFFSET_Y = cozmo.util.Pose(x=0, y=6, z=0, angle_z=0)


async def run(robot: cozmo.robot.Robot):
    """Main task: localize via the particle filter, drive to the pickup zone,
    then loop forever ferrying cubes to the drop-off zone.

    Side effects: mutates the module globals (last_pose, current_pose,
    camera_settings) and drives the physical robot.
    """
    global flag_odom_init, last_pose, goal_pose
    global grid, gui, pf
    global camera_settings
    # start streaming
    robot.camera.image_stream_enabled = True
    robot.camera.color_image_enabled = False
    robot.camera.enable_auto_exposure()
    await robot.set_head_angle(cozmo.util.degrees(3)).wait_for_completed()
    # Obtain the camera intrinsics matrix
    fx, fy = robot.camera.config.focal_length.x_y
    cx, cy = robot.camera.config.center.x_y
    # FIX: np.float was removed in NumPy 1.24; the builtin float is equivalent.
    camera_settings = np.array([
        [fx, 0, cx],
        [ 0, fy, cy],
        [ 0, 0, 1]
    ], dtype=float)
    ###################
    # pickup point
    #pickup_node = Node((153, 240))
    # dropoff_node = Node((544, 344))
    # localize the robot
    await look_around_until_converge(robot)
    # intialize an explorer after localized
    #cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
    # move robot to pickup zone once localized
    print("LAST POSE IS:", last_pose)
    #print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
    directions = goal_pose - last_pose
    current_pose = last_pose
    last_robot_pose = robot.pose
    print("SETTING LAST ROBOT POSE TO: ", last_robot_pose)
    print("SO WE GOING TO FOLLOW THIS TO PICKUP ZONE:", directions)
    await execute_directions(robot, directions)
    await robot.turn_in_place(angle=cozmo.util.Angle(degrees=45)).wait_for_completed()
    print("LAST ROBOT POSE IS: ", last_robot_pose)
    print("CURRENT POSE IS:", robot.pose)
    print("WE THINK WE MOVED THIS MUCH TO GO TO PICKUP ZONE: ", convertPoseToInches(robot.pose - last_robot_pose))
    current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose) - ERROR_OFFSET_Y
    last_robot_pose = robot.pose
    print("COZMO THINKS IT IS AT AFTER DRIVING TO PICKUPZONE: ", current_pose)
    await robot.say_text('Ready for pick up!').wait_for_completed()
    drop_off_directions = [(3, 4.5, 0), (21.75, 4.5, 90), (21.75, 13.75, 90)]
    pick_up_directions = [(21.75, 4.5, 90), (3, 4.5, 0), (4.5, 20)]
    while True:
        cube = await robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)
        await robot.pickup_object(cube, num_retries=5).wait_for_completed()
        current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose) - ERROR_OFFSET_Y
        print("WE THINK WE MOVED THIS MUCH TO PICK UP CUBE: ", convertPoseToInches(robot.pose - last_robot_pose))
        last_robot_pose = robot.pose
        #cosimo.update_pose()
        print("COZMO THINKS IT IS AT AFTER PICKING UP CUBE: ", current_pose)
        #await look_around_until_converge(robot)
        # intialize an explorer after localized
        #cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
        # move robot to pickup zone once localized
        #print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
        #current_pose = last_pose
        # rrt to drop zone and drop off cube
        for destination in drop_off_directions:
            directions = convertInchesToPose(destination) - current_pose
            await execute_directions(robot, directions)
            # BUG FIX: the original line was missing the '-' before
            # ERROR_OFFSET_Y (a syntax error); restored to match the
            # identical pose updates above.
            current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose) - ERROR_OFFSET_Y
            print("WE THINK WE MOVED THIS MUCH TO FOLLOW DIRECTIONS: ", convertPoseToInches(robot.pose - last_robot_pose))
            last_robot_pose = robot.pose
            print("COZMO THINKS IT IS AT AFTER FOLLOWING DIRECTIONS: ", current_pose)
        #await cosimo.go_to_goal(goal_node=dropoff_node)
        await robot.set_lift_height(0.0).wait_for_completed()
        # rrt to just in front of pick up zone
        # await cosimo.go_to_goal(goal_node=pickup_node)
def convertPoseToInches(pose):
    """Scale a cozmo pose from millimetres to grid inches (25 mm per unit)."""
    scaled_x = pose.position.x / 25
    scaled_y = pose.position.y / 25
    return cozmo.util.Pose(x=scaled_x, y=scaled_y, z=0, angle_z=pose.rotation.angle_z)


def convertInchesToPose(position):
    """Build a cozmo pose from an (x, y, heading_degrees) triple."""
    x_in, y_in, heading_deg = position[0], position[1], position[2]
    return cozmo.util.Pose(x=x_in, y=y_in, z=0, angle_z=cozmo.util.Angle(degrees=heading_deg))
async def execute_directions(robot, directions):
    """Drive a relative pose: turn to its heading, drive x, turn, drive y.

    NOTE(review): the second turn is a fixed +90 degrees regardless of
    `directions` -- confirm this matches the arena's axis convention.
    Distances are grid units, converted back to mm via grid.scale.
    """
    await robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
    await robot.drive_straight(distance=distance_mm(directions.position.x * grid.scale), speed=speed_mmps(80)).wait_for_completed()
    await robot.turn_in_place(angle=cozmo.util.Angle(degrees=90)).wait_for_completed()
    await robot.drive_straight(distance=distance_mm(directions.position.y * grid.scale), speed=speed_mmps(80)).wait_for_completed()
async def look_around_until_converge(robot: cozmo.robot.Robot):
    """Spin in place, feeding marker observations to a fresh particle filter
    until its mean-pose estimate reports confidence; returns that pose.

    Resets the module-level `pf` and `last_pose` as a side effect.
    """
    # globals
    global flag_odom_init, last_pose
    global grid, gui, pf
    # reset variables
    conf = False
    last_pose = cozmo.util.Pose(0,0,0,angle_z=cozmo.util.Angle(degrees=0))
    pf = ParticleFilter(grid)
    # reset lift and head
    await robot.set_lift_height(0.0).wait_for_completed()
    await robot.set_head_angle(cozmo.util.degrees(3)).wait_for_completed()
    while not conf:
        # Restart the sweep if the robot was picked up (kidnapped-robot case).
        if (await is_picked_up(robot)):
            continue
        # move a little
        last_pose = robot.pose
        await robot.turn_in_place(angle=cozmo.util.Angle(degrees=20)).wait_for_completed()
        curr_pose = robot.pose
        detected_markers, camera_image = await marker_processing(robot, camera_settings)
        # update, motion, and measurment with the odometry and marker data
        odometry = compute_odometry(curr_pose)
        curr_x, curr_y, curr_h, conf = pf.update(odometry, detected_markers)
        # update gui
        gui.show_particles(pf.particles)
        gui.show_mean(curr_x, curr_y, curr_h)
        gui.show_camera_image(camera_image)
        gui.updated.set()
    last_pose = cozmo.util.Pose(curr_x , curr_y, 0, angle_z=cozmo.util.Angle(degrees=curr_h))
    return last_pose
async def robot_is_at_goal(robot):
    """Celebrate at the goal, then wait until the robot is lifted off the mat."""
    global goal_pose
    await robot.play_anim(name="anim_poked_giggle").wait_for_completed()
    while not robot.is_picked_up:
        await asyncio.sleep(.1)
    await robot.say_text("Put me down!").wait_for_completed()


async def is_picked_up(robot):
    """Return True (after resetting the particle filter) if the robot was
    picked up; block until it is put back down. Returns False otherwise."""
    global pf, last_pose
    if robot.is_picked_up:
        await robot.say_text("Put me down!").wait_for_completed()
        # Kidnapped: previous localization is invalid, start over.
        last_pose = cozmo.util.Pose(0,0,0,angle_z=cozmo.util.Angle(degrees=0))
        pf = ParticleFilter(grid)
        while (robot.is_picked_up):
            await asyncio.sleep(0.01)
        return True
    return False
class CozmoThread(threading.Thread):
    # Runs the cozmo program off the main thread so the GUI can own it.
    def __init__(self):
        threading.Thread.__init__(self, daemon=False)

    def run(self):
        cozmo.robot.Robot.drive_off_charger_on_connect = False  # Cozmo can stay on his charger
        cozmo.run_program(run, use_viewer=False)


if __name__ == '__main__':
    # cozmo thread
    cozmo_thread = CozmoThread()
    cozmo_thread.start()
    # init
    # gui.start() blocks; it must run on the main thread.
    gui.show_particles(pf.particles)
    gui.show_mean(0, 0, 0)
    gui.start()
|
[
"josh@lawn-143-215-110-217.lawn.gatech.edu"
] |
josh@lawn-143-215-110-217.lawn.gatech.edu
|
ef5607adbade8757f40dac6af98f55570129501b
|
bf3cad5b0a7c9b85d0ed40d2378ae6b0d341fd76
|
/Syntax of Python/02.자료형/02-7.불.py
|
c1341622c8183bad9bb961774903d92225357f99
|
[] |
no_license
|
yallyyally/algo-moreugo
|
c57bc64898aab3de3ea995fffa6b20285fb8aa5f
|
6a33413fb05c1518616074344c404fb718a1a4a5
|
refs/heads/master
| 2023-07-05T17:46:39.634173
| 2021-08-10T12:40:27
| 2021-08-10T12:40:27
| 278,123,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
# 02. Basics of Python programming: data types
# ------------------------- 02-7. Booleans -------------------------
a = True
b = False
print(type(a))
print(type(b))
# <class 'bool'>

# A non-empty list is truthy, so the loop keeps running until pop() empties it.
a = [1, 2, 3, 4]
while a:
    print(a.pop())
print(a)

# Zero is falsy; any non-zero integer is truthy.
b = 0
print(bool(b))  # False
a = 3
print(bool(a))  # True
|
[
"jhshin0028@gmail.com"
] |
jhshin0028@gmail.com
|
c35818a73275b8029d65f14dd6a85a000569aa56
|
2c48935ad23cd36baadce15f977356dc6f32ff57
|
/wafw00f/plugins/safedog.py
|
f8604835d911b64888c3451958dcf4a5fa7d0e19
|
[
"BSD-3-Clause"
] |
permissive
|
j0k2r/wafw00f
|
d051e2e37a5f65a1ee5bdc64064f3652c4e0c4be
|
bac59a6a5aadaa057855c2d28ab410f88fc85ba0
|
refs/heads/master
| 2020-04-27T13:15:50.060366
| 2019-03-07T14:47:00
| 2019-03-07T14:47:00
| 174,363,017
| 0
| 0
|
NOASSERTION
| 2019-03-07T14:43:22
| 2019-03-07T14:43:21
| null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
#!/usr/bin/env python

NAME = 'Safedog'


def is_waf(self):
    """Detect the Safedog WAF via its cookie or server/x-powered-by headers."""
    # Probes are wrapped in lambdas so evaluation stays lazy: any() stops at
    # the first signature that matches, just like the original if-chain.
    probes = (
        lambda: self.matchcookie('^safedog-flow-item='),
        lambda: self.matchheader(('server', '^Safedog')),
        lambda: self.matchheader(('x-powered-by', '^WAF/\d\.\d')),
    )
    return any(probe() for probe in probes)
|
[
"bcoles@gmail.com"
] |
bcoles@gmail.com
|
8edc52df94f88d7fe727961adda4b8cefc9e6b23
|
5a2aa4fb7ad9dac4460ce2280c67a5a6ba853bcf
|
/mysite/settings.py
|
d22b0f70d235867fa459b1dd587b6f7593a747fd
|
[] |
no_license
|
Cori1999/Harp2.0
|
4052e2cafc977ee77393708b40e4b0cd92445178
|
1ef27db6ee2f6702f6655026367ac3c27a7fb2be
|
refs/heads/master
| 2023-04-19T04:50:20.155217
| 2021-05-05T03:50:15
| 2021-05-05T03:50:15
| 363,473,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,146
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b#s*_o(3t3ai_k(c5po@h7a=nj5#vjkd3u7ckhnx@)mi=8fn67'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mysite',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
ALLOWED_HOSTS = ['*']
X_FRAME_OPTIONS = '*'
|
[
""
] | |
2ae57b1496df2ac45b96d5c3abe297952b513ea4
|
74494eea24815967e542f78e3d8c63e937d3f5e4
|
/zappit-project/posts/views.py
|
31cabd71eade0eeeb210c326a903c60bc1c8cb42
|
[
"MIT"
] |
permissive
|
madhav06/Create_API_using_Django
|
aa99b69f9a039302a5eca85f65098045d51f797c
|
f779fe7aae0de5f8680e9dbac8e7980b98ece2ad
|
refs/heads/master
| 2023-07-01T06:05:58.723307
| 2021-08-05T08:32:30
| 2021-08-05T08:32:30
| 390,591,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
from django.shortcuts import render
from rest_framework import generics, permissions, mixins, status
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from .models import Post, Vote
from .serializers import PostSerializer, VoteSerializer

# Create your views here.
class PostList(generics.ListCreateAPIView):
    # GET: list all posts. POST: create a post (authenticated users only).
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def perform_create(self, serializer):
        # if request.user.is_authenticated()
        # Stamp the authenticated requester as the post's author.
        serializer.save(poster=self.request.user)
class PostRetrieveDestroy(generics.RetrieveDestroyAPIView):
    # GET: retrieve a single post. DELETE: remove it, but only for its author.
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def delete(self, request, *args, **kwargs):
        # Only allow deletion when the post belongs to the requester.
        post = Post.objects.filter(pk=kwargs['pk'], poster=self.request.user)
        if post.exists():
            return self.destroy(request, *args, **kwargs)
        else:
            raise ValidationError(" This is not your post to delete. ")
class VoteCreate(generics.CreateAPIView, mixins.DestroyModelMixin):
    # POST: vote on the post given by the URL pk. DELETE: retract that vote.
    serializer_class = VoteSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        # The requester's existing vote(s) on this particular post.
        user = self.request.user
        post = Post.objects.get(pk=self.kwargs['pk'])
        return Vote.objects.filter(voter=user, post=post)

    def perform_create(self, serializer):
        # Enforce one vote per user per post.
        if self.get_queryset().exists():
            raise ValidationError(" You have already voted :) ")
        serializer.save(voter=self.request.user, post=Post.objects.get(pk=self.kwargs['pk']))

    def delete(self, request, *args, **kwargs):
        if self.get_queryset().exists():
            self.get_queryset().delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            raise ValidationError(" You never voted for this post. ")
|
[
"mnandan06@gmail.com"
] |
mnandan06@gmail.com
|
3a377cf04afa094b2440a2248de30b8b17054650
|
0e934cb0ec2ff10718eb7013792f9cf9a78d660f
|
/cloudformation/lambda/timestamp/index.py
|
fe2b5abe62423c10d40d812a9b4fd0c0f458eaae
|
[
"MIT"
] |
permissive
|
youngvz/youngvz-aws
|
2fc5b7d64328da5ac168aa1872a52b7ef8ac04ee
|
b5678322805b4b6bcae592d1971999ebf5a21a7c
|
refs/heads/main
| 2023-02-27T12:01:52.255791
| 2021-02-03T18:50:48
| 2021-02-03T18:50:48
| 335,713,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
import boto3
from datetime import datetime


def lambda_handler(event, context):
    """Write the current timestamp into an S3 text object and return its key."""
    # Current time as string; also reused as the object's file name.
    stamp = datetime.now().strftime("%d-%m-%Y %H:%M")
    payload = stamp.encode("utf-8")
    key = "timestamp-lambda/output/" + f'{stamp}.txt'
    bucket = boto3.resource("s3").Bucket("youngvz-python-code")
    bucket.put_object(Key=key, Body=payload)
    return key
|
[
"shah.a.viraj@gmail.com"
] |
shah.a.viraj@gmail.com
|
6751196599c1a572e8ac765e4df76c7401f33e18
|
260aff137c7fc6891b5bea32823148bf7cf85186
|
/main.py
|
c69cfce3af6ac80d45f46e727c7f37635794f330
|
[] |
no_license
|
sebastianmorta/IntroductionToComplexSystems
|
0a714588a4e17e344c5e3d1ede2f4af06cf5489a
|
3925cc5da25774943249d2cf83540152e2718f71
|
refs/heads/master
| 2023-06-02T22:03:13.162302
| 2021-06-19T23:18:45
| 2021-06-19T23:18:45
| 369,873,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,841
|
py
|
import csv
import random
from copy import deepcopy
from random import randint, choice, random
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from classes import Graph, Node, Edge
# def equality(a):
# edge_list = []
# l = int((a / 10) ** (-a)) + 10
# tmp = [x * 0.1 + a / 10 for x in range(0, l)]
# for i in tmp:
# # edge_list.append(int(i ** (-a))+5)
# edge_list.append(int(i ** (-a)) if i ** (-a) >= 1 else 1)
# return len(edge_list), edge_list
# def equality(a):
# l = int((a / 10) ** (-a)) + 10
# tmp = [x * 0.1 + a / 10 for x in range(0, l)]
# t = [x for x in tmp if x <= 1.01]
# edge_list = [int(t[i] ** (-a)) if t[i] ** (-a) >= 1 else 1 for i in range(len(t)) for _ in range(i * 2)]
# return len(edge_list), edge_list
def equality(a, N):
    """Build a power-law-like degree list of N entries sampled from v**(-a).

    Degrees are drawn at N evenly spaced points on [1, 40], scaled by N and
    truncated to int; anything at or below 1 is clamped to 1.
    Returns (len(edge_list), edge_list).
    """
    values_base = np.linspace(1, 40, N)
    edge_list = []
    for v in values_base:
        degree = int((v ** (-a)) * N)
        edge_list.append(degree if degree > 1 else 1)
    return len(edge_list), edge_list
def initMutatnsChart1(g):
    """Mark every node whose degree exceeds the graph's average as a mutant."""
    threshold = g.average_amount_of_edges
    for candidate in (n for n in g.nodes if n.amount_of_edges > threshold):
        candidate.is_mutant = True
def initMutatnsChart2(g, level):
    """Mark the first node with exactly `level` edges as a mutant, if any."""
    match = next((n for n in g.nodes if n.amount_of_edges == level), None)
    if match is not None:
        match.is_mutant = True
def drawGraph(g):
    """Print the mutant/non-mutant tally of graph `g` (drawing is disabled)."""
    # NOTE(review): this CSV read is unused while the nx drawing below stays
    # commented out, and it raises if innovators.csv is absent -- confirm.
    df = pd.read_csv("innovators.csv")
    df1 = df[['from', 'to']]
    color_map = []
    # Both counters start at 1, not 0 -- presumably to avoid division by
    # zero below, but it biases the percentages; TODO confirm intent.
    count_mutatns = 1
    count_not_mutatns = 1
    # G = nx.Graph()
    # G = nx.from_pandas_edgelist(df1, 'from', 'to')
    for node in g.nodes:
        if node.is_mutant:
            color_map.append('red')
            count_mutatns += 1
        else:
            color_map.append('green')
            count_not_mutatns += 1
    # nx.draw(G, node_color=color_map, with_labels=True)
    # plt.show()
    print("color", color_map)
    print("mutants amount", count_mutatns, "---", count_mutatns / (count_not_mutatns + count_mutatns) * 100, "%")
    print("not mutants amount", count_not_mutatns, "---", count_not_mutatns / (count_not_mutatns + count_mutatns) * 100,
          "%")
def saveToCSV(data, name):
    """Write `data` (an iterable of rows) to '<name>.csv' in the working dir."""
    path = name + ".csv"
    with open(path, 'w', newline='') as out:
        csv.writer(out).writerows(data)
def getAmoutnOfMutatns(g):
    """Return [mutants, others, mutant_frac, other_frac, mutants/others].

    Both tallies start at 1 (matching the original counting scheme, which
    also guards the final division against a zero denominator).
    """
    mutants = 1 + sum(1 for node in g.nodes if node.is_mutant)
    others = 1 + sum(1 for node in g.nodes if not node.is_mutant)
    total = mutants + others
    return [mutants, others, mutants / total, others / total, mutants / others]
# Build the degree sequence, construct one graph and two deep copies so the
# three dynamics (link-biased / voter / invasion) start from identical state.
a, b = equality(2.5, 10000)
# g = Graph(10, [7, 4, 3, 2, 2, 2, 1, 1, 1, 1])
g1 = Graph(a, b)
print("avg ", g1.average_amount_of_edges)
g1.assignEdgesToNodes()
g2 = deepcopy(g1)
g3 = deepcopy(g1)
# g.printer()
# g.saveToCSV()
print("sum", sum(b))
print("b", b)
print("len", len(b))
print(sum(g1.check()))
# Seed mutants identically in all three graphs (above-average-degree nodes).
initMutatnsChart1(g1)
initMutatnsChart1(g2)
initMutatnsChart1(g3)
# a, b = equality(2.5)
# print(a)
# g=[1,2,3,4,5,6,7,8,9]
# print(g[1:])
# df = pd.read_csv("book1.csv")
# print(df)
# df = df.loc[df['weight'] > 10, :]
# print(df)
# df1 = df[['Source', 'Target']]
# print(df1)
# Per-dynamics result accumulators; each row is getAmoutnOfMutatns output.
# NOTE(review): np.append re-allocates the whole array every iteration.
data_for_g1 = np.empty((0, 5), int)
data_for_g2 = np.empty((0, 5), int)
data_for_g3 = np.empty((0, 5), int)
#
# print("g1")
# drawGraph(g1)
# for i in range(10000):
#     data_for_g1 = np.append(data_for_g1, np.array([getAmoutnOfMutatns(g1)]), axis=0)
#     for j in range(1000):
#         g1.linkBiasedDynamiks(0.4, j)
# saveToCSV(data_for_g1, "dataw g1")
# drawGraph(g1)
# print("g2")
# drawGraph(g2)
# for i in range(10000):
#     data_for_g2 = np.append(data_for_g2, np.array([getAmoutnOfMutatns(g2)]), axis=0)
#     for j in range(100):
#         g2.voterModel(0.0009, j)
# saveToCSV(data_for_g2, "dataw g2")
# drawGraph(g2)
# Only the invasion-model run (g3) is currently active.
print("g3")
drawGraph(g3)
for i in range(10000):
    data_for_g3 = np.append(data_for_g3, np.array([getAmoutnOfMutatns(g3)]), axis=0)
    for j in range(1000):
        g3.invasionModel(0.5)
saveToCSV(data_for_g3, "dataww g3")
drawGraph(g3)
def ploter(name):
    """Plot the mutant-count time series stored in '<name>.csv'."""
    data = pd.DataFrame(
        pd.read_csv(name + r".csv", sep=',', skiprows=1, engine='python'))
    # Column selected by positional label 2 -- presumably the mutant-fraction
    # column written by getAmoutnOfMutatns; verify against saveToCSV output.
    y = data[2]
    x = np.linspace(0, 5, len(y))
    fig, ax = plt.subplots(figsize=(12, 6))
    ax.plot(x, y, 'r-', label="State 1")
    # Axis label is Polish for "number of mutants".
    plt.ylabel(r"ilosc mutantow", size=16)
    plt.xlabel("t", size=16)
    plt.legend(prop={'size': 12})
    plt.grid(1, 'major')
    # plt.savefig("plt1.png")
    plt.show()
|
[
"mr.mchaker@gmail.com"
] |
mr.mchaker@gmail.com
|
d5cc78f1f9ce550f7696afd11ed547102002b36f
|
939ca419e7bcfda248bdc6636f88971a14bd1a1f
|
/pytests/eventing/eventing_n1ql.py
|
dc01ecfb77b276fd075c1c842512c47c13b77207
|
[] |
no_license
|
ritamcouchbase/secrets-mgmt
|
061515c4a3c100f42ca2ce57f63d4a1acf85745c
|
03641bdb63e298a7c966789559ea22e7129fb7d3
|
refs/heads/master
| 2022-11-20T06:22:09.954115
| 2019-10-03T10:12:26
| 2019-10-03T10:12:26
| 98,027,382
| 0
| 1
| null | 2020-07-24T07:11:59
| 2017-07-22T11:35:37
|
Python
|
UTF-8
|
Python
| false
| false
| 16,752
|
py
|
import copy
from lib.couchbase_helper.documentgenerator import JSONNonDocGenerator
from lib.membase.api.rest_client import RestConnection
from lib.testconstants import STANDARD_BUCKET_PORT
from pytests.eventing.eventing_constants import HANDLER_CODE,HANDLER_CODE_ERROR
from pytests.eventing.eventing_base import EventingBaseTest, log
from lib.couchbase_helper.tuq_helper import N1QLHelper
from pytests.security.rbacmain import rbacmain
from lib.remote.remote_util import RemoteMachineShellConnection
import json
class EventingN1QL(EventingBaseTest):
    """Functional tests for Couchbase Eventing handlers that run N1QL.

    Each test loads documents into a source bucket, deploys a handler whose
    JS code comes from HANDLER_CODE / HANDLER_CODE_ERROR, then verifies the
    mutations the handler produces (or that deployment fails for handlers
    that are expected to be rejected).
    """
    def setUp(self):
        # Create source/destination/metadata buckets (when requested),
        # generate the document load and bind an N1QL helper to the
        # cluster's n1ql node.
        super(EventingN1QL, self).setUp()
        if self.create_functions_buckets:
            self.bucket_size = 100
            log.info(self.bucket_size)
            bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                       replicas=self.num_replicas)
            self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                                bucket_params=bucket_params)
            self.src_bucket = RestConnection(self.master).get_buckets()
            self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                                bucket_params=bucket_params)
            self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                                bucket_params=bucket_params)
            self.buckets = RestConnection(self.master).get_buckets()
        self.gens_load = self.generate_docs(self.docs_per_day)
        self.expiry = 3
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        self.n1ql_helper = N1QLHelper(shell=self.shell,
                                      max_verify=self.max_verify,
                                      buckets=self.buckets,
                                      item_flag=self.item_flag,
                                      n1ql_port=self.n1ql_port,
                                      full_docs_list=self.full_docs_list,
                                      log=self.log, input=self.input,
                                      master=self.master,
                                      use_rest=True
                                      )
    def tearDown(self):
        super(EventingN1QL, self).tearDown()
    def test_delete_from_n1ql_from_update(self):
        """Handler DELETEs from its own source bucket: deploy must be rejected.

        NOTE(review): if deploy_function raises nothing at all, the test
        passes without asserting anything — confirm that is intended.
        """
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_DELETE_UPDATE, worker_count=3)
        try:
            self.deploy_function(body)
        except Exception as ex:
            if "Can not execute DML query on bucket" not in str(ex):
                self.fail("recursive mutations are allowed through n1ql")
    def test_n1ql_prepare_statement(self):
        """Handler executes a previously PREPAREd DELETE statement."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        query = "PREPARE test from DELETE from " + self.src_bucket_name + " where mutated=0"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_PREPARE, worker_count=3)
        self.deploy_function(body)
        # Wait for eventing to catch up with all the create mutations and verify results
        self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, on_delete=True)
        self.undeploy_and_delete_function(body)
        query = "drop primary index on " + self.src_bucket_name
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
    def test_n1ql_DML(self):
        """A single N1QL UPDATE triggers the DML handler; expect 6 results."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        body = self.create_save_function_body(self.function_name,HANDLER_CODE.N1QL_DML,dcp_stream_boundary="from_now",
                                              execution_timeout=15)
        self.deploy_function(body)
        query = "UPDATE "+self.src_bucket_name+" set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        self.verify_eventing_results(self.function_name, 6, skip_stats_validation=True)
        self.undeploy_and_delete_function(body)
    def test_n1ql_DDL(self):
        """Handler issuing DDL statements; expects 3 resulting mutations."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name,HANDLER_CODE.N1QL_DDL,dcp_stream_boundary="from_now",
                                              execution_timeout=15)
        self.deploy_function(body)
        #create a mutation via N1QL
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        query = "UPDATE "+self.src_bucket_name+" set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        #verify deployment should fail
        self.verify_eventing_results(self.function_name, 3, skip_stats_validation=True)
        self.undeploy_and_delete_function(body)
    def test_recursive_mutation_n1ql(self):
        """A handler that mutates its own source bucket must produce 0 results."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE.RECURSIVE_MUTATION,
                                              dcp_stream_boundary="from_now",execution_timeout=15)
        self.deploy_function(body)
        # create a mutation via N1QL
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        query = "UPDATE " + self.src_bucket_name + " set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        # verify deployment should fail
        self.verify_eventing_results(self.function_name, 0)
        self.undeploy_and_delete_function(body)
    def test_grant_revoke(self):
        """Handler running GRANT/REVOKE; afterwards cbadminbucket has no roles."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name,HANDLER_CODE.GRANT_REVOKE,dcp_stream_boundary="from_now",
                                              execution_timeout=15)
        self.deploy_function(body)
        #create a mutation via N1QL
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        query = "UPDATE "+self.src_bucket_name+" set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        #verify deployment should fail
        self.verify_eventing_results(self.function_name, 2, skip_stats_validation=True)
        self.verify_user_noroles("cbadminbucket")
        self.undeploy_and_delete_function(body)
    def test_n1ql_curl(self):
        """Handler using the curl() builtin; requires the all_access whitelist."""
        n1ql_nodes = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=True)
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        self.rest.create_whitelist(self.master, {"all_access": True})
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name,HANDLER_CODE.CURL,dcp_stream_boundary="from_now",
                                              execution_timeout=15)
        self.deploy_function(body)
        # create a mutation via N1QL
        query = "UPDATE "+self.src_bucket_name+" set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        self.verify_eventing_results(self.function_name, 1, skip_stats_validation=True)
        self.undeploy_and_delete_function(body)
    def test_anonymous(self):
        """Handler defined with anonymous functions still fires on mutations."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name,HANDLER_CODE.ANONYMOUS,dcp_stream_boundary="from_now"
                                              ,execution_timeout=5)
        self.deploy_function(body)
        #create a mutation via N1QL
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        query = "UPDATE "+self.src_bucket_name+" set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        #verify that n1ql query will fail
        self.verify_eventing_results(self.function_name, 2, skip_stats_validation=True)
        self.undeploy_and_delete_function(body)
    def test_recursion_function(self):
        """Handler with a (JS-level) recursive function; expects 2 results."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE.RECURSION_FUNCTION,
                                              dcp_stream_boundary="from_now",execution_timeout=5)
        self.deploy_function(body)
        # create a mutation via N1QL
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        query = "UPDATE " + self.src_bucket_name + " set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        # verify that n1ql query will fail
        self.verify_eventing_results(self.function_name, 2, skip_stats_validation=True)
        self.undeploy_and_delete_function(body)
    def test_global_variable(self):
        """Handlers with global-scope statements must be rejected at deploy."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.GLOBAL_VARIABLE,
                                              dcp_stream_boundary="from_now")
        try :
            self.deploy_function(body,deployment_fail=True)
        except Exception as e:
            if "Only function declaration are allowed in global scope" not in str(e):
                self.fail("Deployment is expected to be failed but no message of failure")
    def test_empty_handler(self):
        """An empty handler body must fail deployment."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EMPTY,
                                              dcp_stream_boundary="from_now")
        self.deploy_function(body,deployment_fail=True)
        # TODO : more assertion needs to be validate after MB-27126
    def test_without_update_delete(self):
        """A handler missing OnUpdate/OnDelete callbacks must fail deployment."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.RANDOM,
                                              dcp_stream_boundary="from_now")
        self.deploy_function(body, deployment_fail=True)
        # TODO : more assertion needs to be validate after MB-27126
    def test_anonymous_with_cron_timer(self):
        """Anonymous callback registered with a cron timer must fail deployment."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.ANONYMOUS_CRON_TIMER,
                                              dcp_stream_boundary="from_now")
        self.deploy_function(body, deployment_fail=True)
        # TODO : more assertion needs to be validate after MB-27155
    def test_anonymous_with_doc_timer(self):
        """Anonymous callback registered with a doc timer must fail deployment."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.ANONYMOUS_DOC_TIMER,
                                              dcp_stream_boundary="from_now")
        self.deploy_function(body, deployment_fail=True)
        # TODO : more assertion needs to be validate after MB-27155
    def test_n1ql_iterator(self):
        """Handler iterating over a N1QL result set; one UPDATE fans out to all docs."""
        self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                  batch_size=self.batch_size)
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        body = self.create_save_function_body(self.function_name,HANDLER_CODE.N1QL_ITERATOR,dcp_stream_boundary="from_now",execution_timeout=15)
        self.deploy_function(body)
        query = "UPDATE "+self.src_bucket_name+" set mutated=1 where mutated=0 limit 1"
        self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
        self.verify_eventing_results(self.function_name, self.docs_per_day * 2016 , skip_stats_validation=True)
        self.undeploy_and_delete_function(body)
    # This was moved from base class to here because http://ci-eventing.northscale.in/ was failing as it could not find
    # from pytests.security.rbacmain import rbacmain
    def verify_user_noroles(self,username):
        """Raise unless *username* exists AND has an empty role list.

        NOTE(review): the exception message says "roles are not empty" but it
        is also raised when the user does not exist at all.
        """
        status, content, header=rbacmain(self.master)._retrieve_user_roles()
        res = json.loads(content)
        userExist=False
        for ele in res:
            log.debug("user {0}".format(ele["name"]))
            log.debug(ele["name"] == username)
            if ele["name"] == username:
                log.debug("user roles {0}".format(ele["roles"]))
                if not ele["roles"]:
                    log.info("user {0} has no roles".format(username))
                    userExist=True
                    break
        if not userExist:
            raise Exception("user {0} roles are not empty".format(username))
    def test_n1ql_iterators_with_break_and_continue(self):
        """Iterator handler with break/continue over 100 non-JSON docs."""
        values = ['1', '10']
        # create 100 non json docs
        # number of docs is intentionally reduced as handler code runs 1 n1ql queries/mutation
        gen_load_non_json = JSONNonDocGenerator('non_json_docs', values, start=0, end=100)
        gen_load_non_json_del = copy.deepcopy(gen_load_non_json)
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        self.n1ql_helper = N1QLHelper(shell=self.shell,
                                      max_verify=self.max_verify,
                                      buckets=self.buckets,
                                      item_flag=self.item_flag,
                                      n1ql_port=self.n1ql_port,
                                      full_docs_list=self.full_docs_list,
                                      log=self.log, input=self.input,
                                      master=self.master,
                                      use_rest=True
                                      )
        # primary index is required as we run some queries from handler code
        self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
        # load the data
        self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json, self.buckets[0].kvs[1],
                                   'create', compression=self.sdk_compression)
        body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_ITERATORS, execution_timeout=60)
        self.deploy_function(body)
        # Wait for eventing to catch up with all the update mutations and verify results
        self.verify_eventing_results(self.function_name, 100)
        # delete all the docs
        self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json_del, self.buckets[0].kvs[1],
                                   'delete', compression=self.sdk_compression)
        # Wait for eventing to catch up with all the delete mutations and verify results
        self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
        # undeploy and delete the function
        self.undeploy_and_delete_function(body)
        # delete all the primary indexes
        self.n1ql_helper.drop_primary_index(using_gsi=True, server=self.n1ql_node)
|
[
"ritamcouchbase@gmail.com"
] |
ritamcouchbase@gmail.com
|
21a332cdab9c0dde86a5d4a415767c5dcd351fe7
|
f2658c4bd7f833ace25ac2b63e88317b05f4602d
|
/2017 July/2017-July-11/st_rdf_test/model2/NodesZipcenter.py
|
ec4e6e0e8b2b4e87d7d17653e800ce6df9f09575
|
[] |
no_license
|
xiaochao00/telanav_diary
|
e4c34ac0a14b65e4930e32012cc2202ff4ed91e2
|
3c583695e2880322483f526c98217c04286af9b2
|
refs/heads/master
| 2022-01-06T19:42:55.504845
| 2019-05-17T03:11:46
| 2019-05-17T03:11:46
| 108,958,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,638
|
py
|
#-------------------------------------------------------------------------------
# Name: NodesZipcenter model
# Purpose: this model is used to mapping the ...
# columns: [ ]
#
# Author: rex
#
# Created: 10/12/2015
# Copyright: (c) rex 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from record import Record
from constants import *
import os
import sys
import datetime
import json
# Project root (one level above this file); dump/output files live under it.
ROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),"..")
# Prefix applied to every statistic key emitted by this model.
GLOBAL_KEY_PREFIX = "nodes_zip_center_"
# Field separator used by the PostgreSQL COPY dump.
CSV_SEP = '`'
LF = '\n'
#(key, category, function)
# Each tuple is (stat key, split-per-country flag, __get_* method suffix);
# the bool selects per-ISO-country breakdown (see __get_iso).
STATISTIC_KEYS = (("type",False,"type"),
                  ("link_id", False, "link_id"),
                  ("postal_code", False, "postal_code"),
                  ("iso", True, "iso"))
class NodesZipcenter(Record):
    """Statistics model for RDF postal-code midpoint ("zip center") rows.

    Python 2 code (print statements, dict.has_key). Dumps the region's
    rdf_postal_code_midpoint rows from PostgreSQL to a temporary file,
    then counts per-column statistics into ``self.stat``.
    """
    def __init__(self, region):
        Record.__init__(self)
        # COPY output is written here before being counted.
        self.dump_file = os.path.join(ROOT_DIR, "temporary", self.__class__.__name__)
        self.stat = {}
        self.region = region
    def dump2file(self):
        """Dump this region's midpoint rows to self.dump_file via COPY."""
        cmd = "SELECT \
            rpcm.link_id, \
            rpcm.full_postal_code, \
            rpcm.geo_level, \
            rpcm.iso_country_code \
        FROM \
            public.rdf_postal_code_midpoint AS rpcm \
        WHERE rpcm.iso_country_code IN (%s)"%(REGION_COUNTRY_CODES(self.region, GLOBAL_KEY_PREFIX))
        print cmd
        self.cursor.copy_expert("COPY (%s) TO STDOUT DELIMITER '`'"%(cmd),open(self.dump_file,"w"))
    def get_statistic(self):
        """Compute and return the statistics dict for this region.

        NOTE(review): the bare ``return {}`` below short-circuits the whole
        method — everything after it is dead code, presumably a debugging
        stub that was left in. Confirm before relying on this model.
        """
        return {}
        self.dump2file()
        processcount = 0
        with open(self.dump_file, "r",1024*1024*1024) as csv_f:
            for line in csv_f:
                line = line.rstrip()
                line_p = line.split(CSV_SEP)
                if len(line_p) < 1:
                    continue
                self.__statistic(line_p)
                processcount += 1
                if processcount%5000 == 0:
                    print "\rProcess index [ "+str(processcount)+" ]",
        print "\rProcess index [ "+str(processcount)+" ]",
        # write to file
        with open(os.path.join(ROOT_DIR, "output", "stat", self.__class__.__name__), 'w') as stf:
            stf.write(json.dumps(self.stat))
        return self.stat
    def __statistic(self,line):
        """Dispatch every configured __get_* extractor over one CSV row."""
        for keys in STATISTIC_KEYS:
            try:
                getattr(self,'_NodesZipcenter__get_'+keys[2])(keys,line)
            except:
                print "The statistic [ %s ] didn't exist"%(keys[2])
                print ("Unexpected error:[ NodesZipcenter.py->__statistic] "+str(sys.exc_info()))
    def __count(self,key):
        """Increment self.stat[key], creating the counter on first use."""
        if self.stat.has_key(key):
            self.stat[key] += 1
        else:
            self.stat[key] = 1
    # all statistic method
    # '\N' is presumably PostgreSQL's NULL marker in COPY output — confirm.
    def __get_type(self,keys,line):
        if '\N' != line[0]:
            self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
    def __get_link_id(self,keys,line):
        if '\N' != line[0]:
            self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
    def __get_postal_code(self,keys,line):
        if '\N' != line[1]:
            self.__count("%s%s"%(GLOBAL_KEY_PREFIX,keys[0]))
    def __get_iso(self,keys,line):
        if '\N' != line[3]:
            self.__count("%s%s%s"%(GLOBAL_KEY_PREFIX,keys[0],keys[1] and "#%s"%(line[3]) or ""))
if __name__ == "__main__":
    # use to test this model
    # Smoke test: compute the 'na' (presumably North America) region stats
    # and print them with the elapsed wall-clock time.
    bg = datetime.datetime.now()
    stat = NodesZipcenter('na').get_statistic()
    keys = stat.keys()
    print "==>"
    print "{%s}"%(",".join(map(lambda px: "\"%s\":%s"%(px,stat[px]) ,keys)))
    print "<=="
    ed = datetime.datetime.now()
    print "Cost time:"+str(ed - bg)
|
[
"1363180272@qq.com"
] |
1363180272@qq.com
|
2f7bef7463a93a09d17ac89ae059f7c287ebeb2b
|
3374e63c2df581e00182e3786c8e12e004d48bc7
|
/pillshere/urls.py
|
501e1b6c5eac37332629e1f5f7ef27bd2f9ac5a6
|
[] |
no_license
|
RCiv277/To-Delete
|
7e8d45410712b8212fe35b5163a45d3cb0fdb083
|
568ada8a5e123013460a5c48ec15950a2747de05
|
refs/heads/master
| 2020-11-26T02:07:03.518010
| 2019-12-18T22:38:45
| 2019-12-18T22:38:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
"""pillshere URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Root URL configuration: only the Django admin is routed for now.
urlpatterns = [
    path('admin/', admin.site.urls),
]
|
[
"zfero277@ymail.com"
] |
zfero277@ymail.com
|
51aa322351e2e79e427670a82ad73537dac8b463
|
3df0af6186df880e5e0c3667a04adc38f8af5806
|
/grumblr/migrations/0003_profile.py
|
f557c0d53548e795604e796b1b8d4b547c550a4c
|
[] |
no_license
|
NatureLLL/18738
|
9e43d78df0a5853410668e4276d5058cbfd2e655
|
157b3f55bdab8c6cda05fe9b126c2d5c0edd0caa
|
refs/heads/master
| 2020-05-01T00:16:18.404181
| 2019-03-28T13:41:36
| 2019-03-28T13:41:36
| 177,165,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
# Generated by Django 2.1.1 on 2018-09-27 17:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add the Profile model owned by a user.

    Do not edit generated field definitions by hand; create a follow-up
    migration instead.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('grumblr', '0002_auto_20180927_0155'),
    ]
    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstname', models.CharField(max_length=200)),
                ('lastname', models.CharField(max_length=200)),
                ('age', models.PositiveIntegerField()),
                ('bio', models.CharField(max_length=42)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"leiyuqia@gmail.com"
] |
leiyuqia@gmail.com
|
ad3e20b93f0a0facb1cc91b818e0a416f7b39170
|
48b57de1b4cf6f1858ac88e17699ca8dbc99ec83
|
/project/settings/dev.py
|
cf596d65b5e13251eabf580f7942aa32f8974d7f
|
[] |
no_license
|
rooeydaniel/cc-ecommerce
|
2316250005b6a91a0d76cdeb27c44677a45e21ff
|
4afd694550cfe315249f322ceb81b1bae66d9ce8
|
refs/heads/master
| 2016-09-09T17:32:26.732734
| 2014-04-06T22:06:38
| 2014-04-06T22:06:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
"""Development settings and globals."""
from base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; that is only
# acceptable because this is the development settings module — production
# must override it.
SECRET_KEY = 'jxp9w@b$iso+&877e#_@@5yc%y3iz_#a1h-za%!_ui9doeqn7@'
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# Console backend: emails are printed to stdout instead of being sent.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'angular_django_ecommerce',
        'USER': 'da_ecommerce',
        'PASSWORD': 'da_ecommerce',
        'HOST': 'localhost'
    }
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION - This requires a database
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
    'debug_toolbar',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('127.0.0.1', 'localhost',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES += (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}
########## END TOOLBAR CONFIGURATION
|
[
"dan.stephenson@gmail.com"
] |
dan.stephenson@gmail.com
|
835c037622f6c3e7b9571630c5a72946f03a6886
|
88de1212fa9b5e723f960c700769841010095c89
|
/app.py
|
66cdb10cdc5ac936f20dbea648e1e1332d848cc0
|
[] |
no_license
|
hitheshini/HeartDiseasePrediction
|
d2657e7cc7ce4321ae4129ca6d145fcc2f45481c
|
cdb8ba547a7f6ef0004e68edb2f55eed6a584193
|
refs/heads/main
| 2023-06-20T15:59:11.353949
| 2021-07-18T21:20:59
| 2021-07-18T21:20:59
| 387,406,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,133
|
py
|
import numpy as np
import pandas as pd
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
# Load the pre-trained classifier once at import time. Use a context manager
# so the pickle file handle is closed promptly instead of being leaked by
# pickle.load(open(...)).
with open('model.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
def home():
    """Render the landing page containing the input form."""
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    """Build a single-row DataFrame from the posted form values, run the
    pickled model on it and render the verdict back into the page.

    NOTE(review): the column names (including the stray leading spaces in
    " sex_1" and " fbs_0") must match the columns the model was trained on —
    do not "clean them up" without verifying against the training pipeline.
    Form fields are assumed to arrive in training-column order.
    """
    input_features = [float(x) for x in request.form.values()]
    features_value = [np.array(input_features)]
    features_name = [ "age", "trestbps","chol","thalach", "oldpeak", "sex_0",
                     " sex_1", "cp_0", "cp_1", "cp_2", "cp_3"," fbs_0",
                     "restecg_0","restecg_1","restecg_2","exang_0","exang_1",
                     "slope_0","slope_1","slope_2","ca_0","ca_1","ca_2","thal_1",
                     "thal_2","thal_3"]
    df = pd.DataFrame(features_value, columns=features_name)
    output = model.predict(df)
    # predict() returns an array; index the single prediction explicitly
    # instead of relying on the truthiness of a length-1 ndarray.
    if output[0] == 1:
        res_val = "** heart disease **"
    else:
        res_val = "no heart disease "
    return render_template('index.html', prediction_text='Patient has {}'.format(res_val))
if __name__ == "__main__":
    # Development server only; use a WSGI server in production.
    app.run()
|
[
"hitheshini.1si18cs020@gmail.com"
] |
hitheshini.1si18cs020@gmail.com
|
fc04a0caccf658546cfbbf93482cc3c97c9e9425
|
370122fb062e84d1f439586ba47013f8394aea79
|
/ejercicio6_curso.py
|
2327eabfb13a63e3455978b77cd23ddc103dcf21
|
[] |
no_license
|
alexcaxtro/intro_python
|
89cb96521dd5a523867dcb8ebefe9478fcae8520
|
f675c475757cad83dc45edb7b2ad2b18a4a7cd5c
|
refs/heads/master
| 2023-07-13T05:54:58.428486
| 2021-08-11T20:24:34
| 2021-08-11T20:24:34
| 395,106,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# Determine whether an entered number is even (PAR) or odd (impar).
valor = int(input('Coloca un número y te diré si es PAR o impar '))
# Parity check via modulo; messages preserved verbatim from the original.
if valor % 2 == 0:
    print('El número es PAR')
else:
    print('El NúMERO es iMpAr')
|
[
"oscar.castro21@inacapmail.cl"
] |
oscar.castro21@inacapmail.cl
|
a58d0e15227f824cf975cf043251a109d90716a5
|
27681781bbc5a790a4e951dfce7394d7a25c45f2
|
/test/corrtab.py
|
7ea87966079ccc8c4e4b78b42b2ca2540f114177
|
[] |
no_license
|
xyzw/stieltjes
|
74b1288026faf68e57264a27461e3b680147e6e7
|
50bb0b8d4487eef88499f74ece9403af2b387be5
|
refs/heads/master
| 2021-01-01T16:20:23.302222
| 2012-09-24T11:47:04
| 2012-09-24T11:47:04
| 2,294,049
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,818
|
py
|
import sys
sys.path.append('../lib')
import time
import argparse
from recurrence import *
from decomp import *
from quad import *
from sti import *
from itertools import product
from poly import *
from ppoly import *
from fea1d import *
from egrhs import *
from aposteriori import *
def average(x):
    """Arithmetic mean of *x* as an mpmath value (fsum/mpf come from the
    star imports at the top of this file)."""
    count = len(x)
    assert count > 0
    return fsum(x) / mpf(count)
def pearson(x, y):
    """Pearson correlation coefficient of the paired samples *x* and *y*.

    Requires both sequences to be non-empty and of equal length; relies on
    ``average`` above and the star-imported ``sqrt``.
    """
    assert len(x) == len(y)
    n = len(x)
    assert n > 0
    mean_x = average(x)
    mean_y = average(y)
    cross_sum = 0
    sq_sum_x = 0
    sq_sum_y = 0
    for xi, yi in zip(x, y):
        dx = xi - mean_x
        dy = yi - mean_y
        cross_sum += dx * dy
        sq_sum_x += dx * dx
        sq_sum_y += dy * dy
    return cross_sum / sqrt(sq_sum_x * sq_sum_y)
# --- Experiment driver (Python 2): builds a LaTeX-style correlation table
# --- between the exact FEA error norms and several a-posteriori estimates
# --- for a 1-D problem with an arctan-jump right-hand side.
mp.dps = 15
kappa2 = 1
a=-1
b=1
bt = 0
s = 100
exact = arctanjump(s)
dexact = arctanjumpder(s)
rhs = arctanjumprhs(s,kappa2)
ad = exact(a)
bd = exact(b)
#d = 10
#exact = lambda x : sin(pi*d*x)/(pi*d*x) if x != 0 else 1.0
#dexact = lambda x : (pi*d*x*cos(pi*d*x)-sin(pi*d*x))/(pi*d*x**2) if x != 0 else 0
#rhs = rhsmpmathquad(lambda x : (2*pi*d*x*cos(pi*d*x)-(2-pi**2*d**2*x**2)*sin(pi*d*x))/(pi*d*x**3) + kappa2*sin(pi*d*x)/(pi*d*x) if x != 0 else kappa2)
#ad = exact(a)
#bd = exact(b)
# Table header (LaTeX tabular row).
print "{0:>6s} & {1:>6s} & {2:>6s} & {3:>6s} & {4:>25s} & {5:>25s} & {6:>25s} & {7:>25s} & {8:>10s} & {9:>10s} \\\\".format(\
      "nel","p","iapnel","iapp","rhoneufa","rhobub","rhobuba","rhoxnbub","dtneufa","dtbub")
# Sweep mesh size / polynomial degree / estimator parameters.
# NOTE(review): the loop variable `pi` shadows mpmath's `pi` constant from
# the star imports above — harmless here, but rename if this file is touched.
for neli in range(10,20):
    for pi in range(1,2):
        for iapneli in range(3,4):
            for iappi in range(3,4):
                X = linspace(a,b,neli+1)
                ab = r_jacobi(pi+1)
                xw = gauss(ab)
                #print ">>>> Finite element analysis for p=", p
                phi = lagrangecheby(pi)
                els,G,x,phi = fea1dh(X, lagrangecheby(pi), kappa2, bt, [ad,bd], rhs)
                uh = ppolyfea1sol(els,G,x,phi)
                #print ">>>> Implicit a posteriori error estimation"
                duh = ppolyder(uh)
                dduh = ppolyder(duh)
                #print ">>>>>> Neumann problem with exact derivatives"
                #t1 = time.time()
                #errhhneu,errhhneunorm = iapneu(els,G,x,phi,kappa2,rhs,duX,iapneli,iappi)
                #t2 = time.time()
                #print ">>>>>> Neumann problem with face averaging"
                t3 = time.time()
                errhhneufa,errhhneufanorm = iapneu(els,G,x,phi,kappa2,rhs,None,iapneli,iappi)
                t4 = time.time()
                #print ">>>>>> Dirichlet problem with bubble polynomials"
                #errhhbub,errhhbubnorm,tbub = iapbub(els,G,x,phi,kappa2,rhs,iapneli,iappi)
               # errhhbubxn,errhhbubxnnorm,tbubxn = iapbub(els,G,x,phi,kappa2,rhs,iapneli,iappi,False)
               # errhhbubxnspread,errhhbubxnspreadnorm,tbubxnspread = iapbubspread(els,G,x,phi,kappa2,rhs,iapneli,iappi,False)
                errhhbubortho,errhhbuborthonorm,tbubortho = iapbubortho(els,G,x,phi,kappa2,rhs,iapneli,iappi)
                errhhbuba,errhhbubanorm,tbuba = iapbuba(els,G,x,phi,kappa2,rhs,iappi)
                errhhbubxnortho,errhhbubxnorthonorm,tbubxnortho = iapbubxnortho(els,G,x,phi,kappa2,rhs,iapneli,iappi)
                errhnorm = ppolyerrh1norm2intv(uh,exact,dexact)
                #for e in range(len(els)):
                #    print "{0:>2d} {1:>15s} {2:>15s} {3:>15s} {4:>15s}".format(e, nstr(errhhneunorm[e]), nstr(errhhneunorm[e]), nstr(errhhneufanorm[e]), nstr(errhhbubnorm[e]))
                # One table row: Pearson correlations between exact and
                # estimated element-wise error norms, plus timings.
                print "{0:>6d} & {1:>6d} & {2:>6d} & {3:>6d} & {4:>25s} & {5:>25s} & {6:>25s} & {7:>25s} & {8:>10f} & {9:>10f} \\\\".\
                    format(neli,pi,iapneli,iappi, pearson(errhhneufanorm,errhnorm), pearson(errhhbuborthonorm,errhnorm),pearson(errhhbubanorm,errhnorm), pearson(errhhbubxnorthonorm,errhnorm),\
                    t4-t3, tbubortho)
##                print "{0:>6d} & {1:>6d} & {2:>6d} & {3:>6d} & {4:>25s} & {5:>25s} & {6:>25s} & {7:>25s} & {8:>25s} & {9:>10f} & {10:>10f} & {11:>10f} & {12:>10f} & {13:>10f}\\\\".\
##                format(neli,pi,iapneli,iappi,\
##                pearson(errhhneufanorm,errhnorm),\
##                pearson(errhhbubnorm,errhnorm), \
##                pearson(errhhbubxnnorm,errhnorm), \
##                pearson(errhhbubxnspreadnorm,errhnorm),\
##                pearson(errhhbuborthonorm,errhnorm), t4-t3, tbub, tbubxn, tbubxnspread, tbubortho)
                #print "{0:f} {1:f} {2:f}".format(t2-t1, t4-t3, tbub)
#print "rho(exact,neudexact) =", pearson(errhhneunorm,errhnorm)
#print "rho(exact,neufa) =", pearson(errhhneufanorm,errhnorm)
#print "rho(exact,bub) =", pearson(errhhbubnorm,errhnorm)
|
[
"csirik@gmail.com"
] |
csirik@gmail.com
|
e773b2dbd7a43207f92aa9696cde9dea110e4f90
|
0405f4d68185e6f8574e9c1d557b2bf1f9538ea8
|
/blog/migrations/0003_comment_useremail.py
|
8070d60d75ff450c76d0c4094b6e948aa683aab9
|
[
"MIT"
] |
permissive
|
WolfWW/django-simple-blog
|
f7e13a57d3df294e1e8e2581bf700a65a3c209e0
|
18cc2f62efc2ff1405c83a89d05fecf685ded8b0
|
refs/heads/master
| 2021-01-19T21:47:27.442633
| 2017-04-25T06:37:56
| 2017-04-25T06:37:56
| 88,706,418
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-25 04:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add Comment.useremail (optional email).

    Do not edit generated field definitions by hand; create a follow-up
    migration instead. The verbose_name is a user-facing (Chinese) label
    and must not be altered here.
    """

    dependencies = [
        ('blog', '0002_auto_20170419_1945'),
    ]
    operations = [
        migrations.AddField(
            model_name='comment',
            name='useremail',
            field=models.EmailField(blank=True, max_length=100, verbose_name='邮件地址'),
        ),
    ]
|
[
"ww45908757@gmail.com"
] |
ww45908757@gmail.com
|
f510140ce86e0004f9de5690e470a58ef766254f
|
9731214d2d7c1b8a09392c5c257efc4ac6eb2879
|
/userbot/plugins/alive.py
|
3695334bca880cf097ddc92d00eab8289a0b85b6
|
[
"Apache-2.0"
] |
permissive
|
LIMITLESS-IO/X-tra-Telegram
|
42f6ad9604142285c7b90e5fe98f852a3245badb
|
1c81c82cc51863bbf0989e3a6048006f402be099
|
refs/heads/master
| 2022-11-13T21:18:19.448133
| 2020-07-02T20:49:06
| 2020-07-02T20:49:06
| 276,730,400
| 0
| 0
|
Apache-2.0
| 2020-07-02T19:34:13
| 2020-07-02T19:34:12
| null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
"""Check if userbot alive. If you change these, you become the gayest gay such that even the gay world will disown you."""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from platform import uname
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "**No Name set yet.** [Check Guide.](https://how2techy.com/xtra-guide1/)"
@command(outgoing=True, pattern="^.alive$")
async def amireallyalive(alive):
""" For .alive command, check if the bot is running. """
await alive.edit("`Currently Alive, my peru master!` **ψ(`∇´)ψ**\n\n"
"`Telethon version: 6.9.0\nPython: 3.7.3\n`"
"`Time to flex on them master`"
|
[
"noreply@github.com"
] |
LIMITLESS-IO.noreply@github.com
|
8bcf316682b3a0cb98ced94ef57f7ff6d0167144
|
87837f5dc02c11907c8f921d84de4ee73784f687
|
/GIS MAPS.py
|
c043e0f9172859397637aa847462a7f00826f6e4
|
[] |
no_license
|
nhingorany/nei-scrape-rev.2
|
ce80d5fa22b10dda61203737392338130854ac92
|
940059500860efbb8a74801880ba3f13225a3126
|
refs/heads/master
| 2020-03-16T05:05:45.177286
| 2018-05-07T23:02:16
| 2018-05-07T23:02:16
| 132,525,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,394
|
py
|
#gis maps
import time
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.keys import Keys
import time
from time import sleep
import datetime
from PIL import Image
import re
import os
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
print ("program search vision apprisal in Portsmouth, Middletown, Tiverton, Newport; gets gis and aerial")
print ("if using Tiverton use full name e.g. road, way, lane")
print ("if using Portsmouth Middletown or Newport use abbrev. eg. RD, WAY, LN")
print ("litteral searches for address only - not for use with vacant land")
address = input("Enter Address")
town = input("Enter Town, no spaces, and capitalize first letter")
state = input("Enter Two Letter State")
print("open town page" + town)
driver = webdriver.Firefox(executable_path=r'c:\gecko\geckodriver.exe')
driver = webdriver.Firefox()
if town == 'Portsmouth':
gis = 'http://www.mainstreetmaps.com/ri/portsmouth/public.asp'
elif town == 'Tiverton':
gis = 'http://www.mainstreetmaps.com/ri/tiverton/public.asp'
elif town == 'Newport': #arcgis
gis = 'http://newportri.maps.arcgis.com/apps/webappviewer/index.html?id=78f7978f5667474da76d2533481662e4'
elif town == 'Middletown': #tighebond
gis = 'http://hosting.tighebond.com/MiddletownRI_Public/index.html'
else:
print ('you spelled the Town name wrong or did not capitalize dummy OR town not in gis index')
driver = webdriver.Firefox()
driver.maximize_window()
#town list
mainst = ["Portsmouth", "Tiverton", "Warren"]
arcgis = ["Newport"]
tighebond = ["Middletown"]
if town in mainst:
print("Using MainStMaps GIS")
driver.get(gis)
print ("searching for")
print (town)
print (gis)
element = driver.find_element_by_id('d_disc_ok')
element.click()
elem = driver.find_element_by_id('s_location')
elem.clear()
elem.send_keys(address)
elem.send_keys(Keys.DOWN)
elem.send_keys(Keys.RETURN)
time.sleep(3)
elem = driver.find_element_by_id('baselayers')
elem.send_keys(Keys.DOWN)
elem.send_keys(Keys.RETURN)
time.sleep(3)
driver.get_screenshot_as_file('tempgis.png')
print ("tempgis screenshot saved")
elif town in arcgis: #arcgis search
print("Using ARCGIS GIS")
driver.get(gis)
print ("searching for")
print (town)
print (gis)
time.sleep(3)
element = driver.find_element_by_xpath('//*[@id="esri_dijit_Search_0_input"]') #click search bar
element.click()
element.send_keys(address)
#elem.send_keys(Keys.UP)
element.send_keys(Keys.RETURN)
time.sleep(2)
element = driver.find_element_by_xpath('//*[@id="widgets_ZoomSlider_Widget_31"]/div[1]') #one click zoom in
element.click()
element = driver.find_element_by_xpath('//*[@id="widgets_ZoomSlider_Widget_31"]/div[1]') #one click zoom in
element.click()
element = driver.find_element_by_xpath('//*[@id="widgets_ZoomSlider_Widget_31"]/div[1]') #one click zoom in
element.click()
time.sleep(3)
driver.get_screenshot_as_file('tempgis.png')
print ("tempgis screenshot saved")
elif town in tighebond: #tighebond search #lot more to to click easements, etc. LOT OF INFO
print("Using tighebond GIS")
driver.get(gis)
print ("searching for")
print (town)
print (gis)
time.sleep(3)
element = driver.find_element_by_xpath('//*[@id="searchinput"]') #click search bar
element.click()
element.send_keys(address)
#elem.send_keys(Keys.UP)
element.send_keys(Keys.RETURN)
time.sleep(3)
element = driver.find_element_by_xpath('//*[@id="tabbasemap"]/button/div') #click layer bar
element.click()
element = driver.find_element_by_xpath('//*[@id="baseMapGallery"]/li[4]/a/img') #click googlem map bar
element.click()
time.sleep(3)
driver.get_screenshot_as_file('tempgis.png')
print ("tempgis screenshot saved")
else:
print("town not in gis list")
driver.quit()
#END GIS ---------------------------
|
[
"nhingorany@nei-cds.com"
] |
nhingorany@nei-cds.com
|
5ae5dec9f0f5637993704be56516bdd59dc07a12
|
cf19b96eb917eca1208627edc27c698d249e859b
|
/build/my_robot/catkin_generated/pkg.installspace.context.pc.py
|
1bb9da82786351df9f08ed8d41efa5effb23bfc8
|
[] |
no_license
|
Shubham-Tandon/ROBOND_WhereAmI
|
82865453acd7f345e4890770728de8b66cbaf183
|
c18fa676b7cdf3cc2e7f0381631b276b3da48b65
|
refs/heads/master
| 2022-07-31T04:00:14.208578
| 2020-05-22T21:29:55
| 2020-05-22T21:29:55
| 265,763,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "my_robot"
PROJECT_SPACE_DIR = "/home/workspace/ROBOND_WhereAmI/install"
PROJECT_VERSION = "0.0.0"
|
[
"standon@ncsu.edu"
] |
standon@ncsu.edu
|
04785185c6a79bfd8c3fc3be0905894947987228
|
ad5a9e3f3b0c5be34fd8d3881f0411c6dba04df4
|
/gen.py
|
08365569737adb0bd8710198ea849d9973507f2c
|
[] |
no_license
|
Igor-chest/Labirinth_project
|
29d929537cd218d105c4a2511872d48f4a26ce53
|
d2f25b234917326997e939e30689ced41091331a
|
refs/heads/master
| 2020-08-08T01:50:35.712910
| 2019-12-17T06:49:37
| 2019-12-17T06:49:37
| 213,665,560
| 2
| 1
| null | 2019-10-13T07:44:45
| 2019-10-08T14:28:32
|
Python
|
UTF-8
|
Python
| false
| false
| 10,533
|
py
|
import random
# КОНСТАНТЫ
green = '\033[92m'
grey = '\033[90m'
end = '\033[0m'
ways = [[0, 1], [1, 0], [0, -1],
[-1, 0]] # соответствует 4 направлениям вокруг клетки при прибавлении к координатам(const)
angles = [[1, 1], [1, -1], [-1, 1],
[-1, -1]] # дополнение к ways. соответствует 4 углам вокруг клетки при прибавлении к координатам
'''
Функция line создаёт 1 ответвление от лабиринта за вызов.
Ответвление генерируется начиная со случайного незанятого места и заканчивается когда соеденится с уже сгенерированной
частью лабиринта(при первом вызове это только клетка start)
на вход подаётся:
x,y - размер лабиринта по x и по y
zCount - кол-во не занятых клеток
point - текущая позиция(начальная)
'''
def line(map, x, y, zCount, point):
saveCount = zCount
sym = 2
nSym = 3
road = [0, 1]
block = 4
ink = 5
# ^ константы
flag = True
map[point[0]][point[1]] = ink
while flag: # поклеточно генерирует ответвление
way = 0
pWays = ways.copy()
for i in ways:
if map[point[0] + i[0]][point[1] + i[1]] == sym: # пока не встретит остальной лабиринт
flag = False
way = i
break
while flag:
if not pWays:
labPrint(map, x, y)
for i in range(x): # удаляет ответвление из-за ошибки генерации
for j in range(y):
if map[i][j] == 4:
map[i][j] = 0
if map[i][j] == 5:
map[i][j] = 0
return saveCount
r = pWays[random.randint(0, len(pWays) - 1)]
if map[point[0] + r[0]][point[1] + r[1]] in road and choose(map, x, y, point, [r[0], r[1]], sym, road,
nSym):
way = r
break
pWays.remove(r)
zCount = step(map, point, way, zCount, block, ink)
point = [point[0] + way[0], point[1] + way[1]]
for i in range(x): # делает ответвление частью лабиринта
for j in range(y):
if map[i][j] == 4:
map[i][j] = 1
if map[i][j] == 5:
map[i][j] = 2
return zCount
'''
генерирует пустое поле для лабиринта размером x на y
'''
def gen(x, y):
nMap = [0] * x
j = 0
for i in nMap:
nMap[j] = [0] * y
j += 1
return nMap
'''
удлиняет ответвление на 1 клетку.
point - текущая позиция. [x,y]
way - направление шага. [0,-1/1]/[-1/1,0](влево/вправо/вверх/вниз)
zCount - кол-во незанятого места
block - обозначение для клетки новой стенки в массиве
ink - обозначение для клетки новой дороги в массиве
'''
def step(map, point, way, zCount, block, ink):
for i in ways, angles:
for j in i: # застраивает клетку стенками
if map[point[0] + j[0]][point[1] + j[1]] == 0 and (
(way[0] and way[0] != j[0]) or (way[1] and way[1] != j[1])):
map[point[0] + j[0]][point[1] + j[1]] = block
zCount -= 1
if map[point[0] + way[0]][point[1] + way[1]] == 0:
zCount -= 1
map[point[0] + way[0]][point[1] + way[1]] = ink
return zCount
'''
проверяет есть ли путь у ветки до лабиринта
x,y - размер лабиринта по x и по y
point - текущая позиция. [x,y]
way - направление шага. [0,-1/1]/[-1/1,0](влево/вправо/вверх/вниз)
sym - обозначение для клетки старой дороги в массиве
nSym - обозначение для клетки новой дороги в массиве
road - клетки через которые можно проводить новое ответвление.
0 - незанятые, 1 - старая стенка(для присоединения к остальному лабиринту)
'''
def choose(map, x, y, point, way, sym, road, nSym):
tPoint = [point[0] + way[0], point[1] + way[1]]
nMap = gen(x, y)
contacts = set()
flag = True
for i in ways, angles:
for j in i:
if (way[0] and way[0] != j[0]) or (way[1] and way[1] != j[1]):
nMap[point[0] + j[0]][point[1] + j[1]] = nSym
if not map[point[0] + j[0]][point[1] + j[1]] and flag:
for k in ways, angles:
for c in k:
if map[point[0] + j[0] + c[0]][point[1] + j[1] + c[1]] == 9:
flag = False
elif map[point[0] + j[0] + c[0]][point[1] + j[1] + c[1]] == 4:
contacts.add((point[0] + j[0] + c[0], point[1] + j[1] + c[1]))
if flag:
goodCon = 0
# print(contacts,'!')
truePos = []
for i in ways:
if map[point[0] + i[0]][point[1] + i[1]] != 5 and i != way:
truePos.append((point[0] + i[0], point[1] + i[1]))
# print(truePos,point,way)
for i in contacts:
flag = True
if i in truePos:
# print("Zashel 1")
break
n = 1
pos = [i]
while n and flag:
n -= 1
newPos = []
for j in pos:
for k in ways:
if (j[0] + k[0], j[1] + k[1]) in truePos:
# print("Zashel 2")
goodCon += 1
flag = False
break
if map[j[0] + k[0]][j[1] + k[1]] == 4 and map[j[0] + k[0]][j[1] + k[1]] not in pos:
newPos.append((j[0] + k[0], j[1] + k[1]))
if not flag:
break
pos = newPos
if goodCon == len(contacts):
# print(goodCon)
return True
# else:
# print("not",goodCon)
nMap[tPoint[0]][tPoint[1]] = nSym
possible = [tPoint]
flag = False
for i in possible: # цикл проходит по массиву координат possible, в то же время добавляя в него пустые клетки с 4
if flag: # сторон от текущей. Если все возможные клетки пройдены, то значит пути до лабиринта нет.
break
for j in ways:
if map[i[0] + j[0]][i[1] + j[1]] == sym:
flag = True
break
if nMap[i[0] + j[0]][i[1] + j[1]] == 0 and map[i[0] + j[0]][i[1] + j[1]] in road:
nMap[i[0] + j[0]][i[1] + j[1]] = nSym
possible.append([i[0] + j[0], i[1] + j[1]])
return flag
def labGen(x, y): # MAIN
map = gen(x, y)
start = [random.randint(1, x - 2), 1]
zCount = (x - 2) * (y - 2) - 2
plain = zCount # кол-во пустых клеток в пустом поле для лабиринта
finish = [random.randint(1, x - 2), y - 2]
for i in range(x):
for j in range(y):
if i == x - 1 or i == 0 or j == y - 1 or j == 0:
map[i][j] = 9
map[start[0]][start[1]] = 2
while zCount > 0:
# print((str)((int)((plain - zCount) / plain * 100)) + '%')
flag = False
r = random.randint(0, zCount - 1)
for i in range(x):
for j in range(y):
if map[i][j] == 0:
if r:
r -= 1
else:
zCount -= 1
zCount = line(map, x, y, zCount, [i, j])
flag = True
break
if flag:
break
if not flag:
print('/', zCount)
break
line(map, x, y, zCount, finish)
map[start[0]][0] = 2
map[finish[0]][y - 1] = 2
map[start[0]][start[1]] = 3
map[finish[0]][finish[1]] = 'f'
return map
'''
генерирует и сохраняет лабиринт в фаил
х - высота
у - ширина
fName - имя файла, куда сохранять
'''
def oldSaveGen(fName, x, y):
map = labGen(x, y)
file = open(fName, 'w')
file.write(str(y) + '\n')
for i in range(x):
for j in range(y):
file.write(str(map[i][j]))
file.close()
def saveGen(player):
changed_file_save = []
with open('save.txt', 'r') as f: # ищем в файле очки игрока
for string in f:
if player in string:
items = string.split()
x = int(items[1])
else:
changed_file_save.append(string.rstrip()) # .rstrip() удаляет символ \n
changed_file_save.append(player + ' ' + str(x + 2)) # добавляем в файл информацию об очках игрока
open('save.txt', 'w').write("\n".join(changed_file_save) + '\n')
def downloadGen(player):
changed_file_load = []
with open('save.txt', 'r') as f: # ищем в файле очки игрока
for string in f:
if player in string:
items = string.split()
x = int(items[1])
else:
changed_file_load.append(string.rstrip()) # .rstrip() удаляет символ \n
return (x)
def labPrint(map, x, y):
for i in range(x):
for j in range(y):
if map[i][j] in (2, 5):
print(green + '█' + end, end='')
else:
print(grey + '█' + end, end='')
print()
|
[
"noreply@github.com"
] |
Igor-chest.noreply@github.com
|
94c72bf81a4cebf053d1787963b219571fdb5425
|
1a7adc0e05c953c2d97b897adb074b8745108608
|
/train_ofa_net_cifar10_simple.py
|
e2b736c16099360aab1b36ec165f4d0e264da2dc
|
[
"Apache-2.0"
] |
permissive
|
twice154/ofa-for-super-resolution
|
61992049d6c7ac381130b4cd2df8ba48aa237a9b
|
cc3b4e129c7e92f7a16eb669db24aac120be1431
|
refs/heads/master
| 2023-06-19T22:04:12.215389
| 2021-07-01T07:30:27
| 2021-07-01T07:30:27
| 277,010,250
| 13
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,437
|
py
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import argparse
import numpy as np
import os
import random
# import horovod.torch as hvd
import torch
from ofa.elastic_nn.modules.dynamic_op import DynamicSeparableConv2d
from ofa.elastic_nn.networks import OFAMobileNetV3
from ofa.imagenet_codebase.run_manager import DistributedImageNetRunConfig
from ofa.imagenet_codebase.run_manager.distributed_run_manager import DistributedRunManager
from ofa.imagenet_codebase.data_providers.base_provider import MyRandomResizedCrop
from ofa.utils import download_url
from ofa.elastic_nn.training.progressive_shrinking import load_models
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='depth', choices=[
'kernel', 'depth', 'expand',
])
parser.add_argument('--phase', type=int, default=1, choices=[1, 2])
args = parser.parse_args()
if args.task == 'kernel':
args.path = 'exp/normal2kernel'
args.dynamic_batch_size = 1
args.n_epochs = 120
args.base_lr = 3e-2
args.warmup_epochs = 5
args.warmup_lr = -1
args.ks_list = '3,5,7'
args.expand_list = '6'
args.depth_list = '4'
elif args.task == 'depth':
args.path = 'exp/kernel2kernel_depth/phase%d' % args.phase
args.dynamic_batch_size = 2
if args.phase == 1:
args.n_epochs = 25
args.base_lr = 2.5e-3
args.warmup_epochs = 0
args.warmup_lr = -1
args.ks_list = '3,5,7'
args.expand_list = '6'
args.depth_list = '3,4'
else:
args.n_epochs = 120
args.base_lr = 7.5e-3
args.warmup_epochs = 5
args.warmup_lr = -1
args.ks_list = '3,5,7'
args.expand_list = '6'
args.depth_list = '2,3,4'
elif args.task == 'expand':
args.path = 'exp/kernel_depth2kernel_depth_width/phase%d' % args.phase
args.dynamic_batch_size = 4
if args.phase == 1:
args.n_epochs = 25
args.base_lr = 2.5e-3
args.warmup_epochs = 0
args.warmup_lr = -1
args.ks_list = '3,5,7'
args.expand_list = '4,6'
args.depth_list = '2,3,4'
else:
args.n_epochs = 120
args.base_lr = 7.5e-3
args.warmup_epochs = 5
args.warmup_lr = -1
args.ks_list = '3,5,7'
args.expand_list = '3,4,6'
args.depth_list = '2,3,4'
else:
raise NotImplementedError
args.manual_seed = 0
args.lr_schedule_type = 'cosine'
args.base_batch_size = 64
args.valid_size = 10000
args.opt_type = 'sgd'
args.momentum = 0.9
args.no_nesterov = False
args.weight_decay = 3e-5
args.label_smoothing = 0.1
args.no_decay_keys = 'bn#bias'
args.fp16_allreduce = False
args.model_init = 'he_fout'
args.validation_frequency = 1
args.print_frequency = 10
args.n_worker = 8
args.resize_scale = 0.08
args.distort_color = 'tf'
args.image_size = '128,160,192,224'
args.continuous_size = True
args.not_sync_distributed_image_size = False
args.bn_momentum = 0.1
args.bn_eps = 1e-5
args.dropout = 0.1
args.base_stage_width = 'proxyless'
args.width_mult_list = '1.0'
args.dy_conv_scaling_mode = 1
args.independent_distributed_sampling = False
args.kd_ratio = 1.0
args.kd_type = 'ce'
if __name__ == '__main__':
os.makedirs(args.path, exist_ok=True)
# Initialize Horovod
hvd.init()
# Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())
args.teacher_path = download_url(
'https://hanlab.mit.edu/files/OnceForAll/ofa_checkpoints/ofa_D4_E6_K7',
model_dir='.torch/ofa_checkpoints/%d' % hvd.rank()
)
num_gpus = hvd.size()
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
np.random.seed(args.manual_seed)
random.seed(args.manual_seed)
# image size
args.image_size = [int(img_size) for img_size in args.image_size.split(',')]
if len(args.image_size) == 1:
args.image_size = args.image_size[0]
MyRandomResizedCrop.CONTINUOUS = args.continuous_size
MyRandomResizedCrop.SYNC_DISTRIBUTED = not args.not_sync_distributed_image_size
# build run config from args
args.lr_schedule_param = None
args.opt_param = {
'momentum': args.momentum,
'nesterov': not args.no_nesterov,
}
args.init_lr = args.base_lr * num_gpus # linearly rescale the learning rate
if args.warmup_lr < 0:
args.warmup_lr = args.base_lr
args.train_batch_size = args.base_batch_size
args.test_batch_size = args.base_batch_size * 4
run_config = DistributedImageNetRunConfig(**args.__dict__, num_replicas=num_gpus, rank=hvd.rank())
# print run config information
if hvd.rank() == 0:
print('Run config:')
for k, v in run_config.config.items():
print('\t%s: %s' % (k, v))
if args.dy_conv_scaling_mode == -1:
args.dy_conv_scaling_mode = None
DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = args.dy_conv_scaling_mode
# build net from args
args.width_mult_list = [float(width_mult) for width_mult in args.width_mult_list.split(',')]
args.ks_list = [int(ks) for ks in args.ks_list.split(',')]
args.expand_list = [int(e) for e in args.expand_list.split(',')]
args.depth_list = [int(d) for d in args.depth_list.split(',')]
net = OFAMobileNetV3(
n_classes=run_config.data_provider.n_classes, bn_param=(args.bn_momentum, args.bn_eps),
dropout_rate=args.dropout, base_stage_width=args.base_stage_width, width_mult_list=args.width_mult_list,
ks_list=args.ks_list, expand_ratio_list=args.expand_list, depth_list=args.depth_list
)
# teacher model
if args.kd_ratio > 0:
args.teacher_model = OFAMobileNetV3(
n_classes=run_config.data_provider.n_classes, bn_param=(args.bn_momentum, args.bn_eps),
dropout_rate=0, width_mult_list=1.0, ks_list=7, expand_ratio_list=6, depth_list=4,
)
args.teacher_model.cuda()
""" Distributed RunManager """
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
distributed_run_manager = DistributedRunManager(
args.path, net, run_config, compression, backward_steps=args.dynamic_batch_size, is_root=(hvd.rank() == 0)
)
distributed_run_manager.save_config()
# hvd broadcast
distributed_run_manager.broadcast()
# load teacher net weights
if args.kd_ratio > 0:
load_models(distributed_run_manager, args.teacher_model, model_path=args.teacher_path)
# training
from ofa.elastic_nn.training.progressive_shrinking import validate, train
validate_func_dict = {'image_size_list': {224} if isinstance(args.image_size, int) else sorted({160, 224}),
'width_mult_list': sorted({0, len(args.width_mult_list) - 1}),
'ks_list': sorted({min(args.ks_list), max(args.ks_list)}),
'expand_ratio_list': sorted({min(args.expand_list), max(args.expand_list)}),
'depth_list': sorted({min(net.depth_list), max(net.depth_list)})}
if args.task == 'kernel':
validate_func_dict['ks_list'] = sorted(args.ks_list)
if distributed_run_manager.start_epoch == 0:
model_path = download_url('https://hanlab.mit.edu/files/OnceForAll/ofa_checkpoints/ofa_D4_E6_K7',
model_dir='.torch/ofa_checkpoints/%d' % hvd.rank())
load_models(distributed_run_manager, distributed_run_manager.net, model_path=model_path)
distributed_run_manager.write_log('%.3f\t%.3f\t%.3f\t%s' %
validate(distributed_run_manager, **validate_func_dict), 'valid')
train(distributed_run_manager, args,
lambda _run_manager, epoch, is_test: validate(_run_manager, epoch, is_test, **validate_func_dict))
elif args.task == 'depth':
from ofa.elastic_nn.training.progressive_shrinking import supporting_elastic_depth
supporting_elastic_depth(train, distributed_run_manager, args, validate_func_dict)
else:
from ofa.elastic_nn.training.progressive_shrinking import supporting_elastic_expand
supporting_elastic_expand(train, distributed_run_manager, args, validate_func_dict)
|
[
"terranada@naver.com"
] |
terranada@naver.com
|
30c35d3899226c779a020dab23727f1b0f1ac4e5
|
1922dadca6c0afac668cd738ba30418ed59faaa2
|
/bench/unitbench.py
|
5f34376031eebdaed00061236cb4b3dfebb1d58f
|
[
"MIT"
] |
permissive
|
timgates42/python-jwt
|
ba25c6112fa5a9188dc8918f0dd5b334af9d50c5
|
fedc67701cceaa9d0756352fab9f09d04ff21dca
|
refs/heads/master
| 2023-03-18T11:14:57.023652
| 2021-12-27T14:00:57
| 2021-12-27T14:00:57
| 509,876,946
| 0
| 0
|
MIT
| 2022-07-02T22:51:32
| 2022-07-02T22:51:32
| null |
UTF-8
|
Python
| false
| false
| 10,548
|
py
|
'''
Copyright (c) 2011, Joseph LaFata
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the unitbench nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
import inspect
import math
import os
import re
import string
import sys
import time
if sys.version_info < (3, 0):
maxint = sys.maxint
fname = "func_name"
else:
xrange = range
maxint = sys.maxsize
fname = "__name__"
if sys.platform == "win32":
get_time = time.clock
else:
get_time = time.time
class TimeSet(object):
__slots__ = ["wall", "user", "system"]
def __init__(self, wall, user, system):
self.wall = wall
self.user = user
self.system = system
class BenchResult(object):
"""
This contains the results of running a benchmark.
It contains the mean, variance, standard deviation, minimum,
and maximum. Each of these attributes are available for
user, wall, and system time.
Times:
* wall
* user
* system
Statistics:
* min
* max
* mean
* variance
* std_dev (standard deviation)
Combine one of the times and one of the statistics to get
the appropriate attribute.
Examples:
* wall_mean - Mean wall clock time
* user_std_dev - Standard deviation of user time
* system_variance - Variance of system (kernel) time
* wall_min - Minimum wall clock time
* and so on...
"""
def __init__(self, name, value, times):
self.name = name
self.value = str(value)
if len(times) == 0:
return
time_types = ["wall", "user", "system"]
for type in time_types:
minimum = maxint
maximum = -maxint
count = 0
sum = 0
sum_2 = 0
mean = 0.0
variance = 0.0
std_dev = 0.0
for t in times:
currentTime = getattr(t, type)
count += 1
sum += currentTime
sum_2 += currentTime ** 2
minimum = min(currentTime, minimum)
maximum = max(currentTime, maximum)
if len(times) > 0:
mean = sum / count
variance = (sum_2 / count) - (mean ** 2)
if variance < 0.0:
variance = 0.0
std_dev = math.sqrt(variance)
setattr(self, type+"_min", minimum)
setattr(self, type+"_max", maximum)
setattr(self, type+"_mean", mean)
setattr(self, type+"_variance", variance)
setattr(self, type+"_stddev", std_dev)
class Benchmark(object):
"""
"""
def setup(self):
"Hook method called once before every run of each benchmark."
pass
def teardown(self):
"Hook method called once after every run of each benchmark."
pass
def input(self):
"""Hook method for providing the input to the benchmark. None
should not be passed into a benchmark function. It is used
as a marker that no arguments are present.
A list containing only 0 is used as a default.
"""
return [0]
def repeats(self):
"Hook method for the number of times to repeat the benchmark (default 7)"
return 7
def warmup(self):
"""Hook method for the number of warmup runs to do (default 1)
The warmup will run for each benchmark function with each value.
"""
return 1
def run(self, reporter=None):
"""This should generally not be overloaded. Runs the benchmark functions
that are found in the child class.
"""
if not reporter: reporter = ConsoleReporter()
benchmarks = sorted(self._find_benchmarks())
reporter.write_titles(map(self._function_name_to_title, benchmarks))
for value in self.input():
results = []
for b in benchmarks:
method = getattr(self, b)
arg_count = len(inspect.getargspec(method)[0])
if arg_count == 2:
results.append(self._run_benchmark(method, value))
elif arg_count == 1:
results.append(self._run_benchmark(method))
reporter.write_results(str(value), results)
def _run_benchmark(self, method, value=None):
# warmup the function
for i in xrange(self.warmup()):
self.setup()
try:
if value != None:
method(value)
else:
method()
except:
self.teardown()
raise
# run the benchmark
times = []
for i in xrange(self.repeats()):
self.setup()
try:
start_user_system = os.times()
start = get_time()
if value != None:
method(value)
else:
method()
end = get_time()
end_user_system = os.times()
t = TimeSet(end-start,
end_user_system[0]-start_user_system[0],
end_user_system[1]-start_user_system[1])
times.append(t)
except:
self.teardown()
raise
self.teardown()
return BenchResult(self._function_name_to_title(getattr(method, fname)), value, times)
def _find_benchmarks(self):
"""Return a suite of all tests cases contained in testCaseClass"""
def is_bench_method(attrname, prefix="bench"):
return attrname.startswith(prefix) and hasattr(getattr(self.__class__, attrname), '__call__')
return list(filter(is_bench_method, dir(self.__class__)))
def _function_name_to_title(self, name):
output = name
if output.startswith("bench"):
output = output[5:]
if output.find("_") != -1:
return string.capwords(output.replace("_", " ").strip())
else:
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', output)
return string.capwords(re.sub('([a-z0-9])([A-Z])', r'\1 \2', s1).strip())
class Reporter(object):
"""
This is the base class for benchmark result reporting. If
you'd like to write a custom importer this is the class to extend.
"""
def write_titles(self, titles):
"""
Override this method if you'd like to write out the titles
at the beginning of your reporter. The CsvReporter uses
this function to output the titles at the top of each column.
Alternatively, the ConsoleReporter doesn't override this
function because it doesn't need to output the titles of
each benchmark.
"""
pass
def write_results(self, value, results):
"""
Override this method to output the results of the benchmark
run. Value is the value passed into each benchmark. Results
is a list of BenchResult objects. See the BenchResult documentation
for the information it contains.
"""
pass
class ConsoleReporter(Reporter):
def __init__(self, output_stream=sys.stdout):
self.stream = output_stream
def write_results(self, value, results):
self.stream.write("Value: {0:<33}{1:>10}{2:>10}{3:>10}\n".format(value, "user", "sys", "real"))
self.stream.write("=" * 70 + "\n")
for r in results:
if (hasattr(r, "user_mean") and
hasattr(r, "system_mean") and hasattr(r, "wall_mean")):
self.stream.write("{0:<40} {1:>9.4} {2:>9.4} {3:>9.4}\n".format(r.name,
r.user_mean, r.system_mean, r.wall_mean))
self.stream.write("\n")
class CsvReporter(Reporter):
def __init__(self, output_stream=sys.stdout, time_type="wall"):
self.stream = output_stream
self.time_type = time_type
def write_titles(self, titles):
self.stream.write("Values," + ",".join(titles))
self.stream.write("\n")
def write_results(self, value, results):
output = []
for r in results:
if hasattr(r, self.time_type+"_mean"):
output.append(str(getattr(r, self.time_type+"_mean")))
if len(output) > 0:
self.stream.write(value + "," + ",".join(output))
self.stream.write("\n")
|
[
"dahalls@gmail.com"
] |
dahalls@gmail.com
|
9497e99fc516832e4639221b33df7bf3d1d5c5c6
|
7b92157818d1d4f05a74fcc3e373eebeab4897cd
|
/algoritms_cormen/sorting/heap/max_priority_queue.py
|
360f0cbca1d1a8045b2c61e7c651900f6b231bbe
|
[] |
no_license
|
SabinoGs/algorithms
|
cc32ec1538c00a04166a77e8458b987227f0a386
|
326688e9e3e790134809092e4b7f9f5a217e28e9
|
refs/heads/master
| 2022-12-05T18:41:41.252629
| 2020-08-11T20:49:25
| 2020-08-11T20:49:25
| 281,818,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,262
|
py
|
import math
class MaxPriorityQueue():
"""
Implementa uma fila de prioridade baseada na propriedade de max_heap.
"""
def __init__(self, initial_array):
self.heap = initial_array
self.heap_size = len(initial_array)-1
self._build_heap()
def _swap_index(self, a, b):
tmp = self.heap[a]
self.heap[a] = self.heap[b]
self.heap[b] = tmp
def _right_node(self, i):
return i*2 + 2
def _left_node(self, i):
return i*2 + 1
def _parent(self, i):
return int((i-1)/2)
def _max_heapify(self, i):
while i <= self.heap_size:
left = self._left_node(i)
right = self._right_node(i)
if left <= self.heap_size and self.heap[left] > self.heap[i]:
max_index = left
else:
max_index = i
if right <= self.heap_size and self.heap[right] > self.heap[max_index]:
max_index = right
if max_index != i:
self._swap_index(i, max_index)
i = max_index
else:
break
def _build_heap(self):
for i in range(int(self.heap_size/2), -1, -1):
self._max_heapify(i)
def maximum(self):
return self.heap[0]
def extract_max(self):
"""Remove e retorna o elemento da heap com a maior Key
"""
if self.heap_size < 0:
raise ValueError("Heap underflow")
max = self.heap[0]
self.heap[0] = self.heap[self.heap_size]
self.heap.pop(self.heap_size)
self.heap_size -= 1
# reorganiza a heap após a remoção do primeiro elemento
self._max_heapify(0)
return max
def increase_key(self, i, key):
"""Aumenta o index i com a nova key.
Assume que a key > heap[i].
"""
if key < self.heap[i]:
raise ValueError("Nova key é menor que a atual")
self.heap[i] = key
# Se não obedecer a propriedade de max_heap,
# troque os elementos até que seja verdade.
while i > 0 and self.heap[self._parent(i)] < self.heap[i]:
self._swap_index(i, self._parent(i))
i = self._parent(i)
def insert(self, key):
self.heap_size += 1
# ao criar um nó novo, atribuir um valor "infinitamente pequeno"
# para poder realizar a movimentação desse novo valor para a posição
# correta na heap
self.heap.append(-math.inf)
# Mantem a propriedade de max_heap e "flutua" a nova key para o lugar certo.
# Assumindo que a heap ja está num estado de max_heap
self.increase_key(self.heap_size, key)
if __name__ == "__main__":
array1 = [15, 13, 9, 5, 12, 8, 7, 4, 0, 6, 2, 1]
queue = MaxPriorityQueue(array1)
print("Max - Heap: ", queue.heap)
print("Max element: ", queue.maximum())
print("Pop first: ", queue.extract_max())
print("Heap after pop: ", queue.heap)
print("Max heap, after pop: ", queue.maximum())
queue.increase_key(0, 15)
print("Increase key 13 to 15: ", queue.heap)
queue.insert(100)
print("Insert 100 in heap: ", queue.heap)
|
[
"gustavo.sabino.contact@gmail.com"
] |
gustavo.sabino.contact@gmail.com
|
ddeab8c52230720ee2a47828e57178f8851cb9b0
|
0dabc76a258156ea069a41556afa38656c0cb909
|
/basics/data_json_people.py
|
06e6269793a166beeb421f209ba16dc2c39e0c4c
|
[
"MIT"
] |
permissive
|
chen0040/pyspark-advanced-algorithms
|
11d1422e39266b38fce4752c7f88b9518d0f9519
|
3a140767447769a7cdae59c5a28bd0e1b03c5bc6
|
refs/heads/master
| 2020-03-17T05:10:03.834489
| 2018-05-23T12:41:37
| 2018-05-23T12:41:37
| 133,304,849
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
from pyspark.sql import SparkSession
import os
import sys
import logging
def patch_path(*paths):
return os.path.join(os.path.dirname(__file__), *paths)
def main():
sys.path.append(patch_path('..'))
from pyspark_alg.os_utils import set_hadoop_home_dir, is_os_windows
from pyspark_alg.spark_utils import create_spark_session
if is_os_windows():
set_hadoop_home_dir(patch_path('..', 'win-bin'))
logging.basicConfig(level=logging.WARN)
spark = create_spark_session('JsonPerson')
df = spark.read.json(patch_path('data/person.json'))
data = df.collect()
print(type(data))
for person in data:
print(person)
if __name__ == '__main__':
main()
|
[
"xs0040@gmail.com"
] |
xs0040@gmail.com
|
07f48f1cd98ec22a17566c42cef8557cb36bb84c
|
5c7cea50c43742ed80c9cd20d231390e9bfe6b4c
|
/runfiles/eval_DAVIS_graph_memory.py
|
d898520d6a01841a7914f195009ee3641760ddcc
|
[] |
no_license
|
zxforchid/GraphMemVOS
|
dd9f01e6c503ece39f4a76c824c0984648a5e4c5
|
a3487bcf67ce479c3774890403362170f275095f
|
refs/heads/master
| 2022-12-14T23:59:18.425541
| 2020-09-10T06:10:10
| 2020-09-10T06:10:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,512
|
py
|
from __future__ import division
import sys
sys.path.append('/media/xiankai/Data/segmentation/OSVOS/ECCV_graph_memory')
import models
import torch
from torch.autograd import Variable
from torch.utils import data
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
#from torchvision import models
from numpy.random import randint
# general libs
import cv2
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import math
import time
import tqdm
import os
import argparse
import copy
import csv
### My libs
from dataset import DAVIS_MO_Test
torch.set_grad_enabled(False) # Volatile
def pad_divide_by(in_list, d, in_size):
out_list = []
h, w = in_size #input size
if h % d > 0:
new_h = h + d - h % d
else:
new_h = h
if w % d > 0:
new_w = w + d - w % d
else:
new_w = w
lh, uh = int((new_h-h) / 2), int(new_h-h) - int((new_h-h) / 2)
lw, uw = int((new_w-w) / 2), int(new_w-w) - int((new_w-w) / 2)
pad_array = (int(lw), int(uw), int(lh), int(uh))
for inp in in_list:
out_list.append(F.pad(inp, pad_array))
return out_list, pad_array
def get_arguments():
parser = argparse.ArgumentParser(description="SST")
parser.add_argument("-g", type=str, help="0; 0,1; 0,3; etc", default='0')
parser.add_argument("-c", type=str, help="checkpoint", default=' ')
parser.add_argument("-s", type=str, help="set", default="val")
parser.add_argument("-y", type=int, help="year", default="17")
parser.add_argument("-viz", help="Save visualization", action="store_true")
parser.add_argument("-D", type=str, help="path to data", default='/media/xiankai/Data/segmentation/DAVIS-2017/DAVIS-train-val')
return parser.parse_args()
args = get_arguments()
GPU = args.g
YEAR = args.y
SET = args.s
VIZ = args.viz
DATA_ROOT = args.D
# Model and version
MODEL = 'Graph-memory'
print(MODEL, ': Testing on DAVIS')
os.environ['CUDA_VISIBLE_DEVICES'] = GPU
if torch.cuda.is_available():
print('using Cuda devices, num:', torch.cuda.device_count())
if VIZ:
print('--- Produce mask overaid video outputs. Evaluation will run slow.')
print('--- Require FFMPEG for encoding, Check folder ./viz')
palette = Image.open('/media/xiankai/Data/segmentation/DAVIS-2017/DAVIS-train-val/Annotations/480p/bear/00000.png').getpalette()
class VideoRecord(object):
pass
def _sample_pair_indices(record):
"""
:param record: VideoRecord
:return: list
"""
new_length = 1
average_duration = (record.num_frames - new_length + 1) // record.num_segment
if average_duration > 0:
# offsets = np.multiply(list(range(record.num_segment)), average_duration) + randint(average_duration,size=record.num_segment)
offsets = np.multiply(list(range(record.num_segment)), average_duration) + [average_duration//2]*record.num_segment # no random
elif record.num_frames > record.num_segment:
offsets = randint(record.num_frames -
new_length + 1, size=record.num_segment)
else:
offsets = np.zeros((record.num_segment,))
return offsets
def Run_video(Fs, Ms, num_frames, num_objects, Mem_every=None, Mem_number=None):
# initialize storage tensors
num_first_memory = 1
if Mem_every:
to_memorize = [int(i) for i in np.arange(0, num_frames, step=Mem_every)]
elif Mem_number:
to_memorize = [int(round(i)) for i in
np.linspace(0, num_frames, num=Mem_number + 2)[:-1]] # [0, 5, 10, 15, 20, 25]
else:
raise NotImplementedError
# print('memory size:', len(to_memorize))
Es = torch.zeros_like(Ms) # mask
Es[:, :, 0] = Ms[:, :, 0]
record = VideoRecord()
record.num_segment = 4
for t in tqdm.tqdm(range(1, num_frames)):
# memorize
with torch.no_grad():
prev_key, prev_value = model(Fs[:, :, t - 1], Es[:, :, t - 1], torch.tensor([num_objects]))
if t - 1 == 0: #
this_keys, this_values = prev_key, prev_value # only prev memory
elif t <= record.num_segment:
this_keys = torch.cat([keys, prev_key], dim=3)
this_values = torch.cat([values, prev_value], dim=3)
# segment
with torch.no_grad():
# print('input size1:', this_keys.size(), this_values.size())# torch.Size([1, 11, 128, 1, 30, 57]) torch.Size([1, 11, 512, 1, 30, 57]) # one hot label vector with length 11
record.num_frames = t
select_keys = []
select_values = []
# print('key size:',t,this_keys.size(), this_values.size())#[1, 11, 128, 4, 30, 57]
if t > record.num_segment:
Index = _sample_pair_indices(record) if record.num_segment else []
#Index[-1]=t-1
# print('index', t, Index, type(Index))
for add_0 in range(num_first_memory):
select_keys.append(this_keys[:, :, :, 0, :, :].unsqueeze(dim=3))
select_values.append(this_values[:, :, :, 0, :, :].unsqueeze(dim=3))
# print('index0:', this_keys[:, :, :, 0, :, :].size(),this_values[:, :, :, 0, :, :].unsqueeze(dim=3).size())
for ii in Index:
prev_key1, prev_value1 = model(Fs[:, :, ii], Es[:, :, ii], torch.tensor([num_objects]))
# print('index1:', prev_key.size()) #1, 11, 128, 1, 30, 57
select_keys.append(prev_key1) # (this_keys[:, :, :, t-1, :, :])
select_values.append(prev_value1) # (this_values[:, :, :, t-1, :, :])
select_keys.append(prev_key) # (this_keys[:, :, :, t-1, :, :])
select_values.append(prev_value)
# print('index2:', select_keys[0].size())
select_keys = torch.cat(select_keys, dim=3)
select_values = torch.cat(select_values, dim=3)
# print('key size:', prev_key.size(), select_keys.size(), select_values.size())
else:
select_keys = this_keys
select_values = this_values
logit = model(Fs[:, :, t], select_keys, select_values, torch.tensor([num_objects]))
Es[:, :, t] = F.softmax(logit, dim=1)
# print('output size:', torch.max(Es[:,:,t]), torch.min(Es[:,:,t]))
# update
if t - 1 in to_memorize:
keys, values = this_keys, this_values
pred = np.argmax(Es[0].cpu().numpy(), axis=0).astype(np.uint8)
return pred, Es
Testset = DAVIS_MO_Test(DATA_ROOT, resolution='480p', imset='20{}/{}.txt'.format(YEAR, SET), single_object=(YEAR == 16))
Testloader = data.DataLoader(Testset, batch_size=1, shuffle=False, num_workers=0, pin_memory=True)
model = nn.DataParallel(models.graph_memory())
for param in model.parameters():
param.requires_grad = False
if torch.cuda.is_available():
model.cuda()
model.eval() # turn-off BN
pth_path = args.c
print('Loading weights:', pth_path)
checkpoint = torch.load(pth_path)
model.module.load_state_dict(checkpoint['net'])
try:
print('epoch:', checkpoint['epoch'])
except:
print('dont know epoch')
for seq, V in enumerate(Testloader):
Fs, Ms, num_objects, info = V
seq_name = info['name'][0]
num_frames = info['num_frames'][0].item()
print('[{}]: num_frames: {}, num_objects: {}'.format(seq_name, num_frames, num_objects[0][0]))
B, K, N, H, W = Fs.shape
H_1, W_1 = 480, int(480.0 * W / H)
# resize_sizes = [(int(0.75 * H_1), int(0.75 * W_1)), (H_1, W_1), (int(1.25 * H_1), int(1.25 * W_1))]
resize_sizes = [(H_1, W_1)]
use_flip = True
resize_Fs = []
resize_Ms = []
# ms
for size in resize_sizes:
resize_Fs.append(F.interpolate(input=Fs.squeeze(0).permute(1, 0, 2, 3), size=size, mode='bilinear',
align_corners=True).permute(1, 0, 2, 3).unsqueeze(0))
resize_Ms.append(F.interpolate(input=Ms.squeeze(0).permute(1, 0, 2, 3), size=size, mode='nearest'
).permute(1, 0, 2, 3).unsqueeze(0))
# flip
if use_flip:
for i in range(len(resize_Fs)):
resize_Fs.append(torch.flip(resize_Fs[i], [-1]))
resize_Ms.append(torch.flip(resize_Ms[i], [-1]))
Es_list = []
for i in range(len(resize_Fs)):
pred, Es = Run_video(resize_Fs[i], resize_Ms[i], num_frames, num_objects, Mem_every=5, Mem_number=None)
Es = F.interpolate(input=Es.squeeze(0).permute(1, 0, 2, 3), size=(H, W), mode='bilinear',
align_corners=True).permute(1, 0, 2, 3).unsqueeze(0)
if use_flip:
if i >= (len(resize_Fs) / 2):
Es = torch.flip(Es, [-1])
Es_list.append(Es)
Es = torch.stack(Es_list).mean(dim=0)
pred = np.argmax(Es[0].numpy(), axis=0).astype(np.uint8) # different than ytvos here
# pred, Es = Run_video(Fs, Ms, num_frames, num_objects, Mem_every=1, Mem_number=None,
# num_first_memory=num_first_memory, num_middle_memory=num_middle_memory)
# Save results for quantitative eval ######################
test_path = os.path.join('./test', code_name, seq_name)
if not os.path.exists(test_path):
os.makedirs(test_path)
for f in range(num_frames):
img_E = Image.fromarray(pred[f])
# print('image size:',type(YEAR),np.max(pred[f]),np.min(pred[f]))
if YEAR == 16:
# print('ok!')
img_E = (pred[f].squeeze() * 255).astype(np.uint8)
img_E = Image.fromarray(img_E)
img_E = img_E.convert('RGB')
else:
img_E.putpalette(palette)
img_E.save(os.path.join(test_path, '{:05d}.png'.format(f)))
if VIZ:
from tools.helpers import overlay_davis
# visualize results #######################
viz_path = os.path.join('./viz/', code_name, seq_name)
if not os.path.exists(viz_path):
os.makedirs(viz_path)
for f in range(num_frames):
pF = (Fs[0, :, f].permute(1, 2, 0).numpy() * 255.).astype(np.uint8)
pE = pred[f]
canvas = overlay_davis(pF, pE, palette)
canvas = Image.fromarray(canvas)
canvas.save(os.path.join(viz_path, 'f{}.jpg'.format(f)))
vid_path = os.path.join('./viz/', code_name, '{}.mp4'.format(seq_name))
frame_path = os.path.join('./viz/', code_name, seq_name, 'f%d.jpg')
|
[
"noreply@github.com"
] |
zxforchid.noreply@github.com
|
8f7c24bafbcae08fdd2c6e3e957913095fa47a2e
|
53d55a3123e6d2546e14033fbd3194eee2ec202e
|
/anyconfig/mdicts.py
|
b0b3cd31d959525fc402f8f42bf23dd9b8a1d568
|
[
"MIT"
] |
permissive
|
ajays20078/python-anyconfig1
|
28cc37c3987d5d6b3ba0c6030546646d9a2b6ed8
|
019c1e76ab824278b39c61718ec534c16ee7bba4
|
refs/heads/master
| 2021-01-22T17:43:05.819147
| 2017-03-15T05:49:11
| 2017-03-15T05:49:11
| 85,033,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,737
|
py
|
#
# Copyright (C) 2011 - 2016 Satoru SATOH <ssato redhat.com>
# License: MIT
#
# pylint: disable=unused-argument,unused-import
"""Wrapper of m9dicts.
.. versionadded: 0.5.0
Swtiched from mergeabledict to m9dicts
"""
from __future__ import absolute_import
import m9dicts
from m9dicts import (
MS_REPLACE, MS_NO_REPLACE, MS_DICTS, MS_DICTS_AND_LISTS, MERGE_STRATEGIES,
NTPL_CLS_KEY, UpdateWithReplaceDict, get, set_,
is_dict_like, is_namedtuple # flake8: noqa
)
def to_container(obj=None, ac_ordered=False, ac_merge=m9dicts.MS_DICTS,
ac_namedtuple=False, ac_ntpl_cls_key=NTPL_CLS_KEY, **options):
r"""
Factory function to create a dict-like object[s] supports merge operation
from a dict or any other objects.
.. seealso:: :func:`m9dicts.make`
:param obj: A dict or other object[s] or None
:param ordered:
Create an instance of OrderedMergeableDict instead of MergeableDict If
it's True. Please note that OrderedMergeableDict class will be chosen
for namedtuple objects regardless of this argument always to keep keys
(fields) order.
:param merge:
Specify strategy from MERGE_STRATEGIES of how to merge results loaded
from multiple configuration files.
:param _ntpl_cls_key:
Special keyword to embedded the class name of namedtuple object to the
dict-like object created. It's a hack and not elegant but I don't think
there are another ways to make same namedtuple object from objects
created from it.
:param options:
Optional keyword arguments for m9dicts.convert_to, will be converted to
the above ac_\* options respectively as needed.
"""
opts = dict(ordered=ac_ordered, merge=ac_merge,
_ntpl_cls_key=ac_ntpl_cls_key)
return m9dicts.make(obj, **opts)
def convert_to(obj, ac_ordered=True, ac_namedtuple=False,
ac_ntpl_cls_key=NTPL_CLS_KEY, **options):
r"""
Convert given `obj` :: m9dict object to a dict, dict or OrderedDict if
ac_ordered == True, or a namedtuple if ac_namedtuple == True.
.. seealso:: :func:`m9dicts.convert_to`
:param obj: A m9dict object to convert to
:param ac_ordered: OrderedDict will be chosen if True
:param ac_namedtuple: A namedtuple object will be chosen if True
:param ac_ntpl_cls_key: The name of namedtuple object
:param options:
Optional keyword arguments for m9dicts.convert_to, will be converted to
the above ac_\* options respectively as needed.
"""
opts = dict(ordered=ac_ordered, to_namedtuple=ac_namedtuple,
_ntpl_cls_key=ac_ntpl_cls_key)
return m9dicts.convert_to(obj, **opts)
# vim:sw=4:ts=4:et:
|
[
"ajays20078@gmail.com"
] |
ajays20078@gmail.com
|
6574ae1fcbc11e6bf70d5a25731690dbd28be356
|
69deed959c51503f6c04837f2dced99d97a00904
|
/Facebook/linked_list/merge_two_sorted_lists.py
|
d180af856c5db249c1b3579369e9509a410f2a8d
|
[] |
no_license
|
AbhinavPelapudi/leetcode_problems
|
2d9ca9609d0be57750ed8fb19743e6654cafd466
|
012690bd407dd387dcc248442a5eb8d1d140a0f8
|
refs/heads/master
| 2022-02-21T22:38:38.656265
| 2019-08-06T04:22:21
| 2019-08-06T04:22:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
# Merge Two Sorted Lists
"""
Input: 1->2->4, 1->3->4
Output: 1->1->2->3->4->4
*
1->2->4
*
1->3->4
new_ll = 1 -> 1 -> 2 -> 3 -> 4 -> 4
"""
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
if not l1 or not l2:
return l1 or l2
head = None
current = None
while l1 and l2:
if l1.val < l2.val:
new_node = ListNode(l1.val)
if not current:
head = new_node
current = new_node
l1 = l1.next
continue
current.next = new_node
current = new_node
l1 = l1.next
else:
new_node = ListNode(l2.val)
if not current:
head = new_node
current = new_node
l2 = l2.next
continue
current.next = new_node
current = new_node
l2 = l2.next
if l1:
current.next = l1
if l2:
current.next = l2
return head
|
[
"inas.raheema@gmail.com"
] |
inas.raheema@gmail.com
|
9492aeca9ddde4f4843955387c6b1b6a608ccd9c
|
5e391906360e0d401e230b78a024bc2538dc2ed8
|
/catalog/apps.py
|
a012accb9ac0b7486269d5cae6c4c832cba62682
|
[] |
no_license
|
spiritEcosse/clavutich
|
0588d618082c31de7b1da3b914bca0b97253c902
|
592447e0f06d8eed153e1216e74de8f69cc2abb9
|
refs/heads/master
| 2020-04-22T10:00:02.251387
| 2015-12-03T16:08:31
| 2015-12-03T16:08:31
| 170,290,479
| 0
| 0
| null | 2019-05-21T08:45:28
| 2019-02-12T09:32:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'igor'
from django.apps import AppConfig
class CatalogConfig(AppConfig):
name = 'catalog'
verbose_name = u"Каталог"
|
[
"shevchenkcoigor@gmail.com"
] |
shevchenkcoigor@gmail.com
|
b9fa4175fed2e6e5719cb8fc0fc582c3f52a31c7
|
81a6ffe0e91a9baf57ad88e2c16f882729bf63cf
|
/project/RMSE/read.py
|
d3858e0944605c9f3fec2b7eebd10ba854bffeb6
|
[] |
no_license
|
Lovingmylove/python.sc
|
a5014438288aefb5cbf581ff54421cc1fe493994
|
970451d3e12d3d68ea199de844e6cb8a39f1a861
|
refs/heads/master
| 2021-01-17T06:03:26.918849
| 2016-06-28T08:30:05
| 2016-06-28T08:30:05
| 54,813,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import numpy as np
f = open(r"C:\Users\tudan\Desktop\example\datax.txt","r")
arrx=[]
flag = 0
for lines in f.readlines():
lines=lines.replace("\n","").split(" ")
arrx.append(lines)
flag += 1
if flag > 3000:
break
f.close()
arr_x =[]
for i in np.arange(0,len(arrx)):
arr_x.append(float(arrx[i][0]))
print len(arr_x)
|
[
"kejinlong@bupt.edu.cn"
] |
kejinlong@bupt.edu.cn
|
68d3ebad1df4b47b9795363c08a16b6765abd3f7
|
879b2ab2bebda775def40b2c1ee48c08f87c1fb0
|
/aiokraken/rest/schemas/kopenorder.py
|
4e4668789d5bfa51e7f242ff3fbd1eef94c52aa2
|
[
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
asmodehn/aiokraken
|
daf5c9dabd256189621abb89831cdf595c58a891
|
b260bd41d5aa091e6a4f1818328426fbe6f625c0
|
refs/heads/develop
| 2023-09-03T11:15:54.237448
| 2020-08-18T15:11:54
| 2020-08-18T15:11:54
| 203,133,738
| 1
| 0
|
MIT
| 2023-09-14T02:14:38
| 2019-08-19T08:45:13
|
Python
|
UTF-8
|
Python
| false
| false
| 8,129
|
py
|
import typing
from datetime import datetime
from decimal import Decimal
from enum import (IntEnum)
from dataclasses import dataclass, field
from hypothesis.strategies import composite
from marshmallow import fields, post_load
from hypothesis import strategies as st
if not __package__:
__package__ = 'aiokraken.rest.schemas'
from .base import BaseSchema
from .kordertype import KOrderTypeModel, KOrderTypeField
from .ktm import AbsoluteTimeStrategy, TimerField
from .korderdescr import (KOrderDescrNoPriceFinalized,
KOrderDescrOnePriceFinalized,
KOrderDescrTwoPriceFinalized,
KOrderDescrNoPriceStrategy,
KOrderDescrOnePriceStrategy,
KOrderDescrTwoPriceStrategy,
KOrderDescrFinalizeStrategy,
KOrderDescrSchema,
)
@dataclass(frozen=True)
class KOpenOrderModel:
descr: typing.Union[KOrderDescrNoPriceFinalized,
KOrderDescrOnePriceFinalized,
KOrderDescrTwoPriceFinalized, ]
status: str # TODO
starttm: datetime
opentm: datetime
expiretm: datetime
price: Decimal
limitprice: Decimal
stopprice: Decimal
vol: Decimal
vol_exec: Decimal
fee: Decimal
cost: Decimal
misc: str # TODO
oflags: str # TODO
refid: typing.Optional[int] = None # TODO
userref: typing.Optional[int] = None # TODO
trades: typing.Optional[typing.List[str]] = None
@composite
def OpenOrderStrategy(draw,
descr= st.one_of([
KOrderDescrFinalizeStrategy(strategy=KOrderDescrNoPriceStrategy()),
KOrderDescrFinalizeStrategy(strategy=KOrderDescrOnePriceStrategy()),
KOrderDescrFinalizeStrategy(strategy=KOrderDescrTwoPriceStrategy())
]),
status= st.text(max_size=5), # TODO
starttm= AbsoluteTimeStrategy(),
opentm= AbsoluteTimeStrategy(),
expiretm= AbsoluteTimeStrategy(),
# CAreful here : consistency with descr content ???
price= st.decimals(allow_nan=False, allow_infinity=False),
limitprice= st.decimals(allow_nan=False, allow_infinity=False),
stopprice= st.decimals(allow_nan=False, allow_infinity=False),
vol= st.decimals(allow_nan=False, allow_infinity=False),
vol_exec= st.decimals(allow_nan=False, allow_infinity=False),
fee= st.decimals(allow_nan=False, allow_infinity=False),
cost= st.decimals(allow_nan=False, allow_infinity=False),
misc= st.text(max_size=5), # TODO
oflags= st.text(max_size=5), # TODO
refid=st.integers(), # TODO
userref= st.integers(), # TODO
trades= st.lists(st.text(max_size=5),max_size=5)
):
return KOpenOrderModel(
descr = draw(descr),
status = draw(status),
starttm= draw(starttm),
opentm =draw(opentm),
expiretm= draw(expiretm),
price= draw(price),
limitprice=draw(limitprice),
stopprice=draw(stopprice),
vol=draw(vol),
vol_exec=draw(vol_exec),
fee= draw(fee),
cost= draw(cost),
misc=draw(misc),
oflags=draw(oflags),
refid=draw(refid),
userref=draw(userref),
trades=draw(trades),
)
class KOpenOrderSchema(BaseSchema):
refid = fields.Integer(allow_none=True)
userref = fields.Integer(allow_none=True)
status = fields.Str()
# pending = order pending book entry
# open = open order
# closed = closed order
# canceled = order canceled
# expired = order expired
opentm = TimerField()
starttm = TimerField()
expiretm = TimerField()
descr = fields.Nested(KOrderDescrSchema())
vol = fields.Decimal(as_string=True) #(base currency unless viqc set in oflags)
vol_exec = fields.Decimal(as_string=True) #(base currency unless viqc set in oflags)
cost = fields.Decimal(as_string=True) #(quote currency unless unless viqc set in oflags)
fee = fields.Decimal(as_string=True) #(quote currency)
price = fields.Decimal(as_string=True) #(quote currency unless viqc set in oflags)
stopprice = fields.Decimal(as_string=True) #(quote currency, for trailing stops)
limitprice = fields.Decimal(as_string=True) #(quote currency, when limit based order type triggered)
misc = fields.Str() #comma delimited list of miscellaneous info
# stopped = triggered by stop price
# touched = triggered by touch price
# liquidated = liquidation
# partial = partial fill
oflags = fields.Str() #comma delimited list of order flags
# viqc = volume in quote currency
# fcib = prefer fee in base currency (default if selling)
# fciq = prefer fee in quote currency (default if buying)
# nompp = no market price protection
trades = fields.List(fields.Str(), required=False) #array of trade ids related to order
@post_load
def build_model(self, data, **kwargs):
return KOpenOrderModel(**data)
@st.composite
def OpenOrderDictStrategy(draw,
# Here we mirror arguments for the model strategy
descr= st.one_of([
KOrderDescrFinalizeStrategy(strategy=KOrderDescrNoPriceStrategy()),
KOrderDescrFinalizeStrategy(strategy=KOrderDescrOnePriceStrategy()),
KOrderDescrFinalizeStrategy(strategy=KOrderDescrTwoPriceStrategy())
]),
status= st.text(max_size=5), # TODO
starttm= AbsoluteTimeStrategy(),
opentm= AbsoluteTimeStrategy(),
expiretm= AbsoluteTimeStrategy(),
price= st.decimals(allow_nan=False, allow_infinity=False),
limitprice= st.decimals(allow_nan=False, allow_infinity=False),
stopprice= st.decimals(allow_nan=False, allow_infinity=False),
vol= st.decimals(allow_nan=False, allow_infinity=False),
vol_exec= st.decimals(allow_nan=False, allow_infinity=False),
fee= st.decimals(allow_nan=False, allow_infinity=False),
cost= st.decimals(allow_nan=False, allow_infinity=False),
misc= st.text(max_size=5), # TODO
oflags= st.text(max_size=5), # TODO
refid=st.integers(), # TODO
userref= st.integers(), # TODO
trades= st.lists(st.text(max_size=5),max_size=5),
):
model = draw(OpenOrderStrategy(descr= descr,
status= status,
starttm= starttm,
opentm= opentm,
expiretm= expiretm,
price= price,
limitprice= limitprice,
stopprice= stopprice,
vol= vol,
vol_exec= vol_exec,
fee= fee,
cost= cost,
misc= misc, # TODO
oflags= oflags, # TODO
refid=refid, # TODO
userref= userref, # TODO
trades=trades,
))
schema = KOpenOrderSchema()
return schema.dump(model)
class OpenOrdersResponseSchema(BaseSchema):
open = fields.Dict(keys=fields.Str(), values=fields.Nested(KOpenOrderSchema()))
@post_load
def build_model(self, data, **kwargs):
return data['open']
if __name__ == "__main__":
import pytest
pytest.main(['-s', '--doctest-modules', '--doctest-continue-on-failure', __file__])
|
[
"asmodehn@gmail.com"
] |
asmodehn@gmail.com
|
416d5e8ac61ea615442a2dda82737cb000a0f902
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/23.merge-k-sorted-lists.py
|
4479a50997da222d2d91ddbef1ace9cd7299a4ff
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481
| 2019-11-12T22:59:07
| 2019-11-12T22:59:07
| 138,658,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
#
# @lc app=leetcode id=23 lang=python3
#
# [23] Merge k Sorted Lists
#
# https://leetcode.com/problems/merge-k-sorted-lists/description/
#
# algorithms
# Hard (36.88%)
# Total Accepted: 488.2K
# Total Submissions: 1.3M
# Testcase Example: '[[1,4,5],[1,3,4],[2,6]]'
#
# Merge k sorted linked lists and return it as one sorted list. Analyze and
# describe its complexity.
#
# Example:
#
#
# Input:
# [
# 1->4->5,
# 1->3->4,
# 2->6
# ]
# Output: 1->1->2->3->4->4->5->6
#
#
#
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeKLists(self, lists: List[ListNode]) -> ListNode:
|
[
"liseyko@gmail.com"
] |
liseyko@gmail.com
|
bc34256d74c603736a6f0f555db4848bc14ff7ae
|
b3d954416dabd441425f193a8470f8f6d518a47a
|
/tests.py
|
7c54ffc1bcf8547bf7b5e497af64b9c1a3437379
|
[] |
no_license
|
amirul-ifty/Online-Food-Order
|
9d5bcd6d991210aad8516d25fb9aa6e466b5cc94
|
97d7211d6a6eef6f3ce69ab24c35dff29d81087a
|
refs/heads/master
| 2023-08-18T07:58:39.467248
| 2021-09-20T19:17:27
| 2021-09-20T19:17:27
| 402,524,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,802
|
py
|
from django.test import TestCase
from .models import Item, Reviews, CartItems
from django.contrib.auth import get_user_model
from django.urls import reverse
User = get_user_model()
class ItemModelTest(TestCase):
def setUp(self):
item = Item()
item.title = 'Kacchi Biriyani'
item.price = 250.00
item.slug = "item_name"
item.save()
Item.objects.create(title='Hyderabadi Dam Biriyani', price=250.00, slug="biriyani-slug")
def test_title_label(self):
item = Item.objects.get(slug="item_name")
print("Printing the item name here----------->", item)
self.assertEqual(item.title.lower(), "Kacchi Biriyani".lower())
def test_item_exists(self):
count = Item.objects.all().count()
self.assertEqual(count, 2)
def test_price_amount(self):
item = Item.objects.get(slug="item_name")
self.assertEqual(item.price, 250.00)
def test_get_items_with_equal_price(self):
count = Item.objects.filter(price=250.00).count()
self.assertEqual(count, 2)
def test_get_items_default_value(self):
items = Item.objects.all()
for item in items:
self.assertEqual(item.pieces, 6)
def test_get_absolute_url(self):
url = reverse("main:dishes", kwargs={"slug": "item_name"})
self.assertEqual("/dishes/item_name", url)
def test_get_add_to_cart_url(self):
url = reverse("main:add-to-cart", kwargs={"slug": "item_name"})
self.assertEqual("/add-to-cart/item_name/", url)
def test_get_item_delete_url(self):
url = reverse("main:item-delete", kwargs={"slug": "Biriyani"})
self.assertEqual("/item-delete/item_name/", url)
def test_get_update_item_url(self):
return reverse("main:item-update", kwargs={"slug": "item_name"})
self.assertEqual("/item-update/item_name", url)
class TestReviewsModels(TestCase):
def setUp(self):
user = User()
user.username = "admin"
user.email = "admin111@gmail.com"
user.is_member = True
user.is_superuser = True
user.set_password("123456")
user.save()
item = Item()
item.title = 'Kacchi Biriyani'
item.price = 250.00
item.slug = "item_name"
item.save()
add_reviews = Reviews()
add_reviews.review = "It was delicious!!! You can tey that!"
add_reviews.user = user
add_reviews.item = item
add_reviews.save()
def test_review_exist(self):
count_reviews = Reviews.objects.all().count()
self.assertEqual(count_reviews, 1)
class TestCartItemsModels(TestCase):
def setUp(self):
user = User()
user.username = "admin"
user.email = "admin111@gmail.com"
user.is_member = True
user.is_superuser = True
user.set_password("123456")
user.save()
item = Item()
item.title = 'Kacchi Biriyani'
item.price = 250.00
item.slug = "item_name"
item.save()
new_cart_item = CartItems()
new_cart_item.status = "Active"
new_cart_item.user = user
new_cart_item.item = item
new_cart_item.save()
def test_cart_item_exist(self):
count_cart_items = CartItems.objects.all().count()
self.assertEqual(count_cart_items, 1)
def test_get_remove_from_cart_url(self):
url = reverse("main:remove-from-cart", kwargs={"pk": "1"})
self.assertEqual("/remove-from-cart/1/", url)
def test_update_status_url(self):
url = reverse("main:update_status", kwargs={"pk": "0"})
self.assertEqual("/update_status/0", url)
|
[
"noreply@github.com"
] |
amirul-ifty.noreply@github.com
|
f3a4fc09049854e9e1c48dbab9ad03c5c8b70db2
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/forecast/testcase/firstcases/testcase5_017.py
|
211171842908b66ac53a7f862282173896e1cad3
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,538
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'cz.martykan.forecastie',
'appActivity' : 'cz.martykan.forecastie.activities.SplashActivity',
'resetKeyboard' : True,
'androidCoverage' : 'cz.martykan.forecastie/cz.martykan.forecastie.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase017
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"Tomorrow\")", "new UiSelector().className(\"android.widget.TextView\").instance(14)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Graphs\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Later\")", "new UiSelector().className(\"android.widget.TextView\").instance(15)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_017\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'cz.martykan.forecastie'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
cb143e8129e5caef36df3289be6a72ec78d5faa7
|
a111218bccd887548d3b0bb18527c41fb4ea83bf
|
/forloop.py
|
32c261e06a3729d9f8dca6970770741b91de5eed
|
[] |
no_license
|
kollasailesh/python_webcrawler
|
46dd546db0cc293b09c505d7f569a864dea71cab
|
7177517720b173a5547909b0b09aa876cd153a24
|
refs/heads/master
| 2016-09-06T00:00:11.034732
| 2015-06-20T23:01:12
| 2015-06-20T23:01:12
| 37,297,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
foods = ['pizza', 'cereals','burger', 'rice' ]
for f in foods:
print(f)
print(len(f))
|
[
"saileshkolla91@gmail.com"
] |
saileshkolla91@gmail.com
|
c301b38049c9eedd7ac711e88586d0d9e5632fc5
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/teams/tests/test_serializers.py
|
e8e1fc25549a798bab497111566d7c284eb3018a
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252
| 2021-11-22T12:14:34
| 2021-11-22T12:14:34
| 163,850,454
| 3
| 1
|
MIT
| 2021-11-22T12:12:31
| 2019-01-02T14:21:30
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 11,073
|
py
|
"""
Tests for custom Teams Serializers.
"""
import six
from django.core.paginator import Paginator
from django.test.client import RequestFactory
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory, UserFactory
from lms.djangoapps.teams.serializers import BulkTeamCountTopicSerializer, MembershipSerializer, TopicSerializer
from lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory
from openedx.core.lib.teams_config import TeamsConfig
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class SerializerTestCase(SharedModuleStoreTestCase):
"""
Base test class to set up a course with topics
"""
def setUp(self):
"""
Set up a course with a teams configuration.
"""
super().setUp()
self.course = CourseFactory.create(
teams_configuration=TeamsConfig({
"max_team_size": 10,
"topics": [{'name': 'Tøpic', 'description': 'The bést topic!', 'id': '0'}]
}),
)
class MembershipSerializerTestCase(SerializerTestCase):
"""
Tests for the membership serializer.
"""
def setUp(self):
super().setUp()
self.team = CourseTeamFactory.create(
course_id=self.course.id,
topic_id=self.course.teamsets[0].teamset_id,
)
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.team_membership = CourseTeamMembershipFactory.create(team=self.team, user=self.user)
def test_membership_serializer_expand_user_and_team(self):
"""Verify that the serializer only expands the user and team one level."""
data = MembershipSerializer(self.team_membership, context={
'expand': ['team', 'user'],
'request': RequestFactory().get('/api/team/v0/team_membership')
}).data
username = self.user.username
assert data['user'] == {'url': ('http://testserver/api/user/v1/accounts/' + username),
'username': username,
'profile_image': {'image_url_full': 'http://testserver/static/default_500.png',
'image_url_large': 'http://testserver/static/default_120.png',
'image_url_medium': 'http://testserver/static/default_50.png',
'image_url_small': 'http://testserver/static/default_30.png',
'has_image': False}, 'account_privacy': 'private'}
assert 'membership' not in data['team']
class TopicSerializerTestCase(SerializerTestCase):
"""
Tests for the `TopicSerializer`, which should serialize team count data for
a single topic.
"""
def test_topic_with_no_team_count(self):
"""
Verifies that the `TopicSerializer` correctly displays a topic with a
team count of 0, and that it takes a known number of SQL queries.
"""
with self.assertNumQueries(2):
serializer = TopicSerializer(
self.course.teamsets[0].cleaned_data,
context={'course_id': self.course.id},
)
assert serializer.data == {'name': 'Tøpic', 'description': 'The bést topic!', 'id': '0',
'team_count': 0, 'type': 'open', 'max_team_size': None}
def test_topic_with_team_count(self):
"""
Verifies that the `TopicSerializer` correctly displays a topic with a
positive team count, and that it takes a known number of SQL queries.
"""
CourseTeamFactory.create(
course_id=self.course.id, topic_id=self.course.teamsets[0].teamset_id
)
with self.assertNumQueries(2):
serializer = TopicSerializer(
self.course.teamsets[0].cleaned_data,
context={'course_id': self.course.id},
)
assert serializer.data == {'name': 'Tøpic', 'description': 'The bést topic!', 'id': '0',
'team_count': 1, 'type': 'open', 'max_team_size': None}
def test_scoped_within_course(self):
"""Verify that team count is scoped within a course."""
duplicate_topic = self.course.teamsets[0].cleaned_data
second_course = CourseFactory.create(
teams_configuration=TeamsConfig({
"max_team_size": 10,
"topics": [duplicate_topic]
}),
)
CourseTeamFactory.create(course_id=self.course.id, topic_id=duplicate_topic['id'])
CourseTeamFactory.create(course_id=second_course.id, topic_id=duplicate_topic['id'])
with self.assertNumQueries(2):
serializer = TopicSerializer(
self.course.teamsets[0].cleaned_data,
context={'course_id': self.course.id},
)
assert serializer.data == {'name': 'Tøpic', 'description': 'The bést topic!', 'id': '0',
'team_count': 1, 'type': 'open', 'max_team_size': None}
class BaseTopicSerializerTestCase(SerializerTestCase):
"""
Base class for testing the two paginated topic serializers.
"""
__test__ = False
PAGE_SIZE = 5
# Extending test classes should specify their serializer class.
serializer = None
def _merge_dicts(self, first, second):
"""Convenience method to merge two dicts in a single expression"""
result = first.copy()
result.update(second)
return result
def setup_topics(self, num_topics=5, teams_per_topic=0):
"""
Helper method to set up topics on the course. Returns a list of
created topics.
"""
topics = [
{
'name': f'Tøpic {i}',
'description': f'The bést topic! {i}',
'id': str(i),
'type': 'open',
'max_team_size': i + 10
}
for i in six.moves.range(num_topics)
]
for topic in topics:
for _ in six.moves.range(teams_per_topic):
CourseTeamFactory.create(course_id=self.course.id, topic_id=topic['id'])
self.course.teams_configuration = TeamsConfig({
'max_team_size': self.course.teams_configuration.default_max_team_size,
'topics': topics,
})
return topics
def assert_serializer_output(self, topics, num_teams_per_topic, num_queries):
"""
Verify that the serializer produced the expected topics.
"""
with self.assertNumQueries(num_queries):
page = Paginator(
self.course.teams_configuration.cleaned_data['teamsets'],
self.PAGE_SIZE,
).page(1)
# pylint: disable=not-callable
serializer = self.serializer(instance=page, context={'course_id': self.course.id})
assert serializer.data['results'] ==\
[self._merge_dicts(topic, {'team_count': num_teams_per_topic}) for topic in topics]
def test_no_topics(self):
"""
Verify that we return no results and make no SQL queries for a page
with no topics.
"""
self.course.teams_configuration = TeamsConfig({'topics': []})
self.assert_serializer_output([], num_teams_per_topic=0, num_queries=0)
class BulkTeamCountTopicSerializerTestCase(BaseTopicSerializerTestCase):
"""
Tests for the `BulkTeamCountTopicSerializer`, which should serialize team_count
data for many topics with constant time SQL queries.
"""
__test__ = True
serializer = BulkTeamCountTopicSerializer
NUM_TOPICS = 6
def setUp(self):
super().setUp()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def test_topics_with_no_team_counts(self):
"""
Verify that we serialize topics with no team count, making only one SQL
query.
"""
topics = self.setup_topics(teams_per_topic=0)
self.assert_serializer_output(topics, num_teams_per_topic=0, num_queries=2)
def test_topics_with_team_counts(self):
"""
Verify that we serialize topics with a positive team count, making only
one SQL query.
"""
teams_per_topic = 10
topics = self.setup_topics(teams_per_topic=teams_per_topic)
self.assert_serializer_output(topics, num_teams_per_topic=teams_per_topic, num_queries=2)
def test_subset_of_topics(self):
"""
Verify that we serialize a subset of the course's topics, making only
one SQL query.
"""
teams_per_topic = 10
topics = self.setup_topics(num_topics=self.NUM_TOPICS, teams_per_topic=teams_per_topic)
self.assert_serializer_output(topics, num_teams_per_topic=teams_per_topic, num_queries=2)
def test_scoped_within_course(self):
"""Verify that team counts are scoped within a course."""
teams_per_topic = 10
first_course_topics = self.setup_topics(num_topics=self.NUM_TOPICS, teams_per_topic=teams_per_topic)
duplicate_topic = first_course_topics[0]
second_course = CourseFactory.create(
teams_configuration=TeamsConfig({
"max_team_size": 10,
"topics": [duplicate_topic]
}),
)
CourseTeamFactory.create(course_id=second_course.id, topic_id=duplicate_topic['id'])
self.assert_serializer_output(first_course_topics, num_teams_per_topic=teams_per_topic, num_queries=2)
def _merge_dicts(self, first, second):
"""Convenience method to merge two dicts in a single expression"""
result = first.copy()
result.update(second)
return result
def assert_serializer_output(self, topics, num_teams_per_topic, num_queries):
"""
Verify that the serializer produced the expected topics.
"""
# Set a request user
request = RequestFactory().get('/api/team/v0/topics')
request.user = self.user
with self.assertNumQueries(num_queries):
serializer = self.serializer(
topics,
context={
'course_id': self.course.id,
'request': request
},
many=True
)
assert serializer.data ==\
[self._merge_dicts(topic, {'team_count': num_teams_per_topic}) for topic in topics]
def test_no_topics(self):
"""
Verify that we return no results and make no SQL queries for a page
with no topics.
"""
self.course.teams_configuration = TeamsConfig({'topics': []})
self.assert_serializer_output([], num_teams_per_topic=0, num_queries=1)
|
[
"rafael.luque@osoco.es"
] |
rafael.luque@osoco.es
|
b3d2c5e9f3ecb1506079e9b38cf5443f24647f86
|
1d9f5feed8da4885513ef110d417e04cfe579880
|
/Python/FileDirectoryTools/scanfile.py
|
7f3adc71c092001d994a364ddff55d90ef65d410
|
[] |
no_license
|
hkailee/CodeBase
|
4ebf810c24b8ffe8246253fe170fdd8237380d21
|
12b1ac4a9522e5d7a0c35d84cf86eea595dc82aa
|
refs/heads/master
| 2021-01-20T22:55:14.185372
| 2017-09-12T09:55:43
| 2017-09-12T09:55:43
| 101,830,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
########## First scanner ##########
def scanner1 (name, function):
file = open(name, 'r') # create a file object
while True:
line = file.readline() # call file method
if not line: break # until end-of-file
function(line) # call a function object
file.close()
# The following are recommended for higher speed using for loop than the above while loop
########## Second scanner ##########
def scanner2(name, function):
for line in open(name, 'r'): # scan line-by-line
function(line) # call a function
########## Third scanner ##########
def scanner3(name, function):
list(map(function, open(name, 'r')))
########## Third scanner ##########
def scanner4(name, function):
[function(line) for line in open(name, 'r')]
########## Third scanner ##########
def scanner5(name, function):
list(function(line) for line in open(name,'r'))
|
[
"leehongkai@gmail.com"
] |
leehongkai@gmail.com
|
756bbcefaf60cdc7a71558cc6c6476b8acca64cd
|
e1e7db1eb2f07dc5421f98bb1ce941aab0165b39
|
/ImportTradeRecord/db.py
|
62215d4d418e6a4c27f4455ea44734a005c87e6f
|
[] |
no_license
|
muqingliu/trade_tool
|
baae112c780e941e100e677dfe2e6367f3378383
|
d295f1c484b5eb5832850b35ba4b6912ec11f0f8
|
refs/heads/master
| 2022-12-02T07:15:52.625980
| 2020-08-17T22:04:47
| 2020-08-17T22:04:47
| 288,291,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,992
|
py
|
import MySQLdb
import log
import time
class DB(object):
cursor = None
conn = None
table_name = None
def __init__(this, host, user, passwd, db, table='', charset='gbk', port=3306):
this.host = host
this.user = user
this.passwd = passwd
this.db = db
this.table_name = table
this.charset = charset
this.port = port
this.__connect()
def __connect(this):
try:
this.conn = MySQLdb.connect(this.host, this.user, this.passwd, this.db, this.port, charset=this.charset)
this.cursor = this.conn.cursor()
# this.cursor.execute("SET NAMES %s" % this.charset)
except Exception, msg:
log.WriteError ('Error: cant connect db names %s from host %s %s' % (this.db, this.host, msg) )
def IsConnect(this):
return this.conn != None
def query(this, sql):
this.conn.query(sql)
def ReConnect(this):
this.__connect()
def Query(this, sql, *the_tuple):
time_begin = time.time()
this.cursor.execute(sql % the_tuple)
# print "time:%d SQL[%s]" % ((time.time()-time_begin), sql % the_tuple)
def Execute(this, sql, *the_tuple):
if the_tuple:
return this.cursor.execute(sql, the_tuple)
else:
return this.cursor.execute(sql)
#this.Commit()
def Executemany(this, sqls, params):
this.cursor.executemany(sqls, params)
# this.conn.commit()
def Commit(this):
this.conn.commit()
# row object
def FetchOne(this):
return this.cursor.fetchone()
def FetchOne_1_Col(this):
return this.FetchOne()[0]
#list of row object
def FetchAll(this):
return this.cursor.fetchall()
def StoreResult(this):
return this.conn.store_result()
def __del__(this):
if this.conn:
this.cursor.close()
this.conn.close()
|
[
"tidusfantasy2008@sina.com"
] |
tidusfantasy2008@sina.com
|
dfad732ae9c63eb450d71b27f699c22f04fdb892
|
cbd48050b3f8473fbb4828acbe0e1f97d56ed0e7
|
/products/migrations/0001_initial.py
|
0292f3a8fc0942fac371f142d4c3b66dfc1a9dc1
|
[] |
no_license
|
Harithaskcet/FindMe
|
eb49711d45cd82983736abf5778c9b96c4ec694b
|
2f6c398c8adeec0196945b17e49ae5d17f4468e5
|
refs/heads/master
| 2022-12-10T23:16:10.820916
| 2020-09-13T15:44:31
| 2020-09-13T15:44:31
| 295,168,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
# Generated by Django 2.2.15 on 2020-09-13 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Products',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('url', models.URLField()),
('pub_date', models.DateField()),
('votes_total', models.IntegerField(default=1)),
('body', models.TextField()),
('image', models.ImageField(upload_to='uploads/')),
('icon', models.ImageField(upload_to='uploads/')),
],
),
]
|
[
"haritha.msk@mrcooper.com"
] |
haritha.msk@mrcooper.com
|
1a7d9392657ac30b419bd70da409e4ad4a9ebfc8
|
46eb68af43312656254983e783cbdfd21c3a5176
|
/squad/util.py
|
9765be07ccede3ee44c76d9c2fbd44dbbf765dd5
|
[] |
no_license
|
sk3391/CS224N
|
1cbb1eb4c8c172415f207c6e3aa6119a313bebe3
|
1071758de53645263246598b74a04f4c113f0f19
|
refs/heads/master
| 2022-04-14T06:15:59.767641
| 2020-03-28T15:00:51
| 2020-03-28T15:00:51
| 169,329,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,430
|
py
|
"""Utility classes and methods.
"""
import logging
import os
import queue
import re
import shutil
import string
import torch
import torch.nn.functional as F
import torch.utils.data as data
import tqdm
import numpy as np
import ujson as json
from collections import Counter
class SQuAD(data.Dataset):
"""Stanford Question Answering Dataset (SQuAD).
Each item in the dataset is a tuple with the following entries (in order):
- context_idxs: Indices of the words in the context.
Shape (context_len,).
- context_char_idxs: Indices of the characters in the context.
Shape (context_len, max_word_len).
- question_idxs: Indices of the words in the question.
Shape (question_len,).
- question_char_idxs: Indices of the characters in the question.
Shape (question_len, max_word_len).
- y1: Index of word in the context where the answer begins.
-1 if no answer.
- y2: Index of word in the context where the answer ends.
-1 if no answer.
- id: ID of the example.
Args:
data_path (str): Path to .npz file containing pre-processed dataset.
use_v2 (bool): Whether to use SQuAD 2.0 questions. Otherwise only use SQuAD 1.1.
"""
def __init__(self, data_path, use_v2=True):
super(SQuAD, self).__init__()
dataset = np.load(data_path)
self.context_idxs = torch.from_numpy(dataset['context_idxs']).long()
self.context_char_idxs = torch.from_numpy(dataset['context_char_idxs']).long()
self.question_idxs = torch.from_numpy(dataset['ques_idxs']).long()
self.question_char_idxs = torch.from_numpy(dataset['ques_char_idxs']).long()
self.y1s = torch.from_numpy(dataset['y1s']).long()
self.y2s = torch.from_numpy(dataset['y2s']).long()
if use_v2:
# SQuAD 2.0: Use index 0 for no-answer token (token 1 = OOV)
batch_size, c_len, w_len = self.context_char_idxs.size()
ones = torch.ones((batch_size, 1), dtype=torch.int64)
self.context_idxs = torch.cat((ones, self.context_idxs), dim=1)
self.question_idxs = torch.cat((ones, self.question_idxs), dim=1)
ones = torch.ones((batch_size, 1, w_len), dtype=torch.int64)
self.context_char_idxs = torch.cat((ones, self.context_char_idxs), dim=1)
self.question_char_idxs = torch.cat((ones, self.question_char_idxs), dim=1)
self.y1s += 1
self.y2s += 1
# SQuAD 1.1: Ignore no-answer examples
self.ids = torch.from_numpy(dataset['ids']).long()
self.valid_idxs = [idx for idx in range(len(self.ids))
if use_v2 or self.y1s[idx].item() >= 0]
def __getitem__(self, idx):
idx = self.valid_idxs[idx]
example = (self.context_idxs[idx],
self.context_char_idxs[idx],
self.question_idxs[idx],
self.question_char_idxs[idx],
self.y1s[idx],
self.y2s[idx],
self.ids[idx])
return example
def __len__(self):
return len(self.valid_idxs)
def collate_fn(examples):
"""Create batch tensors from a list of individual examples returned
by `SQuAD.__getitem__`. Merge examples of different length by padding
all examples to the maximum length in the batch.
Args:
examples (list): List of tuples of the form (context_idxs, context_char_idxs,
question_idxs, question_char_idxs, y1s, y2s, ids).
Returns:
examples (tuple): Tuple of tensors (context_idxs, context_char_idxs, question_idxs,
question_char_idxs, y1s, y2s, ids). All of shape (batch_size, ...), where
the remaining dimensions are the maximum length of examples in the input.
Adapted from:
https://github.com/yunjey/seq2seq-dataloader
"""
def merge_0d(scalars, dtype=torch.int64):
return torch.tensor(scalars, dtype=dtype)
def merge_1d(arrays, dtype=torch.int64, pad_value=0):
lengths = [(a != pad_value).sum() for a in arrays]
padded = torch.zeros(len(arrays), max(lengths), dtype=dtype)
for i, seq in enumerate(arrays):
end = lengths[i]
padded[i, :end] = seq[:end]
return padded
def merge_2d(matrices, dtype=torch.int64, pad_value=0):
heights = [(m.sum(1) != pad_value).sum() for m in matrices]
widths = [(m.sum(0) != pad_value).sum() for m in matrices]
padded = torch.zeros(len(matrices), max(heights), max(widths), dtype=dtype)
for i, seq in enumerate(matrices):
height, width = heights[i], widths[i]
padded[i, :height, :width] = seq[:height, :width]
return padded
# Group by tensor type
context_idxs, context_char_idxs, \
question_idxs, question_char_idxs, \
y1s, y2s, ids = zip(*examples)
# Merge into batch tensors
context_idxs = merge_1d(context_idxs)
context_char_idxs = merge_2d(context_char_idxs)
question_idxs = merge_1d(question_idxs)
question_char_idxs = merge_2d(question_char_idxs)
y1s = merge_0d(y1s)
y2s = merge_0d(y2s)
ids = merge_0d(ids)
return (context_idxs, context_char_idxs,
question_idxs, question_char_idxs,
y1s, y2s, ids)
class AverageMeter:
"""Keep track of average values over time.
Adapted from:
> https://github.com/pytorch/examples/blob/master/imagenet/main.py
"""
def __init__(self):
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
"""Reset meter."""
self.__init__()
def update(self, val, num_samples=1):
"""Update meter with new value `val`, the average of `num` samples.
Args:
val (float): Average value to update the meter with.
num_samples (int): Number of samples that were averaged to
produce `val`.
"""
self.count += num_samples
self.sum += val * num_samples
self.avg = self.sum / self.count
class EMA:
"""Exponential moving average of model parameters.
Args:
model (torch.nn.Module): Model with parameters whose EMA will be kept.
decay (float): Decay rate for exponential moving average.
"""
def __init__(self, model, decay):
self.decay = decay
self.shadow = {}
self.original = {}
# Register model parameters
for name, param in model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def __call__(self, model, num_updates):
decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
new_average = \
(1.0 - decay) * param.data + decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def assign(self, model):
"""Assign exponential moving average of parameter values to the
respective parameters.
Args:
model (torch.nn.Module): Model to assign parameter values.
"""
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
self.original[name] = param.data.clone()
param.data = self.shadow[name]
def resume(self, model):
"""Restore original parameters to a model. That is, put back
the values that were in each parameter at the last call to `assign`.
Args:
model (torch.nn.Module): Model to assign parameter values.
"""
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
param.data = self.original[name]
class CheckpointSaver:
"""Class to save and load model checkpoints.
Save the best checkpoints as measured by a metric value passed into the
`save` method. Overwrite checkpoints with better checkpoints once
`max_checkpoints` have been saved.
Args:
save_dir (str): Directory to save checkpoints.
max_checkpoints (int): Maximum number of checkpoints to keep before
overwriting old ones.
metric_name (str): Name of metric used to determine best model.
maximize_metric (bool): If true, best checkpoint is that which maximizes
the metric value passed in via `save`. Otherwise, best checkpoint
minimizes the metric.
log (logging.Logger): Optional logger for printing information.
"""
def __init__(self, save_dir, max_checkpoints, metric_name,
maximize_metric=False, log=None):
super(CheckpointSaver, self).__init__()
self.save_dir = save_dir
self.max_checkpoints = max_checkpoints
self.metric_name = metric_name
self.maximize_metric = maximize_metric
self.best_val = None
self.ckpt_paths = queue.PriorityQueue()
self.log = log
self._print('Saver will {}imize {}...'
.format('max' if maximize_metric else 'min', metric_name))
def is_best(self, metric_val):
"""Check whether `metric_val` is the best seen so far.
Args:
metric_val (float): Metric value to compare to prior checkpoints.
"""
if metric_val is None:
# No metric reported
return False
if self.best_val is None:
# No checkpoint saved yet
return True
return ((self.maximize_metric and self.best_val < metric_val)
or (not self.maximize_metric and self.best_val > metric_val))
def _print(self, message):
"""Print a message if logging is enabled."""
if self.log is not None:
self.log.info(message)
def save(self, step, model, metric_val, device):
"""Save model parameters to disk.
Args:
step (int): Total number of examples seen during training so far.
model (torch.nn.DataParallel): Model to save.
metric_val (float): Determines whether checkpoint is best so far.
device (torch.device): Device where model resides.
"""
ckpt_dict = {
'model_name': model.__class__.__name__,
'model_state': model.cpu().state_dict(),
'step': step
}
model.to(device)
checkpoint_path = os.path.join(self.save_dir,
'step_{}.pth.tar'.format(step))
torch.save(ckpt_dict, checkpoint_path)
self._print('Saved checkpoint: {}'.format(checkpoint_path))
if self.is_best(metric_val):
# Save the best model
self.best_val = metric_val
best_path = os.path.join(self.save_dir, 'best.pth.tar')
shutil.copy(checkpoint_path, best_path)
self._print('New best checkpoint at step {}...'.format(step))
# Add checkpoint path to priority queue (lowest priority removed first)
if self.maximize_metric:
priority_order = metric_val
else:
priority_order = -metric_val
self.ckpt_paths.put((priority_order, checkpoint_path))
# Remove a checkpoint if more than max_checkpoints have been saved
if self.ckpt_paths.qsize() > self.max_checkpoints:
_, worst_ckpt = self.ckpt_paths.get()
try:
os.remove(worst_ckpt)
self._print('Removed checkpoint: {}'.format(worst_ckpt))
except OSError:
# Avoid crashing if checkpoint has been removed or protected
pass
def load_model(model, checkpoint_path, gpu_ids, return_step=True):
"""Load model parameters from disk.
Args:
model (torch.nn.DataParallel): Load parameters into this model.
checkpoint_path (str): Path to checkpoint to load.
gpu_ids (list): GPU IDs for DataParallel.
return_step (bool): Also return the step at which checkpoint was saved.
Returns:
model (torch.nn.DataParallel): Model loaded from checkpoint.
step (int): Step at which checkpoint was saved. Only if `return_step`.
"""
device = 'cuda:{}'.format(gpu_ids[0]) if gpu_ids else 'cpu'
ckpt_dict = torch.load(checkpoint_path, map_location=device)
# Build model, load parameters
model.load_state_dict(ckpt_dict['model_state'])
if return_step:
step = ckpt_dict['step']
return model, step
return model
def get_available_devices():
"""Get IDs of all available GPUs.
Returns:
device (torch.device): Main device (GPU 0 or CPU).
gpu_ids (list): List of IDs of all GPUs that are available.
"""
gpu_ids = []
if torch.cuda.is_available():
gpu_ids += [gpu_id for gpu_id in range(torch.cuda.device_count())]
device = torch.device('cuda:{}'.format(gpu_ids[0]))
torch.cuda.set_device(device)
else:
device = torch.device('cpu')
return device, gpu_ids
def masked_softmax(logits, mask, dim=-1, log_softmax=False):
"""Take the softmax of `logits` over given dimension, and set
entries to 0 wherever `mask` is 0.
Args:
logits (torch.Tensor): Inputs to the softmax function.
mask (torch.Tensor): Same shape as `logits`, with 0 indicating
positions that should be assigned 0 probability in the output.
dim (int): Dimension over which to take softmax.
log_softmax (bool): Take log-softmax rather than regular softmax.
E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.
Returns:
probs (torch.Tensor): Result of taking masked softmax over the logits.
"""
mask = mask.type(torch.float32)
masked_logits = mask * logits + (1 - mask) * -1e30
softmax_fn = F.log_softmax if log_softmax else F.softmax
probs = softmax_fn(masked_logits, dim)
return probs
def visualize(tbx, pred_dict, eval_path, step, split, num_visuals):
"""Visualize text examples to TensorBoard.
Args:
tbx (tensorboardX.SummaryWriter): Summary writer.
pred_dict (dict): dict of predictions of the form id -> pred.
eval_path (str): Path to eval JSON file.
step (int): Number of examples seen so far during training.
split (str): Name of data split being visualized.
num_visuals (int): Number of visuals to select at random from preds.
"""
if num_visuals <= 0:
return
if num_visuals > len(pred_dict):
num_visuals = len(pred_dict)
visual_ids = np.random.choice(list(pred_dict), size=num_visuals, replace=False)
with open(eval_path, 'r') as eval_file:
eval_dict = json.load(eval_file)
for i, id_ in enumerate(visual_ids):
pred = pred_dict[id_] or 'N/A'
example = eval_dict[str(id_)]
question = example['question']
context = example['context']
answers = example['answers']
gold = answers[0] if answers else 'N/A'
tbl_fmt = ('- **Question:** {}\n'
+ '- **Context:** {}\n'
+ '- **Answer:** {}\n'
+ '- **Prediction:** {}')
tbx.add_text(tag='{}/{}_of_{}'.format(split, i + 1, num_visuals),
text_string=tbl_fmt.format(question, context, gold, pred),
global_step=step)
def save_preds(preds, save_dir, file_name='predictions.csv'):
"""Save predictions `preds` to a CSV file named `file_name` in `save_dir`.
Args:
preds (list): List of predictions each of the form (id, start, end),
where id is an example ID, and start/end are indices in the context.
save_dir (str): Directory in which to save the predictions file.
file_name (str): File name for the CSV file.
Returns:
save_path (str): Path where CSV file was saved.
"""
# Validate format
if (not isinstance(preds, list)
or any(not isinstance(p, tuple) or len(p) != 3 for p in preds)):
raise ValueError('preds must be a list of tuples (id, start, end)')
# Make sure predictions are sorted by ID
preds = sorted(preds, key=lambda p: p[0])
# Save to a CSV file
save_path = os.path.join(save_dir, file_name)
np.savetxt(save_path, np.array(preds), delimiter=',', fmt='%d')
return save_path
def get_save_dir(base_dir, name, training, id_max=100):
"""Get a unique save directory by appending the smallest positive integer
`id < id_max` that is not already taken (i.e., no dir exists with that id).
Args:
base_dir (str): Base directory in which to make save directories.
name (str): Name to identify this training run. Need not be unique.
training (bool): Save dir. is for training (determines subdirectory).
id_max (int): Maximum ID number before raising an exception.
Returns:
save_dir (str): Path to a new directory with a unique name.
"""
for uid in range(1, id_max):
subdir = 'train' if training else 'test'
save_dir = os.path.join(base_dir, subdir, '{}-{:02d}'.format(name, uid))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
return save_dir
raise RuntimeError('Too many save directories created with the same name. \
Delete old save directories or use another name.')
def get_logger(log_dir, name):
"""Get a `logging.Logger` instance that prints to the console
and an auxiliary file.
Args:
log_dir (str): Directory in which to create the log file.
name (str): Name to identify the logs.
Returns:
logger (logging.Logger): Logger instance for logging events.
"""
class StreamHandlerWithTQDM(logging.Handler):
"""Let `logging` print without breaking `tqdm` progress bars.
See Also:
> https://stackoverflow.com/questions/38543506
"""
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
# Create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# Log everything (i.e., DEBUG level and above) to a file
log_path = os.path.join(log_dir, 'log.txt')
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(logging.DEBUG)
# Log everything except DEBUG level (i.e., INFO level and above) to console
console_handler = StreamHandlerWithTQDM()
console_handler.setLevel(logging.INFO)
# Create format for the logs
file_formatter = logging.Formatter('[%(asctime)s] %(message)s',
datefmt='%m.%d.%y %H:%M:%S')
file_handler.setFormatter(file_formatter)
console_formatter = logging.Formatter('[%(asctime)s] %(message)s',
datefmt='%m.%d.%y %H:%M:%S')
console_handler.setFormatter(console_formatter)
# add the handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger
def torch_from_json(path, dtype=torch.float32):
"""Load a PyTorch Tensor from a JSON file.
Args:
path (str): Path to the JSON file to load.
dtype (torch.dtype): Data type of loaded array.
Returns:
tensor (torch.Tensor): Tensor loaded from JSON file.
"""
with open(path, 'r') as fh:
array = np.array(json.load(fh))
tensor = torch.from_numpy(array).type(dtype)
return tensor
def discretize(p_start, p_end, max_len=30, no_answer=False):
"""Discretize soft predictions to get start and end indices.
Choose the pair `(i, j)` of indices that maximizes `p1[i] * p2[j]`
subject to `i <= j` and `j - i + 1 <= max_len`.
Args:
p_start (torch.Tensor): Soft predictions for start index.
Shape (batch_size, context_len).
p_end (torch.Tensor): Soft predictions for end index.
Shape (batch_size, context_len).
max_len (int): Maximum length of the discretized prediction.
I.e., enforce that `preds[i, 1] - preds[i, 0] + 1 <= max_len`.
no_answer (bool): Treat 0-index as the no-answer prediction. Consider
a prediction no-answer if `preds[0, 0] * preds[0, 1]` is greater
than the probability assigned to the max-probability span.
Returns:
start_idxs (torch.Tensor): Hard predictions for start index.
Shape (batch_size,)
end_idxs (torch.Tensor): Hard predictions for end index.
Shape (batch_size,)
"""
if p_start.min() < 0 or p_start.max() > 1 \
or p_end.min() < 0 or p_end.max() > 1:
raise ValueError('Expected p_start and p_end to have values in [0, 1]')
# Compute pairwise probabilities
p_start = p_start.unsqueeze(dim=2)
p_end = p_end.unsqueeze(dim=1)
p_joint = torch.matmul(p_start, p_end) # (batch_size, c_len, c_len)
# Restrict to pairs (i, j) such that i <= j <= i + max_len - 1
c_len, device = p_start.size(1), p_start.device
is_legal_pair = torch.triu(torch.ones((c_len, c_len), device=device))
is_legal_pair -= torch.triu(torch.ones((c_len, c_len), device=device),
diagonal=max_len)
if no_answer:
# Index 0 is no-answer
p_no_answer = p_joint[:, 0, 0].clone()
is_legal_pair[0, :] = 0
is_legal_pair[:, 0] = 0
else:
p_no_answer = None
p_joint *= is_legal_pair
# Take pair (i, j) that maximizes p_joint
max_in_row, _ = torch.max(p_joint, dim=2)
max_in_col, _ = torch.max(p_joint, dim=1)
start_idxs = torch.argmax(max_in_row, dim=-1)
end_idxs = torch.argmax(max_in_col, dim=-1)
if no_answer:
# Predict no-answer whenever p_no_answer > max_prob
max_prob, _ = torch.max(max_in_col, dim=-1)
start_idxs[p_no_answer > max_prob] = 0
end_idxs[p_no_answer > max_prob] = 0
return start_idxs, end_idxs
def convert_tokens(eval_dict, qa_id, y_start_list, y_end_list, no_answer):
    """Map predicted start/end token indices back to answer text.

    Args:
        eval_dict (dict): Eval info keyed by example ID (stringified int);
            each entry carries the raw context, character span offsets per
            token, and the example UUID.
        qa_id (list): QA example IDs.
        y_start_list (list): Predicted start token indices.
        y_end_list (list): Predicted end token indices.
        no_answer (bool): If True, index 0 encodes the no-answer prediction
            and all real token indices are shifted up by one.

    Returns:
        tuple: (pred_dict, sub_dict) mapping ID -> answer text and
        UUID -> answer text (submission format).
    """
    pred_dict = {}
    sub_dict = {}
    for qid, start, end in zip(qa_id, y_start_list, y_end_list):
        entry = eval_dict[str(qid)]
        context = entry["context"]
        spans = entry["spans"]
        uuid = entry["uuid"]
        if no_answer and 0 in (start, end):
            # Either index hitting 0 means "no answer" for this example.
            answer = ''
        else:
            if no_answer:
                # Undo the +1 shift introduced by the no-answer slot.
                start, end = start - 1, end - 1
            answer = context[spans[start][0]: spans[end][1]]
        pred_dict[str(qid)] = answer
        sub_dict[uuid] = answer
    return pred_dict, sub_dict
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best score of `prediction` against any ground truth.

    An empty ground-truth list is scored against the empty string, so
    no-answer examples still produce a score.
    """
    if not ground_truths:
        return metric_fn(prediction, '')
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def eval_dicts(gold_dict, pred_dict, no_answer):
    """Aggregate EM and F1 (and AvNA when `no_answer`) over all predictions.

    Each prediction is scored against the best-matching gold answer via
    `metric_max_over_ground_truths`; results are percentages.
    """
    total = len(pred_dict)
    em = f1 = avna = 0
    for key, prediction in pred_dict.items():
        answers = gold_dict[key]['answers']
        em += metric_max_over_ground_truths(compute_em, prediction, answers)
        f1 += metric_max_over_ground_truths(compute_f1, prediction, answers)
        if no_answer:
            avna += compute_avna(prediction, answers)
    results = {'EM': 100. * em / total,
               'F1': 100. * f1 / total}
    if no_answer:
        results['AvNA'] = 100. * avna / total
    return results
def compute_avna(prediction, ground_truths):
    """Compute answer vs. no-answer accuracy.

    Scores 1.0 when prediction and ground truths agree on answerability
    (both non-empty, or both empty), else 0.0.
    """
    has_pred = bool(prediction)
    has_gold = bool(ground_truths)
    return 1.0 if has_pred == has_gold else 0.0
# All methods below this line are from the official SQuAD 2.0 eval script
# https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
    """Lowercase, strip punctuation and articles, collapse whitespace.

    Pipeline (order matters): lowercase -> drop punctuation -> replace
    a/an/the with a space -> join on single spaces.
    """
    punct = set(string.punctuation)
    no_punct = ''.join(ch for ch in s.lower() if ch not in punct)
    no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punct, flags=re.UNICODE)
    return ' '.join(no_articles.split())
def get_tokens(s):
    """Split a normalized answer into tokens; falsy input yields []."""
    return normalize_answer(s).split() if s else []
def compute_em(a_gold, a_pred):
    """Exact match after normalization: 1 if identical, else 0."""
    gold = normalize_answer(a_gold)
    pred = normalize_answer(a_pred)
    return 1 if gold == pred else 0
def compute_f1(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers.

    No-answer convention: if either side tokenizes to nothing, F1 is 1
    when both are empty and 0 otherwise.
    """
    gold = get_tokens(a_gold)
    pred = get_tokens(a_pred)
    if not gold or not pred:
        return int(gold == pred)
    overlap = sum((Counter(gold) & Counter(pred)).values())
    if overlap == 0:
        return 0
    precision = overlap / len(pred)
    recall = overlap / len(gold)
    return 2 * precision * recall / (precision + recall)
|
[
"simerjotkaur@Simerjots-MacBook-Pro.local"
] |
simerjotkaur@Simerjots-MacBook-Pro.local
|
8a5406e704c94bdc160a359c06d1a7a15a74b5d5
|
4a3d5904441535e77d39e1f6d9552433d297ce0a
|
/gcd.py
|
5daaec225885f7ed23f4af84a2f16cf3195aee13
|
[] |
no_license
|
Rinkikumari19/codechef_questions
|
b2a5d3b483f3aa8ef70d4af963f7315dca308e6e
|
2b1f991d3e2faad4ca5d7ce9af4fad0c35e7b087
|
refs/heads/master
| 2022-11-23T19:00:21.589006
| 2020-08-02T18:22:38
| 2020-08-02T18:22:38
| 280,680,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# For each of T test cases, read a pair (A, B) and print their greatest
# common divisor and least common multiple.
import math

T = int(input("any number"))  # number of test cases (prompt kept as-is)
for _ in range(T):
    A, B = map(int, input().split())
    # math.gcd runs in O(log min(A,B)) vs the original O(min(A,B)) loop,
    # and integer // avoids the float precision loss of the original
    # lcm = gcd * (A/gcd) * (B/gcd) computation for large inputs.
    g = math.gcd(A, B)
    lcm = A * B // g if g else 0
    print(g, lcm)
|
[
"ravina18@navgurukul.org"
] |
ravina18@navgurukul.org
|
20240383cc09fbf52e4e7e8987d198d63ca29fe4
|
ec0e3b6188a527f0a61df96160ada9e1ab25bf03
|
/Øving 1/helloworldpython.py
|
9b627cb6501fb965e791a3cc6548d9cfdd840ccc
|
[] |
no_license
|
basbru/sanntid
|
cf0c6f820b6c67b611c32edaa863e5d578c80b80
|
94d040569fc52ac329bcc46e5168f4c61b97ef0a
|
refs/heads/master
| 2020-04-17T18:17:04.114920
| 2014-01-24T10:38:11
| 2014-01-24T10:38:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
# Python 3.3.3 and 2.7.6
# python helloworld_python.py
#
# NOTE: this exercise deliberately demonstrates a data race. `i += 1` and
# `i -= 1` are read-modify-write sequences that are not atomic, so with no
# synchronization the printed final value is usually NOT 0.
from threading import Thread

i = 0  # shared mutable counter, intentionally unprotected

def adder():
    # Increment the shared counter one million times.
    global i
    for x in range(0, 1000000):
        i += 1

def subtractor():
    # Decrement the shared counter one million times.
    global i
    for x in range(0,1000000):
        i -= 1

def main():
    # Run both workers concurrently and wait for both before printing.
    adder_thr = Thread(target = adder)
    adder_thr.start()
    sub_thr = Thread(target = subtractor)
    sub_thr.start()
    adder_thr.join()
    sub_thr.join()
    print("Done: " + str(i))

main()
|
[
"tompersen@gmail.com"
] |
tompersen@gmail.com
|
dffca74d8efb61356362ea3eddd03076cd1dc1e6
|
30d30f6ee90db2a6ff001d58308d4ac1630ce5c4
|
/python-and-django-career/django-course/platzigram/posts/models.py
|
1952a5f981b71ac9312c6636188ea30690c6be63
|
[] |
no_license
|
mijaelrcf/Courses
|
1aafa84ac02f11fc461d5fb7bc6d837cda47dee7
|
f247d709b728acf73fbda2dad2a1d0a26a6c9c9a
|
refs/heads/master
| 2023-01-19T00:07:46.081216
| 2021-06-08T02:13:00
| 2021-06-08T02:13:00
| 241,657,673
| 0
| 0
| null | 2023-01-07T21:06:38
| 2020-02-19T15:57:47
|
Python
|
UTF-8
|
Python
| false
| false
| 626
|
py
|
"""Posts models."""
# Django
from django.db import models
from django.contrib.auth.models import User
class Post(models.Model):
    """Photo post published by a user."""

    # Author (auth user); deleting the user cascades to their posts.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Lazy string reference avoids importing the users app at module load.
    profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE)
    title = models.CharField(max_length=255)
    # Uploaded image, stored under MEDIA_ROOT/post/photo.
    photo = models.ImageField(upload_to='post/photo')
    # created is set once on insert; modified is refreshed on every save.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        """Return title and username."""
        return '{} by @{}'.format(self.title, self.user.username)
|
[
"mijael.rcf@gmail.com"
] |
mijael.rcf@gmail.com
|
5d4572cad2e5848b95323c91c66820696b05e2f6
|
0b0abc06caa25dd269e1855d3cc6c72d34dc436c
|
/escuela/visitante/migrations/0002_recomendacion.py
|
245192f93bc17068dd4fc06196c491f630661924
|
[] |
no_license
|
escuela2021/escuelagithub
|
0130589214681d1ff9da36ffafd8aafb99c9b96d
|
f35897d1918af3a22d66b163153fc72a927516e8
|
refs/heads/master
| 2023-09-03T21:36:00.513263
| 2021-11-11T18:38:08
| 2021-11-11T18:38:08
| 427,109,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
# Generated by Django 3.2.4 on 2021-11-04 16:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the 'recomendacion' table, a study
    # recommendation tied to a core.curso row. Do not hand-edit field
    # definitions; generate a new migration instead.

    dependencies = [
        ('core', '0007_alter_curso_disciplina'),
        ('visitante', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='recomendacion',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('materia', models.TextField()),
                ('nivel', models.CharField(default='m', max_length=10)),
                ('curso', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.curso')),
            ],
        ),
    ]
|
[
"gjangoinminutes@gmail.com"
] |
gjangoinminutes@gmail.com
|
aa6f00f4d750aab07794d8848be0b5b24c905cfd
|
40da1c35e2b181947b38188e0661f9df763f1d23
|
/step2/step2.py
|
c467dab0c089089bc2c8a24abb5c6fb2aa33c018
|
[] |
no_license
|
tnakaicode/jburkardt-fipy
|
bff70cb8e891b4ea7593b90aad12841db1c78d99
|
77c57da29dc06bf91253af6e90a91e561a3ac413
|
refs/heads/master
| 2021-02-17T22:12:19.694733
| 2020-03-06T20:48:41
| 2020-03-06T20:48:41
| 245,130,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,194
|
py
|
#! /usr/bin/env python
#
from fenics import *
def step2 ( my_grid, my_degree ):

#*****************************************************************************80
#
## step2 solves a Poisson equation with piecewise constant diffusivity K(X,Y).
#
#  Discussion:
#
#    Use the mixed DPG method to solve a Poisson equation
#    with a piecewise constant diffusivity function K(X).
#
#    - div k grad u = f in Omega = [0,1]x[0,1]
#    u = 0 on dOmega
#
#    Use:
#      k(x,y) = 1 if x < 1/3
#             = 4 otherwise
#      f(x,y) = 2*pi*pi*sin(pi*x) * sin(pi*y)
#
#    For this case, I could have used an "Expression()" for k(x,y)
#    involving the C ternary operator:
#      k(x,y) = condition ? true value : false value
#    but this can't be easily generalized to more complicated cases.
#
#    The step1 problem used k(x,y)=1.  Since step2 uses a changed k,
#    the exact solution u is not known.  However, we retain the
#    source term f(x,y) and zero boundary conditions, for lack of any
#    better case to consider.
#
#  Modified:
#
#    24 October 2018
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer MY_GRID, the number of (pairs of) elements to use in the
#    X and Y directions.  Number of triangular elements is 2*my_grid*my_grid.
#
#    Input, integer MY_DEGREE, specifies approximation degrees.
#    Error indicators: MY_DEGREE+2
#    Primary variable: MY_DEGREE+1
#    Fluxes: MY_DEGREE
#
  import matplotlib.pyplot as plt
  import numpy as np
#
#  Report input.
#
  print ( '' )
  print ( ' Case of %d x %d mesh' % ( my_grid, my_grid ) )
  print ( ' Approximation degree %d:' % ( my_degree ) )
#
#  Mesh the unit square.
#
  my_mesh = UnitSquareMesh ( my_grid, my_grid )
#
#  Plot the mesh.
#
  plot ( my_mesh, title = 'step2 Mesh' )
  filename = 'step2_mesh.png'
  plt.savefig ( filename )
  print ( ' Graphics saved as "%s"' % ( filename ) )
  plt.close ( )
#
#  Define the diffusivity function K(X,Y).
#  The complication here is that the input "x" is not a Python numerical
#  array, but a corresponding, but symbolic, object.  So we have to use
#  the UFL "conditional" function to express a test on X.
#
#  NOTE(review): the active condition below is x + y <= 1, not x < 1/3 as
#  the Discussion above states (that version is the commented-out line).
#  Confirm which interface is intended.
#
  def diffusivity ( x ):
#   value = conditional ( le ( 3.0 * x[0], 1.0 ), 1.0, 4.0 )
    value = conditional ( le ( x[0] + x[1], 1.0 ), 1.0, 4.0 )
    return value
#
#  Plot the diffusivity function.
#
  x = SpatialCoordinate ( my_mesh )
  plot ( diffusivity ( x ), title = 'Step2 Diffusivity' )
  filename = 'step2_diffusivity.png'
  plt.savefig ( filename )
  print ( ' Graphics saved as "%s"' % ( filename ) )
  plt.close ( )
#
#  Define the right hand side F(X,Y):
#
  F = Expression ( '2 * pi * pi * sin ( pi * x[0] ) * sin ( pi * x[1] )', degree = 10 )
#
#  Set function spaces for Error estimator, Primal variable, interfacial flux.
#
  Es = FunctionSpace ( my_mesh, 'DG', my_degree + 2 )
  Us = FunctionSpace ( my_mesh, 'CG', my_degree + 1 )
  Qs = FunctionSpace ( my_mesh, 'BDM', my_degree, restriction = 'facet' )
#
#  Set elements for Error estimator, Primal variable, interfacial flux.
#
  Ee = FiniteElement ( 'DG', triangle, my_degree + 2 )
  Ue = FiniteElement ( 'CG', triangle, my_degree + 1 )
  Qe = FiniteElement ( 'BDM', triangle, my_degree )
#
#  Define the mixed element, and the corresponding function space.
#
  EUQe = MixedElement ( Ee, Ue, Qe )
  EUQs = FunctionSpace ( my_mesh, EUQe )
#
#  Extract the individual trial and test function factors from the space.
#  Here, "e" for example is a symbol for typical trial functions from Es.
#
  ( e, u, q ) = TrialFunctions ( EUQs )
  ( et, ut, qt ) = TestFunctions ( EUQs )
#
#  Compute the normal vectors.
#
  n = FacetNormal ( my_mesh )
#
#  Define the inner product for the error estimator space.
#
  yip = dot ( grad ( e ), grad ( et ) ) * dx + e * et * dx
#
#  Set up the saddle point problem:
#
#    b( (u,q), et ) = ( k * grad u, grad et ) - < q.n, et >
#    This is an equation for U and Q.
#
  b1 = dot ( diffusivity ( x ) * grad ( u ), grad ( et ) ) * dx \
    - dot ( q ( '+' ), n ( '+' ) ) * ( et ( '+' ) - et ( '-' ) ) * dS \
    - dot ( q, n ) * et * ds
#
#    b( (ut,qt), e ) = ( k * grad ut, grad e ) - < qt.n, e >
#    This is an equation for E.
#
  b2 = dot ( diffusivity ( x ) * grad ( ut ), grad ( e ) ) * dx \
    - dot ( qt ( '+' ), n ( '+' ) ) * ( e ( '+' ) - e ( '-' ) ) * dS \
    - dot ( qt, n ) * e * ds
#
#  Set the saddle point problem:
#
#    yip + b1 = F * et * dx
#    b2       = 0
#
  a = yip + b1 + b2
  b = F * et * dx
#
#  Apply the Dirichlet boundary condition to the second component (U).
#
  U = Expression ( "0", degree = 10 )
  bc = DirichletBC ( EUQs.sub(1), U, DomainBoundary() )
#
#  Solve.
#  This "e, u, q" is DIFFERENT from the e, u, q above!
#  Here, "e" is the actual finite element solution function.
#
  euq = Function ( EUQs )
  solve ( a == b, euq, bcs = bc )
  e, u, q = euq.split ( deepcopy = True )
#
#  Plot the solution.
#
  fig = plot ( u, title = 'step2 solution' )
  plt.colorbar ( fig )
  filename = 'step2_solution.png'
  plt.savefig ( filename )
  print ( ' Graphics saved as "%s"' % ( filename ) )
  plt.close ( )
#
#  Plot the error indicators.
#
  fig = plot ( e, title = 'step2 indicators' )
  plt.colorbar ( fig )
  filename = 'step2_indicators.png'
  plt.savefig ( filename )
  print ( ' Graphics saved as "%s"' % ( filename ) )
  plt.close ( )
#
#  Terminate.
#
  return
def step2_test ( ):

#*****************************************************************************80
#
## step2_test tests step2.
#
#  Discussion:
#
#    Runs step2 once on an 8x8 mesh with degree-1 approximation,
#    bracketing the run with timestamps.
#
#  Modified:
#
#    24 October 2018
#
#  Author:
#
#    John Burkardt
#
  import time
  print ( time.ctime ( time.time() ) )
  print ( '' )
  print ( 'step2:' )
  print ( ' FENICS/Python version' )
  print ( ' Step 2 in P-Laplacian investigation.' )
  print ( ' Piecewise constant diffusivity.' )
#
#  Report level = only warnings or higher.
#
  level = 30
  set_log_level ( level )

  my_grid = 8
  my_degree = 1
  step2 ( my_grid, my_degree )
#
#  Terminate.
#
  print ( '' )
  print ( 'step2:' );
  print ( ' Normal end of execution.' )
  print ( '' )
  print ( time.ctime ( time.time() ) )
# Run the self-test when executed as a script (not on import).
if ( __name__ == '__main__' ):
  step2_test ( )
|
[
"t.nakai.office@gmail.com"
] |
t.nakai.office@gmail.com
|
00b25d2276786b6b8244c897fb30ad536921c44b
|
7801b0356b60de5a4fa6b214717a1c04942b5b62
|
/rbac/migrations/0002_remove_role_url.py
|
63539615dd84c16bdceb6e7d87e57c28cf76ff41
|
[] |
no_license
|
hqs2212586/CRM_demo
|
365652c61c991a2098d32b5db318d55cf29baa0b
|
941a896aef598d81750a96074bc63ccfaaadf0a5
|
refs/heads/master
| 2020-03-27T17:43:40.110992
| 2018-08-31T09:20:01
| 2018-08-31T09:20:01
| 146,869,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
# Generated by Django 2.0.6 on 2018-08-09 13:40
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the 'url' field from the rbac Role
    # model. Do not hand-edit; generate a new migration for further changes.

    dependencies = [
        ('rbac', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='role',
            name='url',
        ),
    ]
|
[
"443514404@qq.com"
] |
443514404@qq.com
|
cba227d0538ffeaf8902eb489c3d21ca075255b1
|
85e1d38268bff735ccb1962e6649a97bfd890d21
|
/ReturnConfig/GetSlidingAndDoorConfigVersion/GetSlidingAndDoorConfig20191231_celery.py
|
6394db502dbd0c27c9664f5620c79e6a4ab47128
|
[] |
no_license
|
Thewolfinsociety/ServerCode
|
6e28d4c4c8e92f4c8db4d8afa5a24043efa870aa
|
596b90656edce9738d976109d587713bd916ed2d
|
refs/heads/master
| 2021-03-10T11:17:29.603364
| 2020-03-18T09:52:50
| 2020-03-18T09:52:50
| 246,447,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105,477
|
py
|
# -*- coding: utf-8 -*-
'''
服务器版
功能:返回趟门掩门配置
vesion 1.0.1
2019/11/19
author:litao
'''
# logging.basicConfig(level="DEBUG")
# _logging = logging.getLogger(__name__)
from xml.dom import minidom
import xml.etree.ElementTree as ET
import logging
import os
import sys
import math
import pypyodbc
import traceback
from lupa import LuaRuntime
import threading
import time
from bsddb3 import db
import json
import tornado
import asyncio
if os.getcwd()+'\\Python3\\' not in sys.path:
sys.path.append(os.getcwd()+'\\Python3\\')
if os.getcwd() + '\\Python3\\PythontoBomJson' not in sys.path:
sys.path.append(os.getcwd() + '\\Python3\\PythontoBomJson')
if os.getcwd() + '\\Python3\\ReturnConfig' not in sys.path:
sys.path.append(os.getcwd() + '\\Python3\\ReturnConfig')
# from ReturnConfig.SlidingAndDoor.funcGetSLoadXML2BomlidingJson import *
# from ReturnConfig.SlidingAndDoor.funcGetDoorJson import *
from ReturnConfig.ExpValue import GetExpValue
SADlog = logging.getLogger()
threadLock = threading.Lock()
log_dir = "log" # 日志存放文件夹名称
log_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), log_dir)
if not os.path.isdir(log_path):
os.makedirs(log_path)
# logger = logging.getLogger()
# logger.setLevel(logging.ERROR)
def outputlog():
    # Configure module logging: a per-run, UTF-8 log file named with the
    # start timestamp (INFO and above), plus a console handler on the root
    # logger so messages also appear on stderr.
    main_log_handler = logging.FileHandler(log_path +
                                           "/SAD_%s.log" % time.strftime("%Y-%m-%d_%H-%M-%S",
                                                                         time.localtime(time.time())), mode="w+",
                                           encoding="utf-8")
    main_log_handler.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    main_log_handler.setFormatter(formatter)
    SADlog.addHandler(main_log_handler)
    # Console output: a StreamHandler at INFO or above, attached to the
    # root logger (so every logger in the process echoes to the console).
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)  # messages below INFO are not printed
    formatter = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s")
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
outputlog()
#举例查看log的形式
SADlog.setLevel(logging.ERROR)
#全局变量部分
Sliding = {
'SfgParam': {
'HTxml': '<产品 名称="横2格门" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" 宽="L" 深="$竖中横厚度" 高="$竖中横宽度" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="L-$门芯1宽度-$竖中横宽度+2*$竖中横进槽" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/></我的模块><我的规格><规格 名称="竖2格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'Txml': '<产品 名称="竖2格门" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横宽度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="L-$门芯1宽度-$竖中横宽度+2*$竖中横进槽" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/></我的模块><我的规格><规格 名称="竖2格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'Sxml': '<产品 名称="竖3格门_两边均分" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横宽度+$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" Y="$门芯3前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/></我的模块><我的规格><规格 名称="竖3格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'Fxml': '<产品 名称="竖4格门_改123" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="4" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="$门芯1宽度+$门芯2宽度+$竖中横宽度-3*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="4" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="$门芯1宽度+$门芯2宽度+2*$竖中横宽度-4*$竖中横进槽" Y="$门芯3前偏移" Z="0" 宽="$门芯3宽度" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/><板件 名称="竖中横3" X="$门芯1宽度+$门芯2宽度+$门芯3宽度+2*$竖中横宽度-5*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="4" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="6"/><板件 名称="门芯4" X="L-(L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽)" Y="$门芯4前偏移" Z="0" 宽="L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽" 深="$门芯4厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="7"/></我的模块><我的规格><规格 名称="竖3格门" 宽="900" 深="20" 高="1000"/></我的规格></产品>',
'HSxml': '<产品 名称="横3格门_两边均分" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横宽度+$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" Y="$门芯3前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/></我的模块><我的规格><规格 名称="竖3格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'HFxml': '<产品 名称="横4格门_改123" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="$门芯1宽度+$门芯2宽度+$竖中横宽度-3*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="$门芯1宽度+$门芯2宽度+2*$竖中横宽度-4*$竖中横进槽" Y="$门芯3前偏移" Z="0" 宽="$门芯3宽度" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/><板件 名称="竖中横3" X="$门芯1宽度+$门芯2宽度+$门芯3宽度+2*$竖中横宽度-5*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="6"/><板件 名称="门芯4" X="L-(L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽)" Y="$门芯4前偏移" Z="0" 宽="L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽" 深="$门芯4厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="7"/></我的模块><我的规格><规格 名称="竖3格门" 宽="900" 深="20" 高="1000"/></我的规格></产品>'},
}
class PDoorsParam(object): # hinged-door ("yanmen") parameters
    """Parameter record for one hinged-door product configuration.

    All fields start at neutral defaults and are filled in later from the
    database/config.  Naming conventions inferred from usage (TODO confirm):
    'vbox*' = vertical frame strips, 'udbox*'/'hbox*' = top-bottom /
    horizontal frame strips, '*xml' = door layout templates, '*bdfile' =
    edge-profile files, '*3d' = 3D model names.
    """
    def __init__(self):
        # Identity / bookkeeping.
        self.id = 0
        self.deleted = False
        self.name = ''
        self.DoorsType = ''
        self.handle = ''
        self.wjname = ''
        self.hboxname = ''
        self.paneltype = ''
        self.cap = 0
        self.eb_cap = 0
        # Frame strip names / lengths / sizes.
        self.vboxname = ''
        self.udboxname = ''
        self.vboxl = ''
        self.udboxl = ''
        self.vboxh = 0
        self.udboxh = 0
        self.vthick = 0
        self.udthick = 0
        self.vboxjtw = 0
        self.udboxjtw = 0
        self.hboxjtw = 0
        self.udbox_hbox_value = 0
        # 3D model references.
        self.d3name = ''
        self.hbox3d = ''
        self.ubox3d = ''
        self.dbox3d = ''
        # Panel size limits.
        self.cpm_lmax = 0
        self.cpm_hmax = 0
        # Orientation / BOM expression strings and flags.
        self.vdirect = ''
        self.vfbstr = ''
        self.uddirect = ''
        self.udfbstr = ''
        self.vmemo = ''
        self.udmemo = ''
        self.fbstr = ''
        self.iscalc_framebom = 0
        self.is_xq = 0
        self.cb_yyvalue = 0
        self.is_buy = 0
        self.frame_valuel = 0
        self.frame_valueh = 0
        self.bomtype = ''
        # Door layout XML templates and edge-profile files.
        self.left_doorxml = ''
        self.right_doorxml = ''
        self.doorxml = ''
        self.bdfile = ''
        self.l_bdfile = ''
        self.r_bdfile = ''
        self.u_bdfile = ''
        self.d_bdfile = ''
        self.noframe_bom = 0
class TDoorDoorRect(object):
    """Geometry and hardware state for a single hinged-door rectangle.

    Holds the door's outline in two coordinate variants (x0/y0/doorw/doorh
    and x1/y1/doorw1/doorh1 -- presumably raw vs. adjusted; confirm),
    handle/hinge placement, panel lists, and the door's parameter record.
    """
    def __init__(self):
        # Outline rectangles.
        self.x0 = 0
        self.y0 = 0
        self.doorw = 0
        self.doorh = 0
        self.x1 = 0
        self.y1 = 0
        self.doorw1 = 0
        self.doorh1 = 0
        # UI / drawing state.
        self.selected = False
        self.hhdraw = False
        self.mOpenDirect = '' # opening direction
        self.mMemo = ''
        # Overall door and frame-strip dimensions.
        self.mDoorW = 0
        self.mDoorH = 0
        self.mVBoxW = 0
        self.mUDBoxH = 0
        self.mVBoxW0 = 0
        self.mUDBoxH0 = 0
        # Handle description and placement.
        self.mHandle = ''
        self.mHandlePos = ''
        self.mHandlePosX = ''
        self.mHandlePosY = ''
        self.mHandleX = 0
        self.mHandleY = 0
        self.mHandleW = 0
        self.mHandleH = 0
        # Hinge description.
        self.mHinge = ''
        self.mHingeCt = ''
        self.mIsFrame = False
        self.mHHArr = []
        # Panel material/color and sub-part lists.
        self.mPanelType = ''
        self.mPanelColor = ''
        self.boxlist = []
        self.panellist = []
        self.mYPos = 0
        self.mPParam = PDoorsParam()
        # Hinge drilling description / parameters.
        self.mHingeHoleDes = ''
        self.mHingeHoleParam = ''
        self.mHingeHoleExtra = ''
class TDoorRect(object):
    """Geometry and material state for a single sliding-door rectangle.

    Mirrors TDoorDoorRect but for sliding doors: outline in two variants,
    frame parameter dicts, panel material/colors, and sub-part lists.
    """
    def __init__(self):
        # Outline rectangles (raw and secondary variant).
        self.doorw = 0
        self.doorh = 0
        self.x0 = 0
        self.y0 = 0
        self.doorw2 = 0
        self.doorh2 = 0
        self.selected = False
        # Frame-strip parameter records (top-bottom and vertical).
        self.mUDBoxParam = {}
        self.mVBoxParam = {}
        # Panel material and colors.
        self.mPanelType = ''
        self.mPanelColor = ''
        self.mVBoxColor = ''
        # Sub-part lists and vertical placement.
        self.boxlist = []
        self.panellist = []
        self.mYPos = 0
class DoorRectPanel(object):
    """One panel (door core) rectangle of a hinged door.

    Carries the same rectangle in three coordinate sets: suffix 0 is the
    visible rectangle; 1 and 2 are derived variants (presumably cut and
    finished sizes -- confirm against callers).
    """
    def __init__(self):
        self.selected = False
        self.w0 = 0 # visible rectangle
        self.h0 = 0
        self.x0 = 0
        self.y0 = 0
        self.d0 = 0
        self.w1 = 0
        self.h1 = 0
        self.x1 = 0
        self.y1 = 0
        self.d1 = 0
        self.w2 = 0
        self.h2 = 0
        self.x2 = 0
        self.y2 = 0
        self.d2 = 0
        # Material / pricing attributes.
        self.PanelType = ''
        self.color = ''
        self.direct =''
        self.pricetype = ''
        self.color2 = ''
        self.price = 0
        self.price2 = 0
        self.thick = 0
class RectPanel(object):
    """One panel rectangle of a sliding door.

    Same three-coordinate-set layout as DoorRectPanel (suffix 0 visible,
    1 and 2 derived), plus a free-form memo and extra data payload.
    """
    def __init__(self):
        self.selected = False
        self.w0 = 0 # visible rectangle
        self.h0 = 0
        self.x0 = 0
        self.y0 = 0
        self.d0 = 0
        self.w1 = 0
        self.h1 = 0
        self.x1 = 0
        self.y1 = 0
        self.d1 = 0
        self.w2 = 0
        self.h2 = 0
        self.x2 = 0
        self.y2 = 0
        self.d2 = 0
        # Material / pricing attributes.
        self.PanelType = ''
        self.color = ''
        self.direct =''
        self.memo = ''
        self.pricetype = ''
        self.color2 = ''
        self.price = 0
        self.price2 = 0
        self.extradata = ''
class DoorRectBox(object):
    """One frame strip ("box") rectangle of a door.

    Stores the same rectangle in three coordinate sets (suffix 0 is the
    visible rectangle; 1 and 2 are derived variants), plus orientation,
    selection state, strip type and color.
    """

    def __init__(self):
        self.vh = False
        self.selected = False
        # w/h/x/y/d for each of the three coordinate sets, all zeroed.
        # Creation order matches the original: w0,h0,x0,y0,d0, w1,... d2.
        for suffix in ('0', '1', '2'):
            for prefix in ('w', 'h', 'x', 'y', 'd'):
                setattr(self, prefix + suffix, 0)
        self.boxtype = ''
        self.color = ''
def getfirstchild(node):
    """Return the first ELEMENT child of *node*, or None if it has none.

    Skips text, comment and other non-element nodes.
    """
    for candidate in node.childNodes:
        if candidate.nodeType == 1:  # Node.ELEMENT_NODE
            return candidate
    return None
# def LoadXML2Bom(xmlfile):
# global config
# def EnumChild(root):
# for i in range(0, root.childNodes.length):
#
# node = root.childNodes[i]
# if node.nodeType != 1 : continue
# if root.nodeName == u'我的模块':
# string = node.getAttribute(u'显示方式')
# if (string == u'3'):
# root.childNodes.remove(node)
# continue
# if node.childNodes.length > 0:
# for j in range(0, node.childNodes.length): #
# cnode = node.childNodes[j] #产品节点
# if cnode.nodeType != 1: continue
# string = cnode.getAttribute(u'类别')
# if (string == u'趟门,趟门') or (string == u'掩门,掩门'):
# for k in range(0, cnode.childNodes.length): #
# ccnode = cnode.childNodes[k]
# if ccnode.nodeType != 1: continue
# if ccnode.nodeName == u'模板':
# cccnode = getfirstchild(ccnode)
# childxml = cccnode.toxml('utf8')
# SADlog.debug(childxml)
#
# EnumChild(node)
#
# DOMTree = minidom.parse(xmlfile) # xmlfilepath+xmlfile
# root = DOMTree.getElementsByTagName(u'产品')[0]
# node = getfirstchild(root)
#
# EnumChild(node)
# return config
def IsHasObj(oldlist, obj):
    """Return True if *obj* is already present in *oldlist*.

    Replaces the original flag-variable form with the idiomatic
    containment test; result is identical.
    """
    return obj in oldlist
def Add2Config(name, obj):
    """Append *obj* to the hinged-door ('ymconfig') bucket *name*.

    Empty objects are ignored; duplicates (per IsHasObj) are skipped.
    """
    if obj == {}:
        return
    bucket = config['ymconfig'].setdefault(name, [])
    if IsHasObj(bucket, obj):
        return
    bucket.append(obj)
def Add2SlidingConfig(name, obj):
    """Append *obj* to the sliding-door ('tmconfig') bucket *name*.

    Duplicates (per IsHasObj) are skipped; unlike Add2Config, empty
    objects are NOT filtered out here.
    """
    bucket = config['tmconfig'].setdefault(name, [])
    if IsHasObj(bucket, obj):
        return
    bucket.append(obj)
def arryFindstr(arr, string):
    """Return True if *string* occurs in *arr*, else False.

    Fix: the original fell off the end and implicitly returned None on a
    miss; callers use the result in boolean context, so returning an
    explicit False is backward-compatible and clearer.
    """
    return string in arr
def AddmWJBomDetailList(wjname, door_bh, opendirect, bktype):
    """Collect hardware BOM entries for one door into the global config.

    Looks up the hardware BOM named *wjname*; if found, records it plus
    every matching detail row from the global mWJBomDetailList and each
    detail's referenced accessory.  A detail matches when its bomname
    equals the BOM's name and each of door_bh / opendirect / bktype is
    either a wildcard (0 or '') or matches the requested value.
    """
    pwjbom = GetWjBom(wjname)
    if pwjbom:
        Add2Config('mWJBomList', pwjbom)  # hardware accessory categories (.cfg)
        for i in range(len(mWJBomDetailList)):
            pwjbomdetail = mWJBomDetailList[i]
            if ((pwjbomdetail['bomname'] == pwjbom['name']) and ((pwjbomdetail['door_bh'] == 0)
                                                                 or (pwjbomdetail['door_bh'] == door_bh))
                    and ((pwjbomdetail['opendirect'] == '') or (pwjbomdetail['opendirect'] == opendirect))
                    and ((pwjbomdetail['bktype'] == '') or (arryFindstr(pwjbomdetail['bktypeAry'], bktype)))):
                Add2Config('mWJBomDetailList', pwjbomdetail)  # hardware accessory category data (.cfg)
                if pwjbomdetail:
                    pa = GetDoorAccessory(pwjbomdetail['name'])
                    Add2Config('mAccessoryList', pa)
def AddDoorPanelBomDetailList(bomclass, mat, color, color2, color3, pnll, pnlh):
    """Record every door-panel BOM detail matching a class and size.

    A detail matches when its bomclass equals *bomclass* and the panel
    size (pnll, pnlh) falls in its half-open window (lmin, lmax] x
    (hmin, hmax].  NOTE(review): mat/color/color2/color3 are accepted but
    unused here -- presumably kept for interface parity; confirm.
    """
    for i in range(0, len(mDoorPanelBomDetailList)):
        ppbdetail = mDoorPanelBomDetailList[i]
        if ((ppbdetail['bomclass'] == bomclass) and (float(ppbdetail['lmin']) < float(pnll)) and (float(ppbdetail['lmax']) >= float(pnll)) and (
                float(ppbdetail['hmin']) < float(pnlh)) and (float(ppbdetail['hmax']) >= float(pnlh))):
            Add2Config('mDoorPanelBomDetailList', ppbdetail)
def DoorRecalcDoor(door, t1, t2, tt1, tt2, m, mGridItem):
    """Resize one door in place: shift its mid-rail boxes and stretch/shift
    its panels after a width change *t1* and height change *t2*.

    tt1/tt2 are per-door x/y offsets and *m* the door index, so tt1*m /
    tt2*m position door m within the opening.  mGridItem selects which
    panel(s) absorb the height delta:
      6  - two sections, one fixed (only panel j==1 grows)
      7  - two sections, the other fixed (only panel j==0 grows)
      8  - three sections, middle unchanged (t2 split in half over j==0/j==2)
      9  - three sections, one fixed (only j==0 grows)
      10 - three sections (only j==2 grows)  -- source comment repeated the
           label of case 9; behavior differs, so labels are hedged
      otherwise t2 is spread evenly over all panels.
    Each box/panel carries three coordinate sets (suffix 0/1/2); all three
    are updated identically.  Mutates door.boxlist / door.panellist; no
    return value.
    """
    if (mGridItem==6) and (len(door.panellist)==2): # two equal sections (lower fixed, per source)
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            # (t2 / 1) * (0 + 0) is 0: boxes only get the per-door y offset
            rb.y0 = rb.y0 + (t2 / 1) * (0 + 0) + tt2 * m
            rb.y1 = rb.y1 + (t2 / 1) * (0 + 0) + tt2 * m
            rb.y2 = rb.y2 + (t2 / 1) * (0 + 0) + tt2 * m
            rb.x0 = rb.x0 + tt1 * m
            rb.x1 = rb.x1 + tt1 * m
            rb.x2 = rb.x2 + tt1 * m
            rb.w0 = rb.w0 + t1
            rb.w1 = rb.w1 + t1
            rb.w2 = rb.w2 + t1
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            n = 0
            if j==1: pnl.h0 = pnl.h0 + (t2 / 1)
            pnl.y0 = pnl.y0 + (t2 / 1) * n + tt2 * m
            if j==1: pnl.h1 = pnl.h1 + (t2 / 1)
            pnl.y1 = pnl.y1 + (t2 / 1) * n + tt2 * m
            if j==1: pnl.h2 = pnl.h2 + (t2 / 1)
            pnl.y2 = pnl.y2 + (t2 / 1) * n + tt2 * m
            pnl.x0 = pnl.x0 + tt1 * m
            pnl.x1 = pnl.x1 + tt1 * m
            pnl.x2 = pnl.x2 + tt1 * m
            pnl.w0 = pnl.w0 + t1
            pnl.w1 = pnl.w1 + t1
            pnl.w2 = pnl.w2 + t1
    elif (mGridItem==8) and (len(door.panellist)==3) : # three sections, middle stays unchanged
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + (t2 / 2) * (0 + 1) + tt2 * m
            rb.y1 = rb.y1 + (t2 / 2) * (0 + 1) + tt2 * m
            rb.y2 = rb.y2 + (t2 / 2) * (0 + 1) + tt2 * m
            rb.x0 = rb.x0 + tt1 * m
            rb.x1 = rb.x1 + tt1 * m
            rb.x2 = rb.x2 + tt1 * m
            rb.w0 = rb.w0 + t1
            rb.w1 = rb.w1 + t1
            rb.w2 = rb.w2 + t1
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            n = j
            if j == 2: n = 1
            if j != 1: pnl.h0 = pnl.h0 + (t2 / 2)
            pnl.y0 = pnl.y0 + (t2 / 2) * n + tt2 * m
            if j != 1: pnl.h1 = pnl.h1 + (t2 / 2)
            pnl.y1 = pnl.y1 + (t2 / 2) * n + tt2 * m
            if j != 1: pnl.h2 = pnl.h2 + (t2 / 2)
            pnl.y2 = pnl.y2 + (t2 / 2) * n + tt2 * m
            pnl.x0 = pnl.x0 + tt1 * m
            pnl.x1 = pnl.x1 + tt1 * m
            pnl.x2 = pnl.x2 + tt1 * m
            pnl.w0 = pnl.w0 + t1
            pnl.w1 = pnl.w1 + t1
            pnl.w2 = pnl.w2 + t1
    elif (mGridItem==7) and (len(door.panellist)==2): # two equal sections (upper fixed, per source)
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + (t2 / 1) * (0 + 1) + tt2 * m
            rb.y1 = rb.y1 + (t2 / 1) * (0 + 1) + tt2 * m
            rb.y2 = rb.y2 + (t2 / 1) * (0 + 1) + tt2 * m
            rb.x0 = rb.x0 + tt1 * m
            rb.x1 = rb.x1 + tt1 * m
            rb.x2 = rb.x2 + tt1 * m
            rb.w0 = rb.w0 + t1
            rb.w1 = rb.w1 + t1
            rb.w2 = rb.w2 + t1
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            n = 0
            if j==1: n =1
            if j==0: pnl.h0 =pnl.h0 + (t2 / 1)
            pnl.y0 = pnl.y0 + (t2 / 1) * n + tt2 * m
            if j==0: pnl.h1 =pnl.h1 + (t2 / 1)
            pnl.y1 = pnl.y1 + (t2 / 1) * n + tt2 * m
            if j==0: pnl.h2 =pnl.h2 + (t2 / 1)
            pnl.y2 = pnl.y2 + (t2 / 1) * n + tt2 * m
            pnl.x0 = pnl.x0 + tt1 * m
            pnl.x1 = pnl.x1 + tt1 * m
            pnl.x2 = pnl.x2 + tt1 * m
            pnl.w0 = pnl.w0 + t1
            pnl.w1 = pnl.w1 + t1
            pnl.w2 = pnl.w2 + t1
    elif (mGridItem==9) and (len(door.panellist)==3) : # three sections, two fixed (only panel 0 grows)
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + (t2 / 1) * (0 + 1) + tt2 * m
            rb.y1 = rb.y1 + (t2 / 1) * (0 + 1) + tt2 * m
            rb.y2 = rb.y2 + (t2 / 1) * (0 + 1) + tt2 * m
            rb.x0 = rb.x0 + tt1 * m
            rb.x1 = rb.x1 + tt1 * m
            rb.x2 = rb.x2 + tt1 * m
            rb.w0 = rb.w0 + t1
            rb.w1 = rb.w1 + t1
            rb.w2 = rb.w2 + t1
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            n = 0
            if j > 0: n = 1
            if j==0: pnl.h0 = pnl.h0 + (t2 / 1)
            pnl.y0 = pnl.y0 + (t2 / 1) * n + tt2 * m
            if j==0: pnl.h1 = pnl.h1 + (t2 / 1)
            pnl.y1 = pnl.y1 + (t2 / 1) * n + tt2 * m
            if j==0: pnl.h2 = pnl.h2 + (t2 / 1)
            pnl.y2 = pnl.y2 + (t2 / 1) * n + tt2 * m
            pnl.x0 = pnl.x0 + tt1 * m
            pnl.x1 = pnl.x1 + tt1 * m
            pnl.x2 = pnl.x2 + tt1 * m
            pnl.w0 = pnl.w0 + t1
            pnl.w1 = pnl.w1 + t1
            pnl.w2 = pnl.w2 + t1
    elif (mGridItem==10) and (len(door.panellist)==3) : # three sections, two fixed (only panel 2 grows; source comment duplicated case 9's label)
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + (t2 / 1) * (0 + 0) + tt2 * m
            rb.y1 = rb.y1 + (t2 / 1) * (0 + 0) + tt2 * m
            rb.y2 = rb.y2 + (t2 / 1) * (0 + 0) + tt2 * m
            rb.x0 = rb.x0 + tt1 * m
            rb.x1 = rb.x1 + tt1 * m
            rb.x2 = rb.x2 + tt1 * m
            rb.w0 = rb.w0 + t1
            rb.w1 = rb.w1 + t1
            rb.w2 = rb.w2 + t1
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            n = 0
            if j==2 : pnl.h0 = pnl.h0 + (t2 / 1)
            pnl.y0 = pnl.y0 + (t2 / 1) * n + tt2 * m
            if j==2 : pnl.h1 = pnl.h1 + (t2 / 1)
            pnl.y1 = pnl.y1 + (t2 / 1) * n + tt2 * m
            if j==2 : pnl.h2 = pnl.h2 + (t2 / 1)
            pnl.y2 = pnl.y2 + (t2 / 1) * n + tt2 * m
            pnl.x0 = pnl.x0 + tt1 * m
            pnl.x1 = pnl.x1 + tt1 * m
            pnl.x2 = pnl.x2 + tt1 * m
            pnl.w0 = pnl.w0 + t1
            pnl.w1 = pnl.w1 + t1
            pnl.w2 = pnl.w2 + t1
    else:
        # default: spread the height delta evenly over every panel
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + (t2 / len(door.panellist)) * (j + 1) + tt2 * m
            rb.y1 = rb.y1 + (t2 / len(door.panellist)) * (j + 1) + tt2 * m
            rb.y2 = rb.y2 + (t2 / len(door.panellist)) * (j + 1) + tt2 * m
            rb.x0 = rb.x0 + tt1 * m
            rb.x1 = rb.x1 + tt1 * m
            rb.x2 = rb.x2 + tt1 * m
            rb.w0 = rb.w0 + t1
            rb.w1 = rb.w1 + t1
            rb.w2 = rb.w2 + t1
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            pnl.h0 = pnl.h0 + (t2 / len(door.panellist))
            pnl.y0 = pnl.y0 + (t2 / len(door.panellist)) * j + tt2 * m
            pnl.h1 = pnl.h1 + (t2 / len(door.panellist))
            pnl.y1 = pnl.y1 + (t2 / len(door.panellist)) * j + tt2 * m
            pnl.h2 = pnl.h2 + (t2 / len(door.panellist))
            pnl.y2 = pnl.y2 + (t2 / len(door.panellist)) * j + tt2 * m
            pnl.x0 = pnl.x0 + tt1 * m
            pnl.x1 = pnl.x1 + tt1 * m
            pnl.x2 = pnl.x2 + tt1 * m
            pnl.w0 = pnl.w0 + t1
            pnl.w1 = pnl.w1 + t1
            pnl.w2 = pnl.w2 + t1
def RecalcDoor(door, t1, t2, hh, mGridItem):
    """Apply a height change *hh* to one door's boxes and panels, choosing the
    distribution by *mGridItem* (same case numbering as DoorRecalcDoor).

    Unlike DoorRecalcDoor there is no x/width handling here — only y/h — and
    t2 is recomputed from hh inside each branch (the incoming t2 is unused).
    t1 is accepted but never read.  Mutates door in place; returns None.
    """
    if (mGridItem == 6) and (len(door.panellist)==2): # two sections, one fixed: only panel 1 grows
        t2 = hh
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            # t2 * (0 + 0) is 0: boxes stay where they are in this case
            rb.y0 = rb.y0 + t2 * (0 + 0)
            rb.y1 = rb.y1 + t2 * (0 + 0)
            rb.y2 = rb.y2 + t2 * (0 + 0)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            if j==1:
                pnl.h0 = pnl.h0 + t2
                pnl.y0 = pnl.y0
                pnl.h1 = pnl.h1 + t2
                pnl.y1 = pnl.y1
                pnl.h2 = pnl.h2 + t2
                pnl.y2 = pnl.y2
    elif (mGridItem == 7) and (len(door.panellist)==2): # two sections, the other fixed: panel 0 grows, panel 1 shifts
        t2 = hh
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + t2 * (0 + 1)
            rb.y1 = rb.y1 + t2 * (0 + 1)
            rb.y2 = rb.y2 + t2 * (0 + 1)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            if j==1:
                pnl.h0 = pnl.h0
                pnl.y0 = pnl.y0 + t2
                pnl.h1 = pnl.h1
                pnl.y1 = pnl.y1 + t2
                pnl.h2 = pnl.h2
                pnl.y2 = pnl.y2 + t2
            elif j==0:
                pnl.h0 = pnl.h0 + t2
                pnl.y0 = pnl.y0
                pnl.h1 = pnl.h1 + t2
                pnl.y1 = pnl.y1
                pnl.h2 = pnl.h2 + t2
                pnl.y2 = pnl.y2
    elif (mGridItem == 8) and (len(door.panellist)==3): # three sections, middle fixed: hh split over panels 0 and 2
        t2 = hh / 2
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + t2 * (0 + 1)
            rb.y1 = rb.y1 + t2 * (0 + 1)
            rb.y2 = rb.y2 + t2 * (0 + 1)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            if j==1:
                pnl.h0 = pnl.h0
                pnl.y0 = pnl.y0 + t2
                pnl.h1 = pnl.h1
                pnl.y1 = pnl.y1 + t2
                pnl.h2 = pnl.h2
                pnl.y2 = pnl.y2 + t2
            elif j==0:
                pnl.h0 = pnl.h0 + t2
                pnl.y0 = pnl.y0
                pnl.h1 = pnl.h1 + t2
                pnl.y1 = pnl.y1
                pnl.h2 = pnl.h2 + t2
                pnl.y2 = pnl.y2
            elif j==2:
                pnl.h0 = pnl.h0 + t2
                pnl.y0 = pnl.y0 + t2
                pnl.h1 = pnl.h1 + t2
                pnl.y1 = pnl.y1 + t2
                pnl.h2 = pnl.h2 + t2
                pnl.y2 = pnl.y2 + t2
    elif (mGridItem == 9) and (len(door.panellist)==3): # three sections, two fixed: panel 0 grows, panels 1..2 shift
        t2 = hh
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + t2 * (0 + 1)
            rb.y1 = rb.y1 + t2 * (0 + 1)
            rb.y2 = rb.y2 + t2 * (0 + 1)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            if j > 0:
                pnl.h0 = pnl.h0
                pnl.y0 = pnl.y0 + t2
                pnl.h1 = pnl.h1
                pnl.y1 = pnl.y1 + t2
                pnl.h2 = pnl.h2
                pnl.y2 = pnl.y2 + t2
            elif j==0:
                pnl.h0 = pnl.h0 + t2
                pnl.y0 = pnl.y0
                pnl.h1 = pnl.h1 + t2
                pnl.y1 = pnl.y1
                pnl.h2 = pnl.h2 + t2
                pnl.y2 = pnl.y2
    elif (mGridItem == 10) and (len(door.panellist)==3): # three sections, two fixed: only panel 2 grows
        t2 = hh
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + t2 * (0 + 0)
            rb.y1 = rb.y1 + t2 * (0 + 0)
            rb.y2 = rb.y2 + t2 * (0 + 0)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            if j > 1:
                pnl.h0 = pnl.h0 + t2
                pnl.y0 = pnl.y0
                pnl.h1 = pnl.h1 + t2
                pnl.y1 = pnl.y1
                pnl.h2 = pnl.h2 + t2
                pnl.y2 = pnl.y2
            elif j==0:
                # deliberate no-op branch kept from the original (j==1 is untouched too)
                pnl.h0 = pnl.h0
                pnl.y0 = pnl.y0
                pnl.h1 = pnl.h1
                pnl.y1 = pnl.y1
                pnl.h2 = pnl.h2
                pnl.y2 = pnl.y2
    else:
        # default: spread hh evenly over all panels
        t2 = hh / (len(door.panellist))
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            rb.y0 = rb.y0 + t2 * (j + 1)
            rb.y1 = rb.y1 + t2 * (j + 1)
            rb.y2 = rb.y2 + t2 * (j + 1)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            pnl.h0 = pnl.h0 + t2
            pnl.y0 = pnl.y0 + t2 * j
            pnl.h1 = pnl.h1 + t2
            pnl.y1 = pnl.y1 + t2 * j
            pnl.h2 = pnl.h2 + t2
            pnl.y2 = pnl.y2 + t2 * j
def getymconfig(xmltemplate):
    """Parse one hinged-door template XML and register every record it uses
    (expressions, types, params, handles, hinges, panel types, colours,
    hardware BOM details, door XML fragments) into config['ymconfig'] via
    Add2Config.

    *xmltemplate* is the XML text of a single template node.  Reads the
    module-level master lists, mutates the global config, returns None.
    Bails out early when the core lookups (exp/type/param) all fail.
    """
    root = ET.fromstring(xmltemplate)
    attri = root.get('门洞宽','0')        # opening width (unused below)
    mL = float(attri)
    attri = root.get('门洞高', '0')       # opening height (unused below)
    mH = float(attri)
    attri = root.get('单门数量类型', '')   # per-door quantity expression
    mPExp = GetDoorsExp(attri)
    Add2Config('mExpList', mPExp)
    string = root.get('门类型', '')       # door type
    mPType = GetDoorsType(string)
    Add2Config('mTypeList', mPType)
    attri = root.get('门框类型', '')      # door-frame type
    mPParam = GetDoorsParam(string, attri)
    mMyVBoxColor = root.get('门框颜色', '')  # door-frame colour
    Add2Config('mParamList', mPParam)
    attri = root.get('中横框类型', '')    # mid horizontal-rail type
    mPHBoxParam = GetHBoxParam(attri)
    Add2Config('mDoorHBoxParamList', mPHBoxParam)
    mGridItem = 0
    attri = root.get('均分')             # equal-split mode (see RecalcDoor cases)
    if attri != None : mGridItem = int(attri)
    mDataMode = int(root.get('DataMode', '0'))
    m = -1
    if (mPExp == {}) or (mPType == {}) or (mPParam == {}):
        mCopyDoor = -1
        return
    mIsVertical = False
    attri = root.get('是否竖排',False)    # vertical layout flag
    if (attri != None) and (attri == 'True') :
        mIsVertical = True
    m = -1
    mDoorsList = []
    # collect every <单门> (single door) node into TDoorDoorRect objects
    for i in range(0, len(root)):
        node = root[i]
        if (node.tag != '单门'): continue
        m = m + 1
        door = TDoorDoorRect()
        mDoorsList.append(door)
        door.mPParam = mPParam
        door.mHandle = node.get('拉手', '')       # handle
        door.mOpenDirect = node.get('打开方向', '')  # opening direction
        door.mHinge = node.get('门铰', '')        # hinge
        # door cores (<门芯>) — three coordinate sets per panel (suffix 0/1/2)
        for j in range(0, len(node)):
            cnode = node[j]
            if cnode.tag != '门芯' : continue
            pnl = DoorRectPanel()
            door.panellist.append(pnl)
            pnl.PanelType = cnode.get('类型', '')
            pnl.color = cnode.get('颜色', '')
            pnl.color2 = cnode.get('颜色2', '')
            attri = cnode.get('w0', '0')
            pnl.w0 = float(attri)
            attri = cnode.get('h0', '0')
            pnl.h0 = float(attri)
            attri = cnode.get('x0', '0')
            pnl.x0 = float(attri)
            attri = cnode.get('y0', '0')
            pnl.y0 = float(attri)
            attri = cnode.get('d0', '0')
            pnl.d0 = float(attri)
            attri = cnode.get('w1', '0')
            pnl.w1 = float(attri)
            attri = cnode.get('h1', '0')
            pnl.h1 = float(attri)
            attri = cnode.get('x1', '0')
            pnl.x1 = float(attri)
            attri = cnode.get('y1', '0')
            pnl.y1 = float(attri)
            attri = cnode.get('d1', '0')
            pnl.d1 = float(attri)
            attri = cnode.get('w2', '0')
            pnl.w2 = float(attri)
            attri = cnode.get('h2', '0')
            pnl.h2 = float(attri)
            attri = cnode.get('x2', '0')
            pnl.x2 = float(attri)
            attri = cnode.get('y2', '0')
            pnl.y2 = float(attri)
            attri = cnode.get('d2', '0')
            pnl.d2 = float(attri)
        # mid horizontal rails (<中横框>); note this loop reuses the outer
        # loop variable name i — kept as-is because the outer loop is restarted
        # by range() on the next iteration anyway
        for i in range(0, len(node)):
            cnode = node[i]
            if (cnode.tag != '中横框'): continue
            rb = DoorRectBox()
            rb.selected = False
            door.boxlist.append(rb)
            attri = cnode.get('类型', '0')
            rb.boxtype = attri
            attri = cnode.get('颜色', '0')
            rb.color = attri
            rb.vh = True
            attri = cnode.get('vh', '0')
            if attri == 'False':
                rb.vh = False
            attri = cnode.get('w0', '0')
            rb.w0 = float(attri)
            attri = cnode.get('h0', '0')
            rb.h0 = float(attri)
            attri = cnode.get('x0', '0')
            rb.x0 = float(attri)
            attri = cnode.get('y0', '0')
            rb.y0 = float(attri)
            attri = cnode.get('d0', '0')
            rb.d0 = float(attri)
            attri = cnode.get('w1', '0')
            rb.w1 = float(attri)
            attri = cnode.get('h1', '0')
            rb.h1 = float(attri)
            attri = cnode.get('x1', '0')
            rb.x1 = float(attri)
            attri = cnode.get('y1', '0')
            rb.y1 = float(attri)
            attri = cnode.get('d1', '0')
            rb.d1 = float(attri)
            attri = cnode.get('w2', '0')
            rb.w2 = float(attri)
            attri = cnode.get('h2', '0')
            rb.h2 = float(attri)
            attri = cnode.get('x2', '0')
            rb.x2 = float(attri)
            attri = cnode.get('y2', '0')
            rb.y2 = float(attri)
            attri = cnode.get('d2', '0')
            rb.d2 = float(attri)
    # register all per-door dependent records
    for i in range(0, len(mDoorsList)):
        door = mDoorsList[i]
        bh = Delphi_Round(mPType['depth'])
        SADlog.debug('拉手=' + door.mHandle)
        phandle = GetDoorsHandle(door.mHandle)
        Add2Config('mHandleList', phandle)  # handles
        SADlog.debug('门铰=' + door.mHinge)
        phinge = GetDoorsHinge(door.mHinge, mPType)
        Add2Config('mHingeList', phinge)  # hinges
        AddmWJBomDetailList(phandle['wjname'], bh, door.mOpenDirect, mPParam['name'])
        AddmWJBomDetailList(phinge['wjname'], bh, door.mOpenDirect, mPParam['name'])
        if (mPType['isframe']):
            # framed door: register each door core
            for j in range(0, len(door.panellist)):
                pnl = door.panellist[j]
                pnltype = GetDoorPanelType(mPParam['name'], pnl.PanelType) #GetDoorPanelType
                Add2Config('mDoorPanelTypeList', pnltype)  # panel-type table
                if pnltype:
                    mytype = pnltype['mytype']
                    AddDoorPanelBomDetailList(pnltype['panelbom'], pnl.PanelType, pnl.color, pnl.color2, mMyVBoxColor,
                                              pnl.w1, pnl.h1)
                    SADlog.debug('门芯类型=' + pnl.PanelType)
                    pssexp = GetDoorSSExp(pnl.PanelType)
                    Add2Config('mShutterExpList', pssexp)  # shutter-panel expressions
                    SADlog.debug('颜色=' + pnl.color)
                    pcolorclass = GetColorClass('门芯',pnl.color)
                    Add2Config('mColorClassList', pcolorclass)  # colour class — core colour
            # mid horizontal rails of this door
            for j in range(0, len(door.boxlist)):
                rb = door.boxlist[j]
                boxtype = rb.boxtype
                hbox = GetHBoxParam(boxtype)
                Add2Config('mDoorHBoxParamList', hbox)
                wjname = hbox['wjname']
                AddmWJBomDetailList(wjname, bh, door.mOpenDirect, mPParam['name'])
        else:
            # frameless door: thickness comes from the first panel's type
            if ( len(door.panellist) > 0 ):
                pnl = door.panellist[0]
                pnltype = GetDoorPanelType(mPParam['name'], pnl.PanelType)
                if ( pnltype ): bh = pnltype['thick']
            for j in range(0, len(door.panellist)):
                pnl = door.panellist[j]
                pnltype = GetDoorPanelType(mPParam['name'], pnl.PanelType) # GetDoorPanelType
                Add2Config('mDoorPanelTypeList', pnltype)
                if pnltype:
                    AddDoorPanelBomDetailList(pnltype['panelbom'], pnl.PanelType, pnl.color, pnl.color2, mMyVBoxColor,
                                              pnl.w1, pnl.h1)
                    SADlog.debug('门芯类型=' + pnl.PanelType)
                    pssexp = GetDoorSSExp(pnl.PanelType)
                    Add2Config('mShutterExpList', pssexp)  # shutter-panel expressions
                    SADlog.debug('颜色=' + pnl.color)
                    pcolorclass = GetColorClass('门芯', pnl.color)
                    Add2Config('mColorClassList', pcolorclass)  # colour class — core colour
    pcolorclass2 = GetColorClass2(mPParam['name'], mMyVBoxColor)
    Add2Config('mColorClass2List', pcolorclass2)  # colour class 2 — frame colour
    if (mDataMode == 0):
        wjname = mPParam['wjname']
        door_bh, opendirect, bktype = 0, '', mPParam['name']
        AddmWJBomDetailList(wjname, door_bh, opendirect, bktype)
    # register the three door-XML fragments (left / right / generic)
    left_doorxml = mPParam['left_doorxml']
    pxml = returnxml(left_doorxml)
    Add2Config('mDoorXMLList', pxml)
    right_doorxml = mPParam['right_doorxml']
    pxml = returnxml(right_doorxml)
    Add2Config('mDoorXMLList', pxml)
    doorxml = mPParam['doorxml']
    pxml = returnxml(doorxml)
    Add2Config('mDoorXMLList', pxml)
def GetWjBomDetaildata(wjname, skcolor1, skcolor2, skcolor3, skcolor4):
    """Register accessories and colour classes for every sliding-door BOM
    detail row whose bomname equals *wjname*.

    skcolor1..4 are the concrete colours substituted for the placeholder
    tokens $竖框配件颜色1..4 via ToColor.

    NOTE(review): ``result`` is never assigned after initialisation, so the
    function always returns {} — callers pass this straight to
    Add2SlidingConfig('SlidingWjBomDetail', ...), which therefore only ever
    stores an empty dict.  Possibly a latent bug; confirm before changing.
    """
    result = {}
    pa = {}
    for m in range(0 , len(mSlidingWjBomDetailList)):
        pbomdetail = mSlidingWjBomDetailList[m]
        if pbomdetail['bomname'] == wjname :
            pa = GetSlidingAccessory(pbomdetail['name'])
            if pa:
                color = ToColor(pa['color'], skcolor1, skcolor2, skcolor3, skcolor4)
                Add2SlidingConfig('SlidingAccessory', pa)
                pcolorclass = GetSlidingColorClass('配件', pa['name'], color)
                Add2SlidingConfig('SlidingColorClass', pcolorclass)
    return result
# ------------------------------ sliding doors (趟门) ------------------------------
def GetSlidingExp(name):
    """Sliding-door quantity expression named *name* (last match wins, {} if none)."""
    hit = {}
    for rec in mSlidingExpList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetSlidingType(name):
    """Sliding-door type record named *name* (last match wins, {} if none)."""
    hit = {}
    for rec in mSlidingTypeList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetSlidingParam(name):
    """Sliding-door frame parameter record named *name* ({} if none)."""
    hit = {}
    for rec in mSlidingParamList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetTrackParam(name):
    """Track (rail) parameter record named *name* ({} if none)."""
    hit = {}
    for rec in mTrackParamList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetSlidingHBoxParam(name):
    """Sliding-door mid horizontal-rail parameter record named *name* ({} if none)."""
    hit = {}
    for rec in mHBoxParamList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetUDBoxParam(name):
    """Top/bottom rail parameter record named *name* ({} if none)."""
    hit = {}
    for rec in mUDBoxParamList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetVBoxParam(name):
    """Vertical-frame parameter record named *name* ({} if none)."""
    hit = {}
    for rec in mVBoxParamList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetSlidingColorClass(*argv):
    """Look up a sliding-door colour-class record.

    3-arg form (myclass, material, color): first record matching all three.
    2-arg form (myclass, color): first record with the same class whose colour
    condition holds.

    NOTE(review): the 2-arg condition ``(color != '') or (color == ...)``
    means ANY non-empty colour matches the first record of that class
    regardless of its actual colour; possibly ``==``/``or`` were swapped —
    confirm against the Delphi original before changing.
    Returns {} when nothing matches.
    """
    if len(argv) == 3:
        styp = argv[0]
        stype2 = argv[1]
        clor = argv[2]
        Result = {}
        for i in range(0,len(mSlidingColorClassList)):
            if ((mSlidingColorClassList[i]['myclass'] == styp) and (mSlidingColorClassList[i]['mat'] == stype2) and
                    ((clor == mSlidingColorClassList[i]['color']))):
                Result = mSlidingColorClassList[i]
                break
        return Result
    if len(argv) == 2:
        myclass = argv[0]
        color = argv[1]
        Result = {}
        for i in range(0, len(mSlidingColorClassList)):
            if ((mSlidingColorClassList[i]['myclass'] == myclass) and
                    ((color != '') or (color == mSlidingColorClassList[i]['color']))):
                Result = mSlidingColorClassList[i]
                break
        return Result
def GetSlidingAccessory(name):
    """First sliding-door accessory record named *name* ({} if none)."""
    for rec in mSlidingAccessoryList:
        if rec['name'] == name:
            return rec
    return {}
def GetSlidingPanelType(bktype, name):
    """Sliding panel type matching both *name* and *bktype*.

    Last match wins; returns None (not {}) when nothing matches.
    """
    hit = None
    for rec in PanelTypeList:
        if rec['name'] == name and rec['bktype'] == bktype:
            hit = rec
    return hit
def GetSlidingSSExp(name):
    """Shutter-panel expression record whose paneltype is *name* ({} if none)."""
    hit = {}
    for rec in mSSExpList:
        if rec['paneltype'] == name:
            hit = rec
    return hit
def ToColor(c, c1, c2, c3, c4):
    """Resolve the placeholder colour tokens $竖框配件颜色1..4 to c1..c4.

    Any other value of *c* is returned unchanged.
    """
    placeholders = {
        '$竖框配件颜色1': c1,
        '$竖框配件颜色2': c2,
        '$竖框配件颜色3': c3,
        '$竖框配件颜色4': c4,
    }
    return placeholders.get(c, c)
def GetPanelBomdata(bomclass, mat, color, color2, color3, pnll, pnlh):
    """Last panel-BOM detail whose class matches *bomclass* and whose
    (lmin, lmax] x (hmin, hmax] window contains (pnll, pnlh); {} if none.

    The mat/color arguments are unused but kept for interface compatibility.
    """
    hit = {}
    length = float(pnll)
    height = float(pnlh)
    for rec in mPanelBomDetailList:
        if (rec['bomclass'] == bomclass
                and float(rec['lmin']) < length <= float(rec['lmax'])
                and float(rec['hmin']) < height <= float(rec['hmax'])):
            hit = rec
    return hit
# ------------------------------ hinged doors (掩门) ------------------------------
def GetDoorsExp(name):
    """Hinged-door quantity expression named *name* (last match wins, {} if none)."""
    hit = {}
    for rec in mExpList:
        if rec['name'] == name:
            hit = rec
    return hit
# door type
def GetDoorsType(name):
    """Hinged-door type record named *name* ({} if none)."""
    hit = {}
    for rec in mTypeList:
        if rec['name'] == name:
            hit = rec
    return hit
# hinged-door parameters
def GetDoorsParam(name1, name2):
    """Frame parameter record with name *name2* for door type *name1* ({} if none)."""
    hit = {}
    for rec in mParamList:
        if rec['name'] == name2 and rec['DoorsType'] == name1:
            hit = rec
    return hit
# mid horizontal-rail type
def GetHBoxParam(name):
    """Hinged-door mid horizontal-rail parameter record named *name* ({} if none)."""
    hit = {}
    for rec in mDoorHBoxParamList:
        if rec['name'] == name:
            hit = rec
    return hit
# handles
def GetDoorsHandle(name):
    """Handle record named *name* ({} if none)."""
    hit = {}
    for rec in mHandleList:
        if rec['name'] == name:
            hit = rec
    return hit
# hinges
def GetDoorsHinge(mj, dt):
    """Hinge record named *mj* for door type *dt*.

    Returns None (not {}) when *dt* is falsy, *mj* is empty, or nothing
    matches; on several matches the last one wins.
    """
    if not dt or mj == '':
        return None
    hit = None
    for rec in mHingeList:
        if rec['name'] == mj:
            hit = rec
    return hit
# hardware BOM lookup (original comment mislabelled this as shutter config)
def GetWjBom(name):
    """Hardware BOM record named *name* ({} if none)."""
    hit = {}
    for rec in mWJBomList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetDoorAccessory(name):
    """Hinged-door accessory record named *name* ({} if none)."""
    hit = {}
    for rec in mAccessoryList:
        if rec['name'] == name:
            hit = rec
    return hit
def GetColorClass(myclass, color):
    """Colour-class record matching both *myclass* and *color* ({} if none)."""
    hit = {}
    for rec in mColorClassList:
        if rec['myclass'] == myclass and rec['color'] == color:
            hit = rec
    return hit
def GetColorClass2(bktype, color):
    """Frame colour-class record matching both *bktype* and *color* ({} if none)."""
    hit = {}
    for rec in mColorClass2List:
        if rec['bktype'] == bktype and rec['color'] == color:
            hit = rec
    return hit
def GetDoorPanelType(bktype, name):
    """Door-core panel type named *name* for frame *bktype*.

    Scans in list order and returns the first record whose name matches and
    whose bktype is either the wildcard '*' (checked first per record) or
    equal to *bktype*; {} when nothing matches.
    """
    for rec in mDoorPanelTypeList:
        if rec['name'] == name and rec['bktype'] in ('*', bktype):
            return rec
    return {}
def GetDoorSSExp(name):
    """Shutter-panel expression whose paneltype is *name* ({} if none)."""
    hit = {}
    for rec in mShutterExpList:
        if rec['paneltype'] == name:
            hit = rec
    return hit
def GetSSExp(name):
    """Shutter-panel expression whose paneltype is *name* ({} if none).

    NOTE: identical to GetDoorSSExp; kept because external callers may use
    either name.
    """
    hit = {}
    for rec in mShutterExpList:
        if rec['paneltype'] == name:
            hit = rec
    return hit
def returnxml(string):
    """Door-XML record named *string*.

    Quirk preserved from the original: when no record matches, the LAST list
    element is returned (or {} for an empty list), not an empty dict.
    """
    found = {}
    for found in mDoorXMLList:
        if string == found['name']:
            break
    return found
def gettmconfig(xmltemplate):
    """Parse one sliding-door template XML and register every record it uses
    (expressions, types, frame/track/rail params, panel types, colours,
    hardware BOM details) into config['tmconfig'] via Add2SlidingConfig.

    *xmltemplate* is the XML text of a single template node.  Reads the
    module-level master lists, mutates the global config, returns None;
    bails out early when any core lookup fails.

    Fix: the two 'HSHBoxParam'/'SHBoxParam' initialisations had a trailing
    comma ("= [],"), which assigned the 1-tuple ([],) instead of a list and
    made any later Add2SlidingConfig(...).append on them raise
    AttributeError.  The commas have been removed.
    """
    root = ET.fromstring(xmltemplate)
    attri = root.get('门洞宽', '0')      # opening width (unused below)
    mL = int(attri)
    attri = root.get('门洞高', '0')      # opening height (unused below)
    mH = int(attri)
    attri = root.get('延长导轨','0')     # track extension
    mAddLength = int(attri)
    attri = root.get('单门数量类型', '')
    pexp = GetSlidingExp(attri)  # per-door quantity expression
    Add2SlidingConfig('SlidingExp', pexp)
    attri = root.get('门类型','')
    pstype = GetSlidingType(attri)
    Add2SlidingConfig('SlidingType', pstype)
    attri = root.get('边框类型', '')
    psp = GetSlidingParam(attri)
    Add2SlidingConfig('SlidingParam', psp)
    attri = root.get('上下横框类型', '')
    pudbox = GetUDBoxParam(attri)
    Add2SlidingConfig('UDBoxParam', pudbox)
    attri = root.get('上下轨类型', '')
    ptrack = GetTrackParam(attri)
    Add2SlidingConfig('TrackParam', ptrack)
    attri = root.get('中横框类型', '')
    phbox = GetSlidingHBoxParam(attri)
    Add2SlidingConfig('HboxParam', phbox)
    pvbox = {}
    if psp: pvbox = GetVBoxParam(psp['vboxtype'])
    Add2SlidingConfig('VBoxParam', pvbox)
    attri = root.get('门板类型', '')
    if attri:
        mMyPanelType = attri
    else:
        mMyPanelType = ''
    attri = root.get('门颜色', '')
    if attri:
        mMySlidingColor = attri
    else:
        mMySlidingColor = ''
    attri = root.get('竖框颜色','')
    if attri : mMyVBoxColor = attri
    else: mMyVBoxColor = ''
    attri = root.get('上横框颜色', '')
    if attri:
        mMyUpBoxColor = attri
    else:
        mMyUpBoxColor = ''
    attri = root.get('下横框颜色', '')
    if attri:
        mMyDownBoxColor = attri
    else:
        mMyDownBoxColor = ''
    attri = root.get('上轨颜色', '')
    if attri : mMyUpTrackColor = attri
    else: mMyUpTrackColor =''
    attri = root.get('下轨颜色', '')
    if attri:
        mMyDownTrackColor = attri
    else:
        mMyDownTrackColor = ''
    attri = root.get('中横框颜色', '')
    if attri:
        mMyHBoxColor = attri
    else:
        mMyHBoxColor = ''
    attri = root.get('门板颜色', '')
    if attri:
        mMyPanelColor = attri
    else:
        mMyPanelColor = ''
    mDataMode = int(root.get('DataMode', '0'))
    mGridItem = 0
    attri = root.get('均分')
    if attri != None: mGridItem = int(attri)
    if (pexp =={}) or (pstype == {}) or (psp == {}) or (pudbox =={}) \
            or (ptrack == {}) or (phbox =={}) or (pvbox == {}):
        return
    nHasMzhb = False  # door conversion table flag
    mSlidingExp = pexp
    mSlidingParam = psp
    mSlidingType = pstype
    mTrackParam = ptrack
    UDBoxParam = pudbox
    HBoxParam = phbox
    VBoxParam = pvbox
    mDoorsList = []
    m = -1
    # collect every <单门> (single door) node
    for i in range(0, len(root)):
        node = root[i]
        if node.tag !='单门': continue
        m = m+1
        door = TDoorRect()
        mDoorsList.append(door)
        attri = node.get('宽')
        door.doorw = float(attri)
        attri = node.get('高')
        door.doorh = float(attri)
        attri = node.get('X0')
        door.x0 = float(attri)
        attri = node.get('Y0')
        door.y0 = float(attri)
        attri = node.get('竖框类型')
        pvbox = GetVBoxParam(attri)
        if pvbox: door.mVBoxParam = pvbox
        attri = node.get('竖框颜色')
        door.mVBoxColor = attri
        attri = node.get('上下横框类型')
        pudbox = GetUDBoxParam(attri)
        if pudbox: door.mUDBoxParam = pudbox
        # mid horizontal rails (<中横框>)
        for j in range(0, len(node)):
            cnode = node[j]
            if cnode.tag != '中横框' : continue
            rb = DoorRectBox()
            door.boxlist.append(rb)
            attri = cnode.get('类型')
            rb.boxtype = attri
            attri = cnode.get('颜色')
            rb.color = attri
            rb.vh = True
            attri = cnode.get('vh')
            if attri == 'False' : rb.vh = False
            attri = cnode.get('w0')
            rb.w0 = float(attri)
            attri = cnode.get('h0')
            rb.h0 = float(attri)
            attri = cnode.get('x0')
            rb.x0 = float(attri)
            attri = cnode.get('y0')
            rb.y0 = float(attri)
            attri = cnode.get('d0')
            rb.d0 = float(attri)
            attri = cnode.get('w1')
            rb.w1 = float(attri)
            attri = cnode.get('h1')
            rb.h1 = float(attri)
            attri = cnode.get('x1')
            rb.x1 = float(attri)
            attri = cnode.get('y1')
            rb.y1 = float(attri)
            attri = cnode.get('d1')
            rb.d1 = float(attri)
            attri = cnode.get('w2')
            rb.w2 = float(attri)
            attri = cnode.get('h2')
            rb.h2 = float(attri)
            attri = cnode.get('x2')
            rb.x2 = float(attri)
            attri = cnode.get('y2')
            rb.y2 = float(attri)
            attri = cnode.get('d2')
            rb.d2 = float(attri)
        # door panels (<门板>)
        for j in range(0, len(node)):
            cnode = node[j]
            if cnode.tag != '门板' : continue
            pnl = RectPanel()
            pnl.selected = False
            door.panellist.append(pnl)
            attri = cnode.get('类型')
            pnl.PanelType = attri
            attri = cnode.get('颜色')
            pnl.color = attri
            attri = cnode.get('颜色2')
            if attri: pnl.color2 = attri
            attri = cnode.get('纹路')
            pnl.direct = attri
            attri = cnode.get('备注')
            if attri:
                pnl.memo = attri
            attri = cnode.get('ExtraData')
            if attri:
                pnl.extradata = attri
            attri = cnode.get('w0')
            pnl.w0 = float(attri)
            attri = cnode.get('h0')
            pnl.h0 = float(attri)
            attri = cnode.get('x0')
            pnl.x0 = float(attri)
            attri = cnode.get('y0')
            pnl.y0 = float(attri)
            attri = cnode.get('d0')
            pnl.d0 = float(attri)
            attri = cnode.get('w1')
            pnl.w1 = float(attri)
            attri = cnode.get('h1')
            pnl.h1 = float(attri)
            attri = cnode.get('x1')
            pnl.x1 = float(attri)
            attri = cnode.get('y1')
            pnl.y1 = float(attri)
            attri = cnode.get('d1')
            pnl.d1 = float(attri)
            attri = cnode.get('w2')
            pnl.w2 = float(attri)
            attri = cnode.get('h2')
            pnl.h2 = float(attri)
            attri = cnode.get('x2')
            pnl.x2 = float(attri)
            attri = cnode.get('y2')
            pnl.y2 = float(attri)
            attri = cnode.get('d2')
            pnl.d2 = float(attri)
    skcolor1, skcolor2, skcolor3, skcolor4 = '', '' ,'', ''
    if len(mDoorsList) > 0:
        door = mDoorsList[0]
        pcolorclass = GetSlidingColorClass('竖框', mSlidingParam['vboxtype'], door.mVBoxColor)
        if pcolorclass:
            Add2SlidingConfig('SlidingColorClass', pcolorclass)
            skcolor1 = pcolorclass['skcolor1']
            skcolor2 = pcolorclass['skcolor2']
            skcolor3 = pcolorclass['skcolor3']
            skcolor4 = pcolorclass['skcolor4']
    if (mDataMode==0) and (mTrackParam['wlupcode'] != ''):
        pcolorclass = GetSlidingColorClass('上轨', mTrackParam['upname'], mMyUpTrackColor)
        if pcolorclass: Add2SlidingConfig('SlidingColorClass', pcolorclass)
    if (mDataMode==0) and (mTrackParam['wldncode'] != ''):
        # NOTE(review): this looks up class '上轨' (upper track) with the DOWN
        # track's name/colour — possibly should be '下轨'; kept as-is.
        pcolorclass = GetSlidingColorClass('上轨', mTrackParam['dnname'], mMyDownTrackColor)
        if pcolorclass: Add2SlidingConfig('SlidingColorClass', pcolorclass)
    # hardware linked to the sliding-door frame
    wjname = mSlidingParam['wjname']
    if (mDataMode==0) and (wjname != ''):
        pbomdetail = GetWjBomDetaildata(wjname, skcolor1, skcolor2, skcolor3, skcolor4)
        Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
    # upper-track hardware
    if (mDataMode==0) and (mTrackParam['wjname1'] != ''):
        pbomdetail = GetWjBomDetaildata(mTrackParam['wjname1'], skcolor1, skcolor2, skcolor3, skcolor4)
        Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
    # lower-track hardware
    if (mDataMode==0) and (mTrackParam['wjname2'] != ''):
        pbomdetail = GetWjBomDetaildata(mTrackParam['wjname2'], skcolor1, skcolor2, skcolor3, skcolor4)
        Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
    # vertical frames
    for i in range(0, len(mDoorsList)):
        if (mDataMode==1) : break
        door = mDoorsList[i]
        pvbox = GetVBoxParam(door.mVBoxParam['name'])
        if pvbox:
            Add2SlidingConfig('VBoxParam', pvbox)  # vertical-frame parameters
            pcolorclass = GetSlidingColorClass('竖框', door.mVBoxColor)
            Add2SlidingConfig('SlidingColorClass', pcolorclass)
            if door.mVBoxParam['wjname'] != '':
                pbomdetail = GetWjBomDetaildata(door.mVBoxParam['wjname'], skcolor1, skcolor2, skcolor3, skcolor4)
                Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
    # top/bottom rails
    for i in range(0, len(mDoorsList)):
        if (mDataMode == 1): break
        door = mDoorsList[i]
        pcolorclass = GetSlidingColorClass('上横框', mMyUpBoxColor)
        Add2SlidingConfig('SlidingColorClass', pcolorclass)
        pcolorclass = GetSlidingColorClass('下横框', mMyDownBoxColor)
        Add2SlidingConfig('SlidingColorClass', pcolorclass)
        # top-rail hardware
        if door.mUDBoxParam['wjname1'] != '':
            pbomdetail = GetWjBomDetaildata(door.mUDBoxParam['wjname1'], skcolor1, skcolor2, skcolor3, skcolor4)
            Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
        # bottom-rail hardware
        if door.mUDBoxParam['wjname2'] != '':
            pbomdetail = GetWjBomDetaildata(door.mUDBoxParam['wjname2'], skcolor1, skcolor2, skcolor3, skcolor4)
            Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
        for j in range(0, len(door.panellist)):
            pnl = door.panellist[j]
            pnltype = GetSlidingPanelType(mSlidingParam['name'], pnl.PanelType)
    # mid horizontal rails
    for i in range(0, len(mDoorsList)):
        if (mDataMode == 1): break
        door = mDoorsList[i]
        for j in range(0, len(door.boxlist)):
            rb = door.boxlist[j]
            if rb.h0 <= 0 : continue
            phbox = GetSlidingHBoxParam(rb.boxtype)
            Add2SlidingConfig('HBoxParam', phbox)
            pcolorclass = GetSlidingColorClass('中横框', rb.color)
            Add2SlidingConfig('SlidingColorClass', pcolorclass)
            if phbox['wjname'] != '':
                pbomdetail = GetWjBomDetaildata(phbox['wjname'], skcolor1, skcolor2, skcolor3, skcolor4)
                Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
    # panels: vertical-grid cores are encoded as JSON in the panel's
    # ExtraData field ('^' stands in for '"')
    for i in range(0, len(mDoorsList)):
        if (mDataMode == 1): break
        door = mDoorsList[i]
        for j in range(len(door.panellist)):
            pnl = door.panellist[j]
            if (pnl.extradata!='' and len(pnl.extradata) > 5):
                Sfg_Param = {}
                sfgFK = Sfg_Param
                sJson = pnl.extradata
                sJson = sJson.replace('^', '"')
                data = json.loads(sJson)
                data['L'] = pnl.w1
                data['H'] = pnl.h1
                if 'direc' not in data: data['direc'] = 0
                if (data['nType'] == 2):
                    if 'direc' in data and data['direc'] == 1:
                        xml = Sliding['SfgParam']['HTxml']
                        cfgobj = copy.deepcopy(HCfgobj2)
                        config['tmconfig']['SfgParam']['HTxml'] = xml
                        config['tmconfig']['Hfg2'] = HCfgobj2
                    else:
                        xml = Sliding['SfgParam']['Txml']
                        cfgobj = copy.deepcopy(Cfgobj2)
                        config['tmconfig']['SfgParam']['Txml'] = xml
                        config['tmconfig']['Sfg2'] = Cfgobj2
                if (data['nType'] == 3):
                    if 'direc' in data and data['direc'] == 1:
                        xml = Sliding['SfgParam']['HSxml']
                        cfgobj = copy.deepcopy(HCfgobj3)
                        config['tmconfig']['SfgParam']['HSxml'] = xml
                        config['tmconfig']['Hfg3'] = HCfgobj3
                    else:
                        xml = Sliding['SfgParam']['Sxml']
                        # NOTE(review): deepcopies HCfgobj2 where the pattern
                        # suggests Cfgobj3; cfgobj is unused afterwards, so
                        # this is harmless but kept untouched.
                        cfgobj = copy.deepcopy(HCfgobj2)
                        config['tmconfig']['SfgParam']['Sxml'] = xml
                        config['tmconfig']['Sfg3'] = Cfgobj3
                if (data['nType'] == 4):
                    if 'direc' in data and data['direc'] == 1:
                        xml = Sliding['SfgParam']['HFxml']
                        cfgobj = copy.deepcopy(HCfgobj4)
                        config['tmconfig']['SfgParam']['HFxml'] = xml
                        config['tmconfig']['Hfg4'] = HCfgobj4
                    else:
                        xml = Sliding['SfgParam']['Fxml']
                        cfgobj = copy.deepcopy(Cfgobj4)
                        config['tmconfig']['SfgParam']['Fxml'] = xml
                        config['tmconfig']['Sfg4'] = Cfgobj4
            # FIX: these two assignments had a trailing comma, which stored
            # the tuple ([],) instead of a list and broke later appends.
            config['tmconfig']['HSHBoxParam'] = []  # 22. horizontal mid-mullions
            config['tmconfig']['SHBoxParam'] = []  # 23. vertical mid-mullions
            pnltype = GetSlidingPanelType(mSlidingParam['name'], pnl.PanelType)
            if (pnltype ):
                Add2SlidingConfig('PanelType', pnltype)
                pnlbomdetail = GetPanelBomdata(pnltype['slaVe'],pnl.PanelType, pnl.color, pnl.color2,door.mVBoxColor, pnl.w1, pnl.h1)
                Add2SlidingConfig('PanelBomDetail', pnlbomdetail)  # extra panel materials
                pssexp = GetSlidingSSExp(pnl.PanelType)
                Add2SlidingConfig('SSExp', pssexp)
            pcolorclass = GetSlidingColorClass('门板', pnl.PanelType, pnl.color)
            if pcolorclass: Add2SlidingConfig('SlidingColorClass', pcolorclass)
            # register the panel's linked hardware
            if (pnltype) and (pnltype['wjname'] !=''):
                pbomdetail = GetWjBomDetaildata(pnltype['wjname'], skcolor1, skcolor2, skcolor3, skcolor4)
                Add2SlidingConfig('SlidingWjBomDetail', pbomdetail)
def ImportXomItemForBom(root, tmlist, ymlist):
    """Recursively walk a minidom product tree collecting template XML.

    For a product node whose 类别 (category) is '趟门,趟门' (sliding) or
    '掩门,掩门' (hinged), the first child of each <模板> (template) node is
    serialised and appended to *tmlist* or *ymlist* respectively.  It then
    recurses into every <我的模块> (my modules) child, dropping entries whose
    显示方式 (display mode) is '3'.

    NOTE(review): node.childNodes.remove(cnode) relies on minidom's
    childNodes being a plain Python list — true for xml.dom.minidom in
    CPython, but not part of the DOM API contract.
    """
    string = root.getAttribute('类别')
    if (string == '趟门,趟门') or (string == '掩门,掩门'):
        for k in range(0, root.childNodes.length): #
            node = root.childNodes[k]
            if node.nodeType != 1: continue  # skip non-element nodes
            if node.nodeName == '模板':
                cnode = getfirstchild(node)
                childxml = ''
                if cnode:
                    childxml = cnode.toxml('utf8')
                if (childxml!='') and (string == '趟门,趟门'):
                    #gettmconfig(xmltemplate, config)
                    tmlist.append(childxml)
                if (childxml!='') and (string == '掩门,掩门'):
                    #getymconfig(xmltemplate, config)
                    ymlist.append(childxml)
    for i in range(0, root.childNodes.length):
        node = root.childNodes[i]
        if node.nodeType != 1 : continue
        if node.nodeName == '我的模块':
            if node.childNodes.length > 0:
                # iterate backwards because entries may be removed in place
                for j in range(node.childNodes.length-1,-1,-1): #
                    cnode = node.childNodes[j]
                    if cnode.nodeType != 1: continue
                    string = cnode.getAttribute('显示方式')
                    if (string == '3'):
                        node.childNodes.remove(cnode)
                        continue
                    if cnode.childNodes.length > 0:
                        ccnode = getfirstchild(cnode)
                        ImportXomItemForBom(ccnode, tmlist, ymlist)  # ccnode is the product node
def Delphi_Round(num):
    """Emulate Delphi's Round(): banker's rounding on exact .5 halves.

    Negative input is handled by rounding the magnitude and negating.  For a
    .5 value that Python's round() pushed DOWN, the truncated part decides:
    an odd units digit rounds up, an even one stays ("round half to even").
    """
    if num < 0:
        return -int(Delphi_Round(-num))
    nearest = round(num)
    if num * 10 - nearest * 10 == -5:
        # exact .5 rounded down by round(): apply odd-up / even-stay on the
        # units digit of the truncated value
        whole = int(num)
        return whole + 1 if (whole % 10) % 2 else whole
    return int(nearest)
def Number(string):
    """Coerce *string* to float; return 0 when it cannot be converted.

    Previously used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit — narrowed to the two exceptions float()
    actually raises for bad input.
    """
    try:
        return float(string)
    except (TypeError, ValueError):
        return 0
def GetTrueFalse(string):
    """Interpret a Delphi-style boolean value.

    Only 'TRUE', 'True', the bool True and the int 1 count as truthy
    (membership uses ==, so 1.0 also matches); everything else — including
    'true' — is False, matching the original behaviour exactly.
    """
    return string in ('TRUE', 'True', True, 1)
def sSort(HCfgobj):
    """Reorder each entry's 门芯列表 (panel list) into canonical order.

    Lists of length 3/5/7 are rearranged into the fixed
    门芯/竖中横 alternation.  Quirks deliberately preserved from the
    original: any other length replaces the list with []; a canonical
    name with no matching item stays in the result as a bare string.
    """
    canonical = {
        3: ['门芯1', '竖中横1', '门芯2'],
        5: ['门芯1', '竖中横1', '门芯2', '竖中横2', '门芯3'],
        7: ['门芯1', '竖中横1', '门芯2', '竖中横2', '门芯3', '竖中横3', '门芯4'],
    }
    for entry in HCfgobj:
        items = entry['门芯列表']
        ordered = list(canonical.get(len(items), []))
        for pos, wanted in enumerate(ordered):
            for item in items:
                if item['名称'] == wanted:
                    ordered[pos] = item
                    break
        entry['门芯列表'] = ordered
async def InitData1(rootname, RootPath):
    """Async-generator variant of InitData: load every sliding-door (趟门)
    and hinged-door (掩门) configuration table from the Berkeley-DB hash
    file ``SlidADoorDBD`` under *RootPath* and publish each one as a
    module-level global of the same name (25 sliding + 17 hinged tables).

    :param rootname: key prefix in the DB (one DB file holds several sets)
    :param RootPath: directory containing the ``SlidADoorDBD`` database

    NOTE(review): ``yield`` inside ``async def`` makes this an async
    *generator*; each ``yield adb.get(...)`` yields the raw bytes read from
    the DB and binds whatever value the driver sends back in.  The driver
    is assumed to echo the yielded bytes -- confirm against the caller
    (the synchronous InitData below is the one LoadXML2Bom actually uses).

    Fixes vs. the previous revision: removed the leftover debug
    ``print``/``exit(1)`` that aborted the whole process after the first
    table was parsed, removed the duplicated ``UHBoxParam`` fetch, and
    dropped the ``encoding='gbk'`` argument to json.loads (ignored on
    Python 3 and removed in Python 3.9).
    """
    global mSlidingExpList, mSlidingTypeList, mSlidingParamList, mUDBoxParamList, \
        mTrackParamList, mHBoxParamList, PanelTypeList, mSlidingColorList, \
        mSlidingColorClassList, mSlidingWjBomDetailList, mSSExpList, \
        mSlidingAccessoryList, mVBoxParamList, mPanelBomDetailList, \
        UHBoxParam, HHBoxParam, Cfgobj2, Cfgobj3, Cfgobj4, SHBoxParam, \
        HCfgobj2, HCfgobj3, HCfgobj4, HSHBoxParam, cfglist, \
        mExpList, mHandleList, mDoorHBoxParamList, mHingeList, \
        mCurHingeList, mDoorPanelBomDetailList, mDoorPanelTypeList, \
        mParamList, mShutterExpList, \
        mWJBomList, mWJBomDetailList, mDoorXMLList, mAccessoryList, \
        mColorList, mColorClassList, mColorClass2List, mTypeList
    # For every table the DB key equals the global variable name, so the
    # loads can be driven from name lists instead of 80+ pasted lines.
    sliding_names = [
        'mSlidingExpList', 'mSlidingTypeList', 'mSlidingParamList',
        'mUDBoxParamList', 'mTrackParamList', 'mHBoxParamList',
        'PanelTypeList', 'mSlidingColorList', 'mSlidingColorClassList',
        'mSlidingWjBomDetailList', 'mSSExpList', 'mSlidingAccessoryList',
        'mVBoxParamList', 'mPanelBomDetailList', 'UHBoxParam', 'HHBoxParam',
        'Cfgobj2', 'Cfgobj3', 'Cfgobj4', 'SHBoxParam', 'HCfgobj2',
        'HCfgobj3', 'HCfgobj4', 'HSHBoxParam', 'cfglist']
    door_names = [
        'mExpList', 'mHandleList', 'mDoorHBoxParamList', 'mHingeList',
        'mCurHingeList', 'mDoorPanelBomDetailList', 'mDoorPanelTypeList',
        'mParamList', 'mShutterExpList', 'mWJBomList', 'mWJBomDetailList',
        'mDoorXMLList', 'mAccessoryList', 'mColorList', 'mColorClassList',
        'mColorClass2List', 'mTypeList']
    adb = db.DB()
    adb.open(RootPath + '\\SlidADoorDBD', dbtype=db.DB_HASH, flags=db.DB_CREATE)
    raw = {}
    # Phase 1: fetch all sliding-door tables (one yield per DB read),
    # then parse them, mirroring the original fetch-all-then-parse order.
    for name in sliding_names:
        raw[name] = yield adb.get((rootname + name).encode('utf8'))
    for name in sliding_names:
        globals()[name] = json.loads(raw[name])
    # Canonicalise the 门芯列表 (panel list) order of the split tables.
    for cfg in (Cfgobj2, Cfgobj3, Cfgobj4, HCfgobj2, HCfgobj3, HCfgobj4):
        sSort(cfg)
    # Phase 2: hinged-door tables.
    for name in door_names:
        raw[name] = yield adb.get((rootname + name).encode('utf8'))
    adb.close()
    for name in door_names:
        globals()[name] = json.loads(raw[name])
def InitData(rootname, RootPath):
    """Load every sliding-door (趟门) and hinged-door (掩门) configuration
    table from the Berkeley-DB hash file ``SlidADoorDBD`` under *RootPath*
    and publish each one as a module-level global of the same name
    (25 sliding + 17 hinged tables, see the name lists below).

    :param rootname: key prefix in the DB (one DB file holds several sets)
    :param RootPath: directory containing the ``SlidADoorDBD`` database

    Fixes vs. the previous revision: removed the duplicated ``UHBoxParam``
    fetch, dropped the ``encoding='gbk'`` argument to json.loads (ignored
    on Python 3 and removed in Python 3.9), closed the DB in a
    try/finally, and replaced 80+ pasted get/loads lines with a
    data-driven loop (DB key == global variable name for every table).
    """
    global mSlidingExpList, mSlidingTypeList, mSlidingParamList, mUDBoxParamList, \
        mTrackParamList, mHBoxParamList, PanelTypeList, mSlidingColorList, \
        mSlidingColorClassList, mSlidingWjBomDetailList, mSSExpList, \
        mSlidingAccessoryList, mVBoxParamList, mPanelBomDetailList, \
        UHBoxParam, HHBoxParam, Cfgobj2, Cfgobj3, Cfgobj4, SHBoxParam, \
        HCfgobj2, HCfgobj3, HCfgobj4, HSHBoxParam, cfglist, \
        mExpList, mHandleList, mDoorHBoxParamList, mHingeList, \
        mCurHingeList, mDoorPanelBomDetailList, mDoorPanelTypeList, \
        mParamList, mShutterExpList, \
        mWJBomList, mWJBomDetailList, mDoorXMLList, mAccessoryList, \
        mColorList, mColorClassList, mColorClass2List, mTypeList
    sliding_names = [
        'mSlidingExpList', 'mSlidingTypeList', 'mSlidingParamList',
        'mUDBoxParamList', 'mTrackParamList', 'mHBoxParamList',
        'PanelTypeList', 'mSlidingColorList', 'mSlidingColorClassList',
        'mSlidingWjBomDetailList', 'mSSExpList', 'mSlidingAccessoryList',
        'mVBoxParamList', 'mPanelBomDetailList', 'UHBoxParam', 'HHBoxParam',
        'Cfgobj2', 'Cfgobj3', 'Cfgobj4', 'SHBoxParam', 'HCfgobj2',
        'HCfgobj3', 'HCfgobj4', 'HSHBoxParam', 'cfglist']
    door_names = [
        'mExpList', 'mHandleList', 'mDoorHBoxParamList', 'mHingeList',
        'mCurHingeList', 'mDoorPanelBomDetailList', 'mDoorPanelTypeList',
        'mParamList', 'mShutterExpList', 'mWJBomList', 'mWJBomDetailList',
        'mDoorXMLList', 'mAccessoryList', 'mColorList', 'mColorClassList',
        'mColorClass2List', 'mTypeList']
    adb = db.DB()
    adb.open(RootPath + '\\SlidADoorDBD', dbtype=db.DB_HASH, flags=db.DB_CREATE)
    try:
        for name in sliding_names + door_names:
            raw = adb.get((rootname + name).encode('utf8'))
            globals()[name] = json.loads(raw)
    finally:
        # Always release the DB handle, even if a table fails to parse.
        adb.close()
    # Canonicalise the 门芯列表 (panel list) order of the split tables.
    for cfg in (Cfgobj2, Cfgobj3, Cfgobj4, HCfgobj2, HCfgobj3, HCfgobj4):
        sSort(cfg)
#@tornado.gen.coroutine
def LoadXML2Bom(xmlfile, RootName, Path):
    """Parse an order/scene XML file and build the global BOM ``config``.

    Loads all configuration tables via InitData, walks the scene XML
    collecting sliding-door (趟门) and hinged-door (掩门) template
    fragments, then feeds them through gettmconfig/getymconfig -- which
    presumably populate ``config`` as a side effect (defined elsewhere in
    this module; confirm).  Finally serialises ``config`` both to the
    returned UTF-8 JSON bytes and to ./config.txt in the working dir.

    :param xmlfile:  path of the order/scene XML to parse
    :param RootName: data-set id used as the DB key prefix (see InitData)
    :param Path:     root directory holding the SlidADoorDBD database
    :return: the whole ``config`` dict serialised as UTF-8 JSON bytes
    """
    global config
    RootPath = Path
    #with threadLock:
    # Fresh result skeleton; 'ymconfig' = hinged-door tables,
    # 'tmconfig' = sliding-door tables, 'gtconfig' = misc service info.
    config = {
        'ymconfig': {
            'mExpList': [],
            'mTypeList': [],
            'mParamList': [],
            'mHandleList': [],
            'mHingeList': [],
            'mDoorHBoxParamList': [],
            'mDoorPanelTypeList': [],
            'mAccessoryList': [],
            'mColorClassList': [],
            'mColorClass2List': [],
            'mShutterExpList': [],
            'mWJBomList': [],
            'mWJBomDetailList': [],
            'mDoorPanelBomDetailList': [],
            'mDoorXMLList': []
        },
        'tmconfig': {
            'SlidingExp': [],  # 1. per-door quantity formula types
            'SlidingType': [],  # 2. door types
            'SlidingParam': [],  # 3. side-frame (stile) types
            'UDBoxParam': [],  # 4. top/bottom rail types
            'TrackParam': [],  # 5. sliding-door top/bottom track parameters
            'HBoxParam': [],  # 6. sliding-door middle horizontal frame
            'VBoxParam': [],  # 7. vertical frame parameters
            'SlidingColor': [],  # 8. colour classification 2
            'PanelType': [],  # 9. door-panel types
            'SlidingAccessory': [],  # 10. hardware fittings
            'SlidingColorClass': [],  # 11. colour classification
            'SSExp': [],  # 12. louvre-panel calculation formulas
            'SlidingWjBomDetail': [],  # 13. hardware fitting classification data
            'PanelBomDetail': [],  # 14. door-panel extra materials
            'Cfglist': [],  # 15. door conversion table
            'Hfg2': [],  # 16. sliding door, 2-pane horizontal split
            'Hfg3': [],  # 17. sliding door, 3-pane horizontal split
            'Hfg4': [],  # 18. sliding door, 4-pane horizontal split
            'Sfg2': [],  # 19. sliding door, 2-pane vertical split
            'Sfg3': [],  # 20. sliding door, 3-pane vertical split
            'Sfg4': [],  # 21. sliding door, 4-pane vertical split
            'HSHBoxParam': [],  # 22. horizontal mid-rail (横中横)
            'SHBoxParam': [],  # 23. vertical mid-rail (竖中横)
            'SfgParam': {},  # 24. split-template xml
        },
        'gtconfig':{
            'qdsoft_id':'data',
            'UrlIp':'http://129.204.134.85:8002/Qdbom'
        }
    }
    tmlist = []
    ymlist = []
    print(RootName, Path)
    #yield tornado.gen.sleep(0.001)
    # Populate the module-level configuration tables from the DB.
    InitData(RootName, RootPath)
    DOMTree = minidom.parse(xmlfile)
    root = DOMTree.documentElement
    # Collect sliding/hinged template XML fragments from the product tree.
    node = getfirstchild(root)
    ImportXomItemForBom(node, tmlist, ymlist)
    for tmchildxml in tmlist:
        gettmconfig(tmchildxml)
    for ymchildxml in ymlist:
        getymconfig(ymchildxml)
    print('tmlist=' + str(len(tmlist)))
    print('ymlist=' + str(len(ymlist)))
    # cfglist (door conversion table) is a module global set by InitData.
    config['tmconfig']['Cfglist'] = cfglist
    config['result'] =1
    configjson = json.dumps(config, ensure_ascii=False).encode('utf8')
    # Side effect: also dump the config to a file for inspection.
    with open('config.txt','w',encoding='utf8') as f:
        f.write(json.dumps(config,ensure_ascii=False))
    return configjson
if __name__ == '__main__':
    # Manual smoke-test entry point: loads a sample order XML from a
    # developer machine path and builds the BOM config (see LoadXML2Bom
    # call below).
    print('999999999')
Sliding = {
'Sfg_Param': {'HTxml': '<产品 名称="横2格门" 类别="" 摆放方式="整块;左右延伸:-1;'
'前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;'
'" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" '
'ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表>'
'</变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" '
'Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" '
'基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" '
'HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" '
'OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" '
'Y="0" Z="0" 宽="L" 深="$竖中横厚度" 高="$竖中横宽度" 类别="" '
'基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" '
'Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/>'
'<板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" '
'Z="0" 宽="L-$门芯1宽度-$竖中横宽度+2*$竖中横进槽" 深="$门芯2厚度" 高="H" '
'类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" '
'Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/></我的模块>'
'<我的规格><规格 名称="竖2格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'Txml': '<产品 名称="竖2格门" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;'
'上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" '
'颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" '
'LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块>'
'<板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" '
'深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" '
'MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" '
'OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" '
'宽="$竖中横宽度" 深="$竖中横宽度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" '
'MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" '
'guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" '
'Z="0" 宽="L-$门芯1宽度-$竖中横宽度+2*$竖中横进槽" 深="$门芯2厚度" 高="H" 类别="" '
'基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" '
'ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/></我的模块><我的规格><规格 '
'名称="竖2格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'Sxml': '<产品 名称="竖3格门_两边均分" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横宽度+$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" Y="$门芯3前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/></我的模块><我的规格><规格 名称="竖3格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'Fxml': '<产品 名称="竖4格门_改123" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="4" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="$门芯1宽度+$门芯2宽度+$竖中横宽度-3*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="4" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="$门芯1宽度+$门芯2宽度+2*$竖中横宽度-4*$竖中横进槽" Y="$门芯3前偏移" Z="0" 宽="$门芯3宽度" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/><板件 名称="竖中横3" X="$门芯1宽度+$门芯2宽度+$门芯3宽度+2*$竖中横宽度-5*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="4" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="6"/><板件 名称="门芯4" X="L-(L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽)" Y="$门芯4前偏移" Z="0" 宽="L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽" 深="$门芯4厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="7"/></我的模块><我的规格><规格 名称="竖3格门" 宽="900" 深="20" 高="1000"/></我的规格></产品>',
'HSxml': '<产品 名称="横3格门_两边均分" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2-$竖中横宽度+$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="L-(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" Y="$门芯3前偏移" Z="0" 宽="(L-$门芯2宽度-2*$竖中横宽度+4*$竖中横进槽)/2" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="1" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/></我的模块><我的规格><规格 名称="竖3格门" 宽="800" 深="20" 高="1000"/></我的规格></产品>',
'HFxml': '<产品 名称="横4格门_改123" 类别="" 摆放方式="整块;左右延伸:-1;前后延伸:-1;上下延伸:-1;尺寸限制:1,1220,1,1220,1,2430;" 装饰类别="趟门" 材料="" 颜色="" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" LgwjFlag="0"><摆放规则列表/><变量列表></变量列表><我的模块><板件 名称="门芯1" X="0" Y="$门芯1前偏移" Z="0" 宽="$门芯1宽度" 深="$门芯1厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="1"/><板件 名称="竖中横1" X="$门芯1宽度-$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="2"/><板件 名称="门芯2" X="$门芯1宽度+$竖中横宽度-2*$竖中横进槽" Y="$门芯2前偏移" Z="0" 宽="$门芯2宽度" 深="$门芯2厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="3"/><板件 名称="竖中横2" X="$门芯1宽度+$门芯2宽度+$竖中横宽度-3*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="4"/><板件 名称="门芯3" X="$门芯1宽度+$门芯2宽度+2*$竖中横宽度-4*$竖中横进槽" Y="$门芯3前偏移" Z="0" 宽="$门芯3宽度" 深="$门芯3厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="5"/><板件 名称="竖中横3" X="$门芯1宽度+$门芯2宽度+$门芯3宽度+2*$竖中横宽度-5*$竖中横进槽" Y="0" Z="0" 宽="$竖中横宽度" 深="$竖中横厚度" 高="H" 类别="" 基础图形="BG_竖中横" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="6"/><板件 名称="门芯4" X="L-(L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽)" Y="$门芯4前偏移" Z="0" 宽="L-$门芯1宽度-$门芯2宽度-$门芯3宽度-3*$竖中横宽度+6*$竖中横进槽" 深="$门芯4厚度" 高="H" 类别="" 基础图形="BG::RECT" 装饰类别="趟门" MATID="" DI="3" HoleFlag="0" Flag32="0" ErrorFlag="0" ActFlag="0" OZ="" 图形参数="" guid="7"/></我的模块><我的规格><规格 名称="竖3格门" 宽="900" 深="20" 高="1000"/></我的规格></产品>'
},
}
config = {
'ymconfig':{
'mExPList':[],
'mTypeList':[],
'mParamList':[],
'mHandleList':[],
'mHingeList':[],
'mDoorHBoxParamList':[],
'mDoorPanelTypeList':[],
'mAccessoryList':[],
'mColorClassList':[],
'mColorClass2List':[],
'mShutterExpList':[],
'mWJBomList':[],
'mWJBomDetailList':[],
'mDoorPanelBomDetailList':[],
'mDoorXMLList':[]
},
'tmconfig':{
'SlidingExp':[], #1.单门数量类型
'SlidingType':[], #2.门类型
'SlidingParam':[], #3.边框类型
'UDBoxParam':[],# 4.上下横框类型
'TrackParam':[], #5.趟门上下轨参数
'HBoxParam':[], #6.趟门中横框
'VBoxParam':[], #7.竖框参数
'SlidingColor':[], #8.颜色分类2
'PanelType':[], #9.门板类型
'SlidingAccessory':[], #10.五金配件
'SlidingColorClass':[], #11.颜色分类
'SSExp':[], #12.百叶板计算公式
'SlidingWjBomDetail':[], #13.五金配件分类数据
'PanelBomDetail':[], #14.门板附加物料
'Cfglist':[], #15.门转换表
'Hfg2':[], #16.趟门2横分格
'Hfg3':[], #17.趟门3横分格
'Hfg4': [], # 18.趟门4横分格
'Sfg2': [], # 19.趟门2竖分格
'Sfg3': [], # 20.趟门3竖分格
'Sfg4': [], # 21.趟门4竖分格
'HSHBoxParam': [], # 22.横中横 HSHBoxParam
'SHBoxParam': [], # 23.竖中横 SHBoxParam
'SfgParam':{}, #24 xml
},
}
# adb = db.DB()
# adb.open('db_filename', dbtype=db.DB_HASH, flags=db.DB_CREATE)
# for i, w in enumerate('some word for example'.split()):
# adb.put(w.encode('utf8'), str(i))
#
# for key, data in irecords(adb.cursor()):
# print(1,key, data)
# adb.close()
#
# the_same_db = db.DB()
# the_same_db.open("db_filename")
# the_same_db.put('skidoo'.encode('utf8'), '23') # 加入数据库
# the_same_db.put('for'.encode('utf8'), 'change the data') # 改变数据库的数据
# for key, data in irecords(the_same_db.cursor()):
# print(key, data)
# the_same_db.close()
mDoorsList = []
mGridItem = 0 #均分
base_dir = os.path.abspath(os.path.join(os.getcwd(),'..'))
print('2=',base_dir)
path = 'D:\\HGSoftware\\009_华广定制一体化设计软件_在线版本\\Python\\xmls\K10005600202190529001\\#order_scene0_space30BB2B1351B9219B63112944FB2FA9E1.xml'
#path ='D:\\nginx-1.0.11\\nginx-1.0.11\\html\\data\\Python\\UpLoadXml\\#order_scene0_space30BB2B1351B9219B63112944FB2FA9E1.xml'
#path = 'D:\\HGSoftware\\001_美蝶设计软件工厂版190807\\Python3\\TestPython\\ord\\K10008220466190523001\\#order_scene0_space3420F65812518FAE670B179D73A6CBD8'
#path = 'extradata.xml'
LoadXML2Bom(path, 'data', "D:\\nginx-1.0.11\\nginx-1.0.11\html\data")
# for pwjbomdetail in mWJBomDetailList:
# Add2Config('mWJBomDetailList', pwjbomdetail) # 五金配件分类数据.cfg
# for pa in mAccessoryList:
# Add2Config('mAccessoryList', pa) # 五金配件.cfg
# for ppbdetail in mDoorPanelBomDetailList:
# Add2Config('mDoorPanelBomDetailList', ppbdetail) # 门芯附加物料.cfg
#for key, value in config['ymconfig'].items():
#_logging.debug(key + ',' +str(len(value)))
#print(json.dumps(config,encoding='utf8',ensure_ascii=False).encode('utf8'))
|
[
"15179462795@163.com"
] |
15179462795@163.com
|
e5048fb4de365e7dc3b5ed83587cbced09461960
|
11f0e2f1024faf420dfbc792d059dff29aa262d3
|
/laonlp/corpus/__init__.py
|
2ea0730bb20442e0bb034941d9d534802f16a641
|
[
"Apache-2.0"
] |
permissive
|
shun-liang/LaoNLP
|
b90ee16308299cb356a457c7a12293f3d822feaf
|
8a710eba2a53a303cd3e76899aaed13805f4da55
|
refs/heads/master
| 2022-12-03T12:54:42.867507
| 2020-07-09T05:04:22
| 2020-07-09T05:04:22
| 288,761,105
| 0
| 0
|
Apache-2.0
| 2020-08-19T14:56:41
| 2020-08-19T14:56:40
| null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
# -*- coding: utf-8 -*-
import laonlp
import os
# Filesystem directory of the installed laonlp package; presumably used
# to locate bundled corpus data files -- confirm against the loaders.
laonlp_path = os.path.dirname(laonlp.__file__)
from laonlp.corpus.lao_words import *
# Names re-exported from laonlp.corpus.lao_words as the public API.
__all__ = [
    "lao_dictionary",
    "lo_spellcheckdict",
    "lao_words"
]
|
[
"wannaphong@yahoo.com"
] |
wannaphong@yahoo.com
|
c691925ca055e6e7c7e960285a93de90a244254d
|
88a10b94803b42e5cd2407006ce88e40b9aaca06
|
/test/helpers.py
|
13ea5b29f1442c1a9f5d73a49945e78364b8128c
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
adamfranco/pyosmium
|
8c12f02481808b279dec71f24ded454955ea2e01
|
f36fa2e8fa8bf80539e6124cc7c0bed7ccca2097
|
refs/heads/master
| 2020-03-29T08:04:28.076205
| 2018-08-27T18:56:46
| 2018-08-27T18:56:46
| 149,692,981
| 0
| 0
|
BSD-2-Clause
| 2018-09-21T01:32:38
| 2018-09-21T01:32:38
| null |
UTF-8
|
Python
| false
| false
| 5,771
|
py
|
""" Provides some helper functions for test.
"""
import random
import tempfile
import os
import sys
from textwrap import dedent
import osmium
from datetime import datetime
# mkdate(*args) builds a datetime from the usual constructor arguments;
# on Python 3 it attaches timezone.utc, on Python 2 (which has no
# datetime.timezone) it falls back to a naive datetime.
if sys.version_info[0] == 3:
    from datetime import timezone
    def mkdate(*args):
        # Timezone-aware variant: tag the datetime as UTC.
        return datetime(*args, tzinfo=timezone.utc)
else:
    def mkdate(*args):
        # Naive fallback for Python 2.
        return datetime(*args)
def _complete_object(o):
    """Return a complete OSM object description built from the partial *o*.

    Fills in sensible default attributes for the object kind ('C' for
    changesets, anything else for node/way/relation); values given in *o*
    win over the defaults.  Nodes additionally get random lat/lon when
    none were supplied.
    """
    if o['type'] == 'C':
        defaults = { 'created_at' : "2005-04-09T19:54:13Z",
                     'num_changes' : 2, 'closed_at' : "2005-04-09T20:54:39Z",
                     'open' : "false", 'min_lon' : -0.1465242,
                     'min_lat' : 51.5288506, 'max_lon' : -0.1464925,
                     'max_lat' : 51.5288620, 'user' : "Steve", 'uid' : "1",
                     'tags' : None
                   }
    else:
        defaults = { 'version' : '1', 'timestamp': "2012-05-01T15:06:20Z",
                     'changeset' : "11470653", 'uid' : "122294",
                     'user' : "foo", 'tags' : {}
                   }
    merged = dict(defaults, **o)
    if merged['type'] == 'N':
        # Only consume random numbers for coordinates that are missing.
        if 'lat' not in merged:
            merged['lat'] = random.random()*180 - 90
        if 'lon' not in merged:
            merged['lon'] = random.random()*360 - 180
    return merged
def _write_osm_obj(fd, obj):
    """Serialize one complete OSM object dict as XML onto the binary
    stream *fd*.  Nodes and changesets self-close when 'tags' is None;
    ways and relations always expect a tags dict.
    """
    def put(text):
        fd.write(text.encode('utf-8'))

    def put_tags(tags):
        for k, v in tags.items():
            put(' <tag k="%s" v="%s"/>\n' % (k, v))

    kind = obj['type']
    if kind == 'N':
        put('<node id="%(id)d" lat="%(lat).8f" lon="%(lon).8f" version="%(version)s" timestamp="%(timestamp)s" changeset="%(changeset)s" uid="%(uid)s" user="%(user)s"' % obj)
        if obj['tags'] is None:
            put('/>\n')
        else:
            put('>\n')
            put_tags(obj['tags'])
            put('</node>\n')
    elif kind == 'W':
        put('<way id="%(id)d" version="%(version)s" changeset="%(changeset)s" timestamp="%(timestamp)s" user="%(user)s" uid="%(uid)s">\n' % obj)
        for ref in obj['nodes']:
            put('<nd ref="%s" />\n' % (ref,))
        put_tags(obj['tags'])
        put('</way>\n')
    elif kind == 'R':
        put('<relation id="%(id)d" version="%(version)s" changeset="%(changeset)s" timestamp="%(timestamp)s" user="%(user)s" uid="%(uid)s">\n' % obj)
        for mem in obj['members']:
            put(' <member type="%s" ref="%s" role="%s"/>\n' % mem)
        put_tags(obj['tags'])
        put('</relation>\n')
    elif kind == 'C':
        put('<changeset id="%(id)d" created_at="%(created_at)s" num_changes="%(num_changes)d" closed_at="%(closed_at)s" open="%(open)s" min_lon="%(min_lon).8f" min_lat="%(min_lat).8f" max_lon="%(max_lon).8f" max_lat="%(max_lat).8f" user="%(user)s" uid="%(uid)s"' % obj)
        if obj['tags'] is None:
            put('/>\n')
        else:
            put('>\n')
            put_tags(obj['tags'])
            put('</changeset>\n')
def create_osm_file(data):
    """Write *data* (a list of OSM object hashes) to a temporary .osm file.

    Only `type` and `id` are mandatory per object; other attributes are
    filled with sensible defaults. Ways additionally need their node list
    and relations their member list. Returns the temp file's name; the
    caller is responsible for removing it.
    """
    # Sort nodes before ways before relations, then by id within each kind.
    data.sort(key=lambda obj: ('NWR'.find(obj['type']), obj['id']))
    with tempfile.NamedTemporaryFile(dir=tempfile.gettempdir(),
                                     suffix='.osm', delete=False) as fd:
        fname = fd.name
        header = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<osm version="0.6" generator="test-pyosmium" timestamp="2014-08-26T20:22:02Z">\n'
                  '\t<bounds minlat="-90" minlon="-180" maxlat="90" maxlon="180"/>\n')
        fd.write(header.encode('utf-8'))
        for obj in data:
            _write_osm_obj(fd, _complete_object(obj))
        fd.write('</osm>\n'.encode('utf-8'))
    return fname
def create_opl_file(data):
    """Dump *data* (dedented, newline-terminated) into a temporary .opl
    file and return its name. The caller must delete the file."""
    with tempfile.NamedTemporaryFile(dir=tempfile.gettempdir(),
                                     suffix='.opl', delete=False) as fd:
        fd.write(dedent(data).encode('utf-8') + b'\n')
        return fd.name
def osmobj(kind, **args):
    """Build an OSM object hash of the given *kind* ('N', 'W', 'R', 'C'),
    carrying any extra attributes passed as keywords."""
    return dict(args, type=kind)
def check_repr(o):
    """Return True when neither str() nor repr() of *o* falls back to the
    default '<...>' object form."""
    return not (str(o).startswith('<') or repr(o).startswith('<'))
class HandlerTestBase:
    """Base class for handler tests.

    Subclasses supply `data` (a list/tuple of OSM object hashes, or a raw
    OPL string) and a `Handler` class; `test_func` writes the data to a
    temporary file, applies the handler and runs the optional
    `check_result` hook.
    """
    apply_locations = False
    apply_idx = 'sparse_mem_array'

    def test_func(self):
        # Lists/tuples describe OSM objects; anything else is raw OPL text.
        if isinstance(self.data, (list, tuple)):
            fn = create_osm_file(self.data)
        else:
            fn = create_opl_file(self.data)
        try:
            self.handler = self.Handler()
            self.handler.apply_file(fn, self.apply_locations, self.apply_idx)
        finally:
            # Always clean up the temp file, even if the handler raised.
            os.remove(fn)
        if hasattr(self, "check_result"):
            self.check_result()
class CountingHandler(osmium.SimpleHandler):
    """Handler that simply counts the objects it is called back with.

    `counts` holds [#nodes, #ways, #relations, #areas].
    """

    def __init__(self):
        super(CountingHandler, self).__init__()
        self.counts = [0, 0, 0, 0]

    def node(self, _):
        self.counts[0] += 1

    def way(self, _):
        self.counts[1] += 1

    def relation(self, _):
        self.counts[2] += 1

    def area(self, _):
        self.counts[3] += 1
|
[
"lonvia@denofr.de"
] |
lonvia@denofr.de
|
5a2487e6c8f54b59281e91ac2ee701473fac8a33
|
56550cc0ac59b205cef0e3f69e273f10d33b393e
|
/venv/Scripts/pip3-script.py
|
957e70887db0b75572eb95b8b4eb0cd4e19cea15
|
[] |
no_license
|
sujon13/DRF-Token-JWT-Authentication
|
59eec047fc5cbfa0cd1a0d8e98fa807063372f2e
|
88260cdbff1443f0c05e840e79d383c9781f40ac
|
refs/heads/master
| 2022-04-27T13:48:42.219943
| 2020-04-18T06:03:09
| 2020-04-18T06:03:09
| 256,677,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
#!E:\projects\tutorial_account\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: resolves the `pip3`
# entry point from the pip 10.0.1 distribution and runs it.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing '-script.py'/'-script.pyw'/'.exe' suffix so argv[0]
    # shows the plain command name in pip's own output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
|
[
"arifurrahmansujon27@gmail.com"
] |
arifurrahmansujon27@gmail.com
|
4deb0056120bc6883484b9faa69fa227f8b1c28c
|
28773eb2bd842cfc0e0461923c84ca6788d002a4
|
/yysg/yysg/settings.py
|
e4de0592b6f936f2d2f2ca603a62757fe63aeecf
|
[] |
no_license
|
wuji13/test
|
b59d613dfa997f392a9974a0cd01c2cab575e531
|
835ff1e1efb83526c716d5c0ac8064652666fd1f
|
refs/heads/master
| 2021-01-24T07:55:42.599102
| 2017-12-29T03:59:47
| 2017-12-29T03:59:47
| 93,363,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,193
|
py
|
"""
Django settings for yysg project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import pymysql
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*&ikde0_5ex5&1vi6w=zef!(ak7!+y8heux_l61_v$afjz_4hb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'product',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yysg.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yysg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'yysg',
'USER':'root',
'PASSWORD':'test1234',
'HOST':'127.0.0.1',
'PORT':'3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"liuyunqiang@yunjinkj.com"
] |
liuyunqiang@yunjinkj.com
|
bd32a4d78a2861d4d1f28e8225333e81ec07a769
|
95c1d0121c0e041a4f8f5e2b880af94f05cbdc8a
|
/pygame_example.py
|
fd6ae250197c37d27c53ebf312cda238088c004c
|
[] |
no_license
|
gooseboard/physics_engine
|
1d8e0f66dac8211e8b6e70dbdb1ebc2f681e4005
|
7eeadf4003eaf3b353a3bd34da1017bd76cb9c9d
|
refs/heads/master
| 2021-01-17T17:24:26.694916
| 2016-10-12T03:04:31
| 2016-10-12T03:04:31
| 70,361,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
"""
Simple graphics demo
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
"""
# Import a library of functions called 'pygame'
import pygame
# Initialize the game engine
pygame.init()
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
PI = 3.141592653
# Set the height and width of the screen
size = (400, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Professor Craven's Cool Game")
# Loop until the user clicks the close button.
done = False
clock = pygame.time.Clock()
# Loop as long as done == False
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
# All drawing code happens after the for loop and but
# inside the main while not done loop.
# Clear the screen and set the screen background
screen.fill(WHITE)
# Draw on the screen a line from (0,0) to (100,100)
# 5 pixels wide.
pygame.draw.line(screen, GREEN, [0, 0], [100, 100], 5)
# Draw on the screen several lines from (0,10) to (100,110)
# 5 pixels wide using a loop
for y_offset in range(0, 100, 10):
pygame.draw.line(screen, RED, [0, 10 + y_offset], [100, 110 + y_offset], 5)
# Draw a rectangle
pygame.draw.rect(screen, BLACK, [20, 20, 250, 100], 2)
# Draw an ellipse, using a rectangle as the outside boundaries
pygame.draw.ellipse(screen, BLACK, [20, 20, 250, 100], 2)
# Draw an arc as part of an ellipse.
# Use radians to determine what angle to draw.
pygame.draw.arc(screen, BLACK, [20, 220, 250, 200], 0, PI / 2, 2)
pygame.draw.arc(screen, GREEN, [20, 220, 250, 200], PI / 2, PI, 2)
pygame.draw.arc(screen, BLUE, [20, 220, 250, 200], PI, 3 * PI / 2, 2)
pygame.draw.arc(screen, RED, [20, 220, 250, 200], 3 * PI / 2, 2 * PI, 2)
# This draws a triangle using the polygon command
pygame.draw.polygon(screen, BLACK, [[100, 100], [0, 200], [200, 200]], 5)
# Select the font to use, size, bold, italics
font = pygame.font.SysFont('Calibri', 25, True, False)
# Render the text. "True" means anti-aliased text.
# Black is the color. This creates an image of the
# letters, but does not put it on the screen
text = font.render("My text", True, BLACK)
# Put the image of the text on the screen at 250x250
screen.blit(text, [250, 250])
# Go ahead and update the screen with what we've drawn.
# This MUST happen after all the other drawing commands.
pygame.display.flip()
# This limits the while loop to a max of 60 times per second.
# Leave this out and we will use all CPU we can.
clock.tick(60)
# Be IDLE friendly
pygame.quit()
|
[
"goose@Sufficients-MacBook-Pro.local"
] |
goose@Sufficients-MacBook-Pro.local
|
b2441d178dd2c1c70d071daf48f48b12a962f190
|
6181fcd4a266d963a0ee85971768c97922ca77cd
|
/src/garage/examples/tf/trpo_gym_tf_cartpole_pretrained.py
|
2704f0aabd28acfbb53756e5995b612e5fa89852
|
[
"MIT"
] |
permissive
|
rlworkgroup/garage
|
5d215bbecb3a4e74b504988d6684a7b04df69a80
|
2d594803636e341660cab0e81343abbe9a325353
|
refs/heads/master
| 2023-08-21T22:58:49.338034
| 2023-01-04T06:06:27
| 2023-01-04T06:06:27
| 136,846,372
| 1,832
| 363
|
MIT
| 2023-09-11T11:36:40
| 2018-06-10T21:31:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,666
|
py
|
#!/usr/bin/env python3
"""An example to train a task with TRPO algorithm."""
import tensorflow as tf
from garage import wrap_experiment
from garage.envs import GymEnv
from garage.experiment.deterministic import set_seed
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler import RaySampler
from garage.tf.algos import TRPO
from garage.tf.policies import CategoricalMLPPolicy
from garage.trainer import TFTrainer
@wrap_experiment
def trpo_gym_tf_cartpole(ctxt=None, seed=1):
    """Train TRPO on the CartPole-v1 gym environment.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the snapshotter.
        seed (int): Used to seed the random number generator to produce
            determinism.
    """
    set_seed(seed)
    with TFTrainer(snapshot_config=ctxt) as trainer:
        cartpole = GymEnv('CartPole-v1')

        mlp_policy = CategoricalMLPPolicy(name='policy',
                                          env_spec=cartpole.spec,
                                          hidden_sizes=(32, 32))
        feature_baseline = LinearFeatureBaseline(env_spec=cartpole.spec)
        ray_sampler = RaySampler(
            agents=mlp_policy,
            envs=cartpole,
            max_episode_length=cartpole.spec.max_episode_length,
            is_tf_worker=True)

        algo = TRPO(env_spec=cartpole.spec,
                    policy=mlp_policy,
                    baseline=feature_baseline,
                    sampler=ray_sampler,
                    discount=0.99,
                    max_kl_step=0.01)

        trainer.setup(algo, cartpole)
        trainer.train(n_epochs=10, batch_size=10000, plot=False)
@wrap_experiment
def pre_trained_trpo_cartpole(
        ctxt=None,
        snapshot_dir='data/local/experiment/trpo_gym_tf_cartpole',
        seed=1):
    """Restore a pre-trained TRPO experiment and resume its training.

    Args:
        ctxt (garage.experiment.ExperimentContext): The experiment
            configuration used by Trainer to create the snapshotter.
        snapshot_dir (path): Directory containing the saved snapshot to
            restore from.
        seed (int): Used to seed the random number generator to produce
            determinism.
    """
    set_seed(seed)
    with TFTrainer(snapshot_config=ctxt) as trainer:
        # Load the experiment state from disk, then continue training
        # where the snapshot left off.
        trainer.restore(snapshot_dir)
        trainer.resume(n_epochs=30, batch_size=8000)
if __name__ == '__main__':
    # To train a new TRPO
    log_dir = 'data/local/experiment/trpo_gym_tf_cartpole'
    trpo_gym_tf_cartpole(dict(log_dir=log_dir, use_existing_dir=True))

    # Clear tensorflow graph so the restored experiment below starts from
    # a clean default graph rather than the one built during training.
    tf.compat.v1.reset_default_graph()

    # To use a pretrained TRPO
    pre_trained_trpo_cartpole(snapshot_dir=log_dir)
|
[
"noreply@github.com"
] |
rlworkgroup.noreply@github.com
|
1b73123dd77c203780bf626b96384f887a31b859
|
009477e7c41aed4af8d2d031bf72d58e42144cc2
|
/utils/saver.py
|
2b7e707634821769a463db880bf0f8791b2266c0
|
[] |
no_license
|
yonghyeokrhee/KnowledgeExtraction
|
186506e0c6ed0bc81ebe6d584ddd56aaab729a27
|
81dcc53647bf58330d78b3ec986eb07a6e3ac612
|
refs/heads/master
| 2022-11-23T03:14:06.119129
| 2020-07-22T14:06:43
| 2020-07-22T14:06:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import h5py
class VCRSaver():
    """Write VCR answer/rationale prediction arrays to an HDF5 file.

    One HDF5 group is created per example; each group stores four answer
    datasets and four rationale datasets (one per candidate).
    """

    def __init__(self, path, file_name, number, embedding=False):
        # path and file_name are joined with '_' to form the output file name.
        self.path = path
        self.file_name = file_name
        self.saver = h5py.File(f'{self.path}_{self.file_name}','w')
        self.number = number        # number of examples to write
        self.embedding = embedding  # currently unused flag

    def save(self, answer_results, rationale_results):
        """Store answer/rationale data, indexed as results[candidate][example].

        Args:
            answer_results: sequence of 4 per-candidate arrays, each indexable
                by example.
            rationale_results: same layout as answer_results.
        """
        print('=====================================start!======================================')
        # make h5 group
        for i in range(self.number):
            self.saver.create_group(f'{i}')
            group = self.saver[f'{i}']
            for k in range(4):
                # BUG FIX: datasets must be named per candidate (k), not per
                # example (i) — the original created 'answer_{i}' four times
                # in the same group, which raises a duplicate-name error in
                # h5py on the second iteration.
                group.create_dataset(f'answer_{k}', data=answer_results[k][i])
                group.create_dataset(f'rationale_{k}', data=rationale_results[k][i])
        print('file path : ',self.path)
        print('file name : ',self.file_name)
        print('=====================================finish!=====================================')
|
[
"wodbs9522@gmail.com"
] |
wodbs9522@gmail.com
|
f3628d9a0e62a2f1395208e3abd318fdf019c825
|
8d5d7fc535c190bab13993ebe983c10d292e9e52
|
/snakethree/demo3.py
|
c934ba16982f3591eeb88f283e792bf5b8f7db77
|
[] |
no_license
|
songxiaowei/coding
|
58f5e8c2be3adcd8acad53ed6b43fd2d9759b270
|
273b8046d39b6d1ccc2d757b0f9272deb4cc5c7a
|
refs/heads/master
| 2021-01-12T17:18:07.566394
| 2016-10-01T09:56:13
| 2016-10-01T09:56:13
| 69,480,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
#coding=utf-8
# Python 2 script: scrape prices from a page and insert rows into MySQL.
import re
import urllib
import MySQLdb

# Upgraded version
# Insert the scraped prices into MySQL
# 2016-09-25

# NOTE(review): database credentials are hard-coded; move to configuration.
conn = MySQLdb.connect(
    host = 'localhost',
    port = 3306,
    user = 'root',
    passwd = 'root',
    db = 'villa',
)
cur = conn.cursor()
# One-time table creation, kept for reference:
#cur.execute('create table money_20160925(id varchar(20))')
def getHtml(url):
    """Fetch *url* and return the raw response body (Python 2 urllib)."""
    return urllib.urlopen(url).read()
def getMoney(html):
    """Extract price snippets from *html* and record a row in money_20160925.

    Uses the module-level cursor `cur`. The scraped matches are currently
    collected but not inserted; the INSERT writes a fixed placeholder row.
    """
    pattern = re.compile(r'</span>(.*?)<span class="tag_buy">')
    p = re.findall(pattern, html)  # scraped price fragments (unused for now)
    cur.execute("insert into money_20160925 values('songwei')")
# Flush the pending INSERT and release the connection.
cur.close()
conn.commit()
conn.close()
|
[
"1033712089@qq.com"
] |
1033712089@qq.com
|
a4b8ac47d62d25126dcdb5fdc2dd5d5017084d08
|
2869808c9f4fdb820281a8521a62208766a0aa43
|
/pythonLeetcode/94.二叉树的中序遍历.py
|
d5bf7b7d06ea2a4be74ddb58f0f52a78effddc89
|
[] |
no_license
|
BaoziSwifter/MyPythonLeetCode
|
03dcfa0705ad87a1b727fe2f924fabb7184c4ca1
|
0e8f0d902a379c07c386aedc6d10d7a2aa6d1b4a
|
refs/heads/master
| 2023-01-09T08:15:23.154464
| 2022-12-25T02:35:16
| 2022-12-25T02:35:16
| 220,202,819
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,551
|
py
|
#
# @lc app=leetcode.cn id=94 lang=python
#
# [94] 二叉树的中序遍历
#
# https://leetcode-cn.com/problems/binary-tree-inorder-traversal/description/
#
# algorithms
# Medium (68.81%)
# Likes: 315
# Dislikes: 0
# Total Accepted: 75.2K
# Total Submissions: 109.2K
# Testcase Example: '[1,null,2,3]'
#
# 给定一个二叉树,返回它的中序 遍历。
#
# 示例:
#
# 输入: [1,null,2,3]
# 1
# \
# 2
# /
# 3
#
# 输出: [1,3,2]
#
# 进阶: 递归算法很简单,你可以通过迭代算法完成吗?
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 递归
# 68/68 cases passed (16 ms)
# Your runtime beats 74.1 % of python submissions
# Your memory usage beats 5.53 % of python submissions (12 MB)
class Solution1(object):
    """Recursive in-order traversal: left subtree, node, right subtree."""

    def inorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        res = []

        def traversal(node):
            # Fixed idiom: compare to None with `is`, not `==` (a custom
            # __eq__ on the node type could break the `==` form). Also
            # dropped the shadowing `root=None` default parameter.
            if node is None:
                return
            traversal(node.left)
            res.append(node.val)
            traversal(node.right)

        traversal(root)
        return res
# 基于栈原理
# 68/68 cases passed (16 ms)
# Your runtime beats 91.06 % of python submissions
# Your memory usage beats 28.3 % of python submissions (11.7 MB)
class Solution2(object):
    """Iterative in-order traversal using an explicit stack."""

    def inorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]

        Fixed: an empty tree now returns [] (not None), matching Solution1
        and the LeetCode contract. The loop below already handles the empty
        case correctly; the original early `return None` was the only thing
        making it inconsistent.
        """
        res = []
        stack = []
        node = root
        while node or stack:
            # Walk as far left as possible, stacking ancestors.
            while node:
                stack.append(node)
                node = node.left
            # Visit the deepest unvisited node, then turn right.
            node = stack.pop()
            res.append(node.val)
            node = node.right
        return res
# 线索二叉树 莫里斯遍历
# 68/68 cases passed (8 ms)
# Your runtime beats 99.77 % of python submissions
# Your memory usage beats 21.51 % of python submissions (11.8 MB)
class Solution(object):
    """Morris-style in-order traversal using O(1) extra space.

    NOTE(review): this variant is destructive — it severs each visited
    node's left link (`tmp.left = None`) instead of restoring the thread,
    so the input tree is mutated. Confirm callers do not reuse the tree.
    """

    def inorderTraversal(self, root):
        res= []
        left = None  # rightmost node of the current left subtree
        while root:
            if root.left == None:
                # No left subtree: visit this node and move right (which may
                # follow a thread back up to an ancestor).
                res.append(root.val)
                root = root.right
            else:
                # Find the rightmost node of the left subtree and thread it
                # back to the current root.
                left = root.left
                while left.right:
                    left = left.right
                left.right = root
                # Descend left, cutting the left link so this node is not
                # revisited from above.
                tmp = root
                root = root.left
                tmp.left = None
        return res
# @lc code=end
|
[
"duanlongfed@126.com"
] |
duanlongfed@126.com
|
74be7fccb83335d781e061b0705580fca8b880b1
|
202180e6b7109e9058cce442054d6532c44c796d
|
/example_crm/dev_test.py
|
12ec85a8bafb5505e957738fa0e222fccc2dd04d
|
[
"Apache-2.0"
] |
permissive
|
pkimber/old-crm-migrated-to-gitlab
|
230d4eec0cfa794c90fff8c75154d98699820093
|
835e8ff3161404316b7da35cf61e3851763b37b9
|
refs/heads/master
| 2021-06-15T22:07:08.207855
| 2017-04-27T21:05:53
| 2017-04-27T21:05:53
| 12,544,468
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals

# Dev/test settings: pull in everything from the base settings and override
# the database with a throwaway local SQLite file.
from .base import *


DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'temp.db',
        # Credentials and host/port are unused by the sqlite3 backend but
        # kept for structural parity with the other settings modules.
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
|
[
"code@pkimber.net"
] |
code@pkimber.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.